Browse Source

bump to master

Victor Vieux 12 years ago
parent
commit
6fce89e60b
100 changed files with 4361 additions and 2984 deletions
  1. 3 1
      .mailmap
  2. 15 0
      AUTHORS
  3. 13 13
      README.md
  4. 4 2
      Vagrantfile
  5. 94 71
      api.go
  6. 5 0
      api_params.go
  7. 122 74
      api_test.go
  8. 11 4
      auth/auth.go
  9. 50 0
      auth/auth_test.go
  10. 0 20
      buildbot/README.rst
  11. 0 28
      buildbot/Vagrantfile
  12. 0 43
      buildbot/buildbot-cfg/buildbot-cfg.sh
  13. 0 32
      buildbot/buildbot.pp
  14. 4 349
      builder.go
  15. 311 0
      builder_client.go
  16. 0 88
      builder_test.go
  17. 207 166
      commands.go
  18. 1 0
      commands_test.go
  19. 40 35
      container.go
  20. 8 2
      container_test.go
  21. 2 1
      docker/docker.go
  22. 3 8
      docs/Makefile
  23. 0 0
      docs/sources/.nojekyll
  24. 0 1
      docs/sources/CNAME
  25. 17 15
      docs/sources/api/docker_remote_api.rst
  26. 17 0
      docs/sources/api/index.rst
  27. 6 1
      docs/sources/api/index_search_api.rst
  28. 17 12
      docs/sources/api/registry_api.rst
  29. 0 14
      docs/sources/builder/index.rst
  30. 2 2
      docs/sources/commandline/cli.rst
  31. 1 0
      docs/sources/commandline/command/commit.rst
  32. 1 0
      docs/sources/commandline/command/run.rst
  33. 28 3
      docs/sources/commandline/index.rst
  34. 0 42
      docs/sources/commandline/workingwithrepository.rst
  35. 0 0
      docs/sources/concepts/images/lego_docker.jpg
  36. 1 1
      docs/sources/concepts/index.rst
  37. 1 3
      docs/sources/concepts/introduction.rst
  38. 1 1
      docs/sources/conf.py
  39. 1 1
      docs/sources/contributing/devenvironment.rst
  40. 0 2
      docs/sources/dotcloud.yml
  41. 1 1
      docs/sources/examples/couchdb_data_volumes.rst
  42. 2 2
      docs/sources/examples/python_web_app.rst
  43. 3 3
      docs/sources/faq.rst
  44. 0 210
      docs/sources/gettingstarted/index.html
  45. 0 314
      docs/sources/index.html
  46. 121 19
      docs/sources/index.rst
  47. 0 15
      docs/sources/index/index.rst
  48. 23 0
      docs/sources/index/variable.rst
  49. 1 1
      docs/sources/installation/amazon.rst
  50. 27 17
      docs/sources/installation/binaries.rst
  51. 3 1
      docs/sources/installation/index.rst
  52. 149 0
      docs/sources/installation/kernel.rst
  53. 91 0
      docs/sources/installation/rackspace.rst
  54. 73 11
      docs/sources/installation/ubuntulinux.rst
  55. 30 15
      docs/sources/installation/upgrading.rst
  56. 3 7
      docs/sources/installation/vagrant.rst
  57. 2 2
      docs/sources/installation/windows.rst
  58. 0 6
      docs/sources/nginx.conf
  59. 0 15
      docs/sources/registry/index.rst
  60. 0 15
      docs/sources/remote-api/index.rst
  61. 22 0
      docs/sources/toctree.rst
  62. 2 2
      docs/sources/use/basics.rst
  63. 4 3
      docs/sources/use/builder.rst
  64. 19 0
      docs/sources/use/index.rst
  65. 109 0
      docs/sources/use/puppet.rst
  66. 75 0
      docs/sources/use/workingwithrepository.rst
  67. 78 60
      docs/theme/docker/layout.html
  68. BIN
      docs/theme/docker/static/img/hiring_graphic.png
  69. 4 2
      docs/website/gettingstarted/index.html
  70. 18 2
      docs/website/index.html
  71. 2 1
      getKernelVersion_darwin.go
  72. 4 2
      getKernelVersion_linux.go
  73. 20 6
      graph.go
  74. 2 1
      graph_test.go
  75. 1 1
      hack/dockerbuilder/Dockerfile
  76. 14 1
      image.go
  77. 3 0
      lxc_template.go
  78. 11 10
      network.go
  79. 43 19
      packaging/debian/Makefile
  80. 9 12
      packaging/debian/README.Debian
  81. 13 17
      packaging/debian/Vagrantfile
  82. 11 9
      packaging/debian/changelog
  83. 6 5
      packaging/debian/control
  84. 7 222
      packaging/debian/copyright
  85. 0 49
      packaging/debian/docker.initd
  86. 1149 0
      packaging/debian/lxc-docker.1
  87. 74 0
      packaging/debian/lxc-docker.init
  88. 0 13
      packaging/debian/lxc-docker.postinst
  89. 0 6
      packaging/debian/rules
  90. 0 748
      registry.go
  91. 471 0
      registry/registry.go
  92. 168 0
      registry/registry_test.go
  93. 18 24
      runtime.go
  94. 5 2
      runtime_test.go
  95. 359 76
      server.go
  96. 2 1
      state.go
  97. 2 1
      tags.go
  98. 21 0
      term/term.go
  99. 44 0
      testing/README.rst
  100. 56 0
      testing/Vagrantfile

+ 3 - 1
.mailmap

@@ -2,7 +2,7 @@
 <charles.hooper@dotcloud.com> <chooper@plumata.com> 
 <charles.hooper@dotcloud.com> <chooper@plumata.com> 
 <daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
 <daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
 <daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
 <daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
-Guillaume J. Charmes <guillaume.charmes@dotcloud.com> creack <charmes.guillaume@gmail.com>
+Guillaume J. Charmes <guillaume.charmes@dotcloud.com> <charmes.guillaume@gmail.com>
 <guillaume.charmes@dotcloud.com> <guillaume@dotcloud.com>
 <guillaume.charmes@dotcloud.com> <guillaume@dotcloud.com>
 <kencochrane@gmail.com> <KenCochrane@gmail.com>
 <kencochrane@gmail.com> <KenCochrane@gmail.com>
 <sridharr@activestate.com> <github@srid.name>
 <sridharr@activestate.com> <github@srid.name>
@@ -16,4 +16,6 @@ Tim Terhorst <mynamewastaken+git@gmail.com>
 Andy Smith <github@anarkystic.com>
 Andy Smith <github@anarkystic.com>
 <kalessin@kalessin.fr> <louis@dotcloud.com>
 <kalessin@kalessin.fr> <louis@dotcloud.com>
 <victor.vieux@dotcloud.com> <victor@dotcloud.com>
 <victor.vieux@dotcloud.com> <victor@dotcloud.com>
+<victor.vieux@dotcloud.com> <dev@vvieux.com>
 <dominik@honnef.co> <dominikh@fork-bomb.org>
 <dominik@honnef.co> <dominikh@fork-bomb.org>
+Thatcher Peskens <thatcher@dotcloud.com>

+ 15 - 0
AUTHORS

@@ -1,24 +1,34 @@
+Al Tobey <al@ooyala.com>
+Alexey Shamrin <shamrin@gmail.com>
 Andrea Luzzardi <aluzzardi@gmail.com>
 Andrea Luzzardi <aluzzardi@gmail.com>
 Andy Rothfusz <github@metaliveblog.com>
 Andy Rothfusz <github@metaliveblog.com>
 Andy Smith <github@anarkystic.com>
 Andy Smith <github@anarkystic.com>
 Antony Messerli <amesserl@rackspace.com>
 Antony Messerli <amesserl@rackspace.com>
+Barry Allard <barry.allard@gmail.com>
+Brandon Liu <bdon@bdon.org>
 Brian McCallister <brianm@skife.org>
 Brian McCallister <brianm@skife.org>
+Bruno Bigras <bigras.bruno@gmail.com>
 Caleb Spare <cespare@gmail.com>
 Caleb Spare <cespare@gmail.com>
 Charles Hooper <charles.hooper@dotcloud.com>
 Charles Hooper <charles.hooper@dotcloud.com>
 Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
 Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
 Daniel Robinson <gottagetmac@gmail.com>
 Daniel Robinson <gottagetmac@gmail.com>
+Daniel Von Fange <daniel@leancoder.com>
 Dominik Honnef <dominik@honnef.co>
 Dominik Honnef <dominik@honnef.co>
 Don Spaulding <donspauldingii@gmail.com>
 Don Spaulding <donspauldingii@gmail.com>
+Dr Nic Williams <drnicwilliams@gmail.com>
+Evan Wies <evan@neomantra.net>
 ezbercih <cem.ezberci@gmail.com>
 ezbercih <cem.ezberci@gmail.com>
 Flavio Castelli <fcastelli@suse.com>
 Flavio Castelli <fcastelli@suse.com>
 Francisco Souza <f@souza.cc>
 Francisco Souza <f@souza.cc>
 Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
 Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
 Guillaume J. Charmes <guillaume.charmes@dotcloud.com>
 Guillaume J. Charmes <guillaume.charmes@dotcloud.com>
+Harley Laue <losinggeneration@gmail.com>
 Hunter Blanks <hunter@twilio.com>
 Hunter Blanks <hunter@twilio.com>
 Jeff Lindsay <progrium@gmail.com>
 Jeff Lindsay <progrium@gmail.com>
 Jeremy Grosser <jeremy@synack.me>
 Jeremy Grosser <jeremy@synack.me>
 Joffrey F <joffrey@dotcloud.com>
 Joffrey F <joffrey@dotcloud.com>
 John Costa <john.costa@gmail.com>
 John Costa <john.costa@gmail.com>
+Jonas Pfenniger <jonas@pfenniger.name>
 Jonathan Rudenberg <jonathan@titanous.com>
 Jonathan Rudenberg <jonathan@titanous.com>
 Julien Barbier <write0@gmail.com>
 Julien Barbier <write0@gmail.com>
 Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
 Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
@@ -27,8 +37,11 @@ Kevin J. Lynagh <kevin@keminglabs.com>
 Louis Opter <kalessin@kalessin.fr>
 Louis Opter <kalessin@kalessin.fr>
 Maxim Treskin <zerthurd@gmail.com>
 Maxim Treskin <zerthurd@gmail.com>
 Mikhail Sobolev <mss@mawhrin.net>
 Mikhail Sobolev <mss@mawhrin.net>
+Nate Jones <nate@endot.org>
 Nelson Chen <crazysim@gmail.com>
 Nelson Chen <crazysim@gmail.com>
 Niall O'Higgins <niallo@unworkable.org>
 Niall O'Higgins <niallo@unworkable.org>
+odk- <github@odkurzacz.org>
+Paul Bowsher <pbowsher@globalpersonals.co.uk>
 Paul Hammond <paul@paulhammond.org>
 Paul Hammond <paul@paulhammond.org>
 Piotr Bogdan <ppbogdan@gmail.com>
 Piotr Bogdan <ppbogdan@gmail.com>
 Robert Obryk <robryk@gmail.com>
 Robert Obryk <robryk@gmail.com>
@@ -38,6 +51,8 @@ Silas Sewell <silas@sewell.org>
 Solomon Hykes <solomon@dotcloud.com>
 Solomon Hykes <solomon@dotcloud.com>
 Sridhar Ratnakumar <sridharr@activestate.com>
 Sridhar Ratnakumar <sridharr@activestate.com>
 Thatcher Peskens <thatcher@dotcloud.com>
 Thatcher Peskens <thatcher@dotcloud.com>
+Thomas Bikeev <thomas.bikeev@mac.com>
+Tianon Gravi <admwiggin@gmail.com>
 Tim Terhorst <mynamewastaken+git@gmail.com>
 Tim Terhorst <mynamewastaken+git@gmail.com>
 Troy Howard <thoward37@gmail.com>
 Troy Howard <thoward37@gmail.com>
 unclejack <unclejacksons@gmail.com>
 unclejack <unclejacksons@gmail.com>

+ 13 - 13
README.md

@@ -35,7 +35,7 @@ for containerization, including Linux with [openvz](http://openvz.org), [vserver
 
 
 Docker builds on top of these low-level primitives to offer developers a portable format and runtime environment that solves
 Docker builds on top of these low-level primitives to offer developers a portable format and runtime environment that solves
 all 4 problems. Docker containers are small (and their transfer can be optimized with layers), they have basically zero memory and cpu overhead,
 all 4 problems. Docker containers are small (and their transfer can be optimized with layers), they have basically zero memory and cpu overhead,
-the are completely portable and are designed from the ground up with an application-centric design.
+they are completely portable and are designed from the ground up with an application-centric design.
 
 
 The best part: because docker operates at the OS level, it can still be run inside a VM!
 The best part: because docker operates at the OS level, it can still be run inside a VM!
 
 
@@ -46,7 +46,7 @@ Docker does not require that you buy into a particular programming language, fra
 Is your application a unix process? Does it use files, tcp connections, environment variables, standard unix streams and command-line
 Is your application a unix process? Does it use files, tcp connections, environment variables, standard unix streams and command-line
 arguments as inputs and outputs? Then docker can run it.
 arguments as inputs and outputs? Then docker can run it.
 
 
-Can your application's build be expressed a sequence of such commands? Then docker can build it.
+Can your application's build be expressed as a sequence of such commands? Then docker can build it.
 
 
 
 
 ## Escape dependency hell
 ## Escape dependency hell
@@ -70,21 +70,21 @@ Docker solves dependency hell by giving the developer a simple way to express *a
 and streamline the process of assembling them. If this makes you think of [XKCD 927](http://xkcd.com/927/), don't worry. Docker doesn't
 and streamline the process of assembling them. If this makes you think of [XKCD 927](http://xkcd.com/927/), don't worry. Docker doesn't
 *replace* your favorite packaging systems. It simply orchestrates their use in a simple and repeatable way. How does it do that? With layers.
 *replace* your favorite packaging systems. It simply orchestrates their use in a simple and repeatable way. How does it do that? With layers.
 
 
-Docker defines a build as running a sequence unix commands, one after the other, in the same container. Build commands modify the contents of the container
+Docker defines a build as running a sequence of unix commands, one after the other, in the same container. Build commands modify the contents of the container
 (usually by installing new files on the filesystem), the next command modifies it some more, etc. Since each build command inherits the result of the previous
 (usually by installing new files on the filesystem), the next command modifies it some more, etc. Since each build command inherits the result of the previous
 commands, the *order* in which the commands are executed expresses *dependencies*.
 commands, the *order* in which the commands are executed expresses *dependencies*.
 
 
 Here's a typical docker build process:
 Here's a typical docker build process:
 
 
 ```bash
 ```bash
-from	ubuntu:12.10
-run	apt-get update
-run	apt-get install python
-run	apt-get install python-pip
-run	pip install django
-run	apt-get install curl
-run	curl http://github.com/shykes/helloflask/helloflask/master.tar.gz | tar -zxv
-run	cd master && pip install -r requirements.txt
+from ubuntu:12.10
+run apt-get update
+run DEBIAN_FRONTEND=noninteractive apt-get install -q -y python
+run DEBIAN_FRONTEND=noninteractive apt-get install -q -y python-pip
+run pip install django
+run DEBIAN_FRONTEND=noninteractive apt-get install -q -y curl
+run curl -L https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
+run cd helloflask-master && pip install -r requirements.txt
 ```
 ```
 
 
 Note that Docker doesn't care *how* dependencies are built - as long as they can be built by running a unix command in a container.
 Note that Docker doesn't care *how* dependencies are built - as long as they can be built by running a unix command in a container.
@@ -293,7 +293,7 @@ a format that is self-describing and portable, so that any compliant runtime can
 
 
 The spec for Standard Containers is currently a work in progress, but it is very straightforward. It mostly defines 1) an image format, 2) a set of standard operations, and 3) an execution environment.
 The spec for Standard Containers is currently a work in progress, but it is very straightforward. It mostly defines 1) an image format, 2) a set of standard operations, and 3) an execution environment.
 
 
-A great analogy for this is the shipping container. Just like Standard Containers are a fundamental unit of software delivery, shipping containers (http://bricks.argz.com/ins/7823-1/12) are a fundamental unit of physical delivery.
+A great analogy for this is the shipping container. Just like how Standard Containers are a fundamental unit of software delivery, shipping containers (http://bricks.argz.com/ins/7823-1/12) are a fundamental unit of physical delivery.
 
 
 ### 1. STANDARD OPERATIONS
 ### 1. STANDARD OPERATIONS
 
 
@@ -321,7 +321,7 @@ Similarly, before Standard Containers, by the time a software component ran in p
 
 
 ### 5. INDUSTRIAL-GRADE DELIVERY
 ### 5. INDUSTRIAL-GRADE DELIVERY
 
 
-There are 17 million shipping containers in existence, packed with every physical good imaginable. Every single one of them can be loaded on the same boats, by the same cranes, in the same facilities, and sent anywhere in the World with incredible efficiency. It is embarrassing to think that a 30 ton shipment of coffee can safely travel half-way across the World in *less time* than it takes a software team to deliver its code from one datacenter to another sitting 10 miles away.
+There are 17 million shipping containers in existence, packed with every physical good imaginable. Every single one of them can be loaded onto the same boats, by the same cranes, in the same facilities, and sent anywhere in the World with incredible efficiency. It is embarrassing to think that a 30 ton shipment of coffee can safely travel half-way across the World in *less time* than it takes a software team to deliver its code from one datacenter to another sitting 10 miles away.
 
 
 With Standard Containers we can put an end to that embarrassment, by making INDUSTRIAL-GRADE DELIVERY of software a reality.
 With Standard Containers we can put an end to that embarrassment, by making INDUSTRIAL-GRADE DELIVERY of software a reality.
 
 

+ 4 - 2
Vagrantfile

@@ -3,6 +3,8 @@
 
 
 BOX_NAME = ENV['BOX_NAME'] || "ubuntu"
 BOX_NAME = ENV['BOX_NAME'] || "ubuntu"
 BOX_URI = ENV['BOX_URI'] || "http://files.vagrantup.com/precise64.box"
 BOX_URI = ENV['BOX_URI'] || "http://files.vagrantup.com/precise64.box"
+AWS_REGION = ENV['AWS_REGION'] || "us-east-1"
+AWS_AMI    = ENV['AWS_AMI']    || "ami-d0f89fb9"
 
 
 Vagrant::Config.run do |config|
 Vagrant::Config.run do |config|
   # Setup virtual machine box. This VM configuration code is always executed.
   # Setup virtual machine box. This VM configuration code is always executed.
@@ -49,8 +51,8 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
     aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
     aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
     override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
     override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
     override.ssh.username = "ubuntu"
     override.ssh.username = "ubuntu"
-    aws.region = "us-east-1"
-    aws.ami = "ami-d0f89fb9"
+    aws.region = AWS_REGION
+    aws.ami    = AWS_AMI
     aws.instance_type = "t1.micro"
     aws.instance_type = "t1.micro"
   end
   end
 
 

+ 94 - 71
api.go

@@ -4,8 +4,8 @@ import (
 	"encoding/json"
 	"encoding/json"
 	"fmt"
 	"fmt"
 	"github.com/dotcloud/docker/auth"
 	"github.com/dotcloud/docker/auth"
+	"github.com/dotcloud/docker/utils"
 	"github.com/gorilla/mux"
 	"github.com/gorilla/mux"
-	"github.com/shin-/cookiejar"
 	"io"
 	"io"
 	"log"
 	"log"
 	"net/http"
 	"net/http"
@@ -34,6 +34,8 @@ func parseForm(r *http.Request) error {
 func httpError(w http.ResponseWriter, err error) {
 func httpError(w http.ResponseWriter, err error) {
 	if strings.HasPrefix(err.Error(), "No such") {
 	if strings.HasPrefix(err.Error(), "No such") {
 		http.Error(w, err.Error(), http.StatusNotFound)
 		http.Error(w, err.Error(), http.StatusNotFound)
+	} else if strings.HasPrefix(err.Error(), "Bad parameter") {
+		http.Error(w, err.Error(), http.StatusBadRequest)
 	} else {
 	} else {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 	}
 	}
@@ -44,12 +46,18 @@ func writeJson(w http.ResponseWriter, b []byte) {
 	w.Write(b)
 	w.Write(b)
 }
 }
 
 
-func getAuth(srv *Server, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	config := &auth.AuthConfig{
-		Username: srv.runtime.authConfig.Username,
-		Email:    srv.runtime.authConfig.Email,
+func getBoolParam(value string) (bool, error) {
+	if value == "1" || strings.ToLower(value) == "true" {
+		return true, nil
+	}
+	if value == "" || value == "0" || strings.ToLower(value) == "false" {
+		return false, nil
 	}
 	}
-	b, err := json.Marshal(config)
+	return false, fmt.Errorf("Bad parameter")
+}
+
+func getAuth(srv *Server, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	b, err := json.Marshal(srv.registry.GetAuthConfig())
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -63,18 +71,17 @@ func postAuth(srv *Server, w http.ResponseWriter, r *http.Request, vars map[stri
 		return err
 		return err
 	}
 	}
 
 
-	if config.Username == srv.runtime.authConfig.Username {
-		config.Password = srv.runtime.authConfig.Password
+	if config.Username == srv.registry.GetAuthConfig().Username {
+		config.Password = srv.registry.GetAuthConfig().Password
 	}
 	}
 
 
 	newAuthConfig := auth.NewAuthConfig(config.Username, config.Password, config.Email, srv.runtime.root)
 	newAuthConfig := auth.NewAuthConfig(config.Username, config.Password, config.Email, srv.runtime.root)
 	status, err := auth.Login(newAuthConfig)
 	status, err := auth.Login(newAuthConfig)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
-	} else {
-		srv.runtime.graph.getHttpClient().Jar = cookiejar.NewCookieJar()
-		srv.runtime.authConfig = newAuthConfig
 	}
 	}
+	srv.registry.ResetClient(newAuthConfig)
+
 	if status != "" {
 	if status != "" {
 		b, err := json.Marshal(&ApiAuth{Status: status})
 		b, err := json.Marshal(&ApiAuth{Status: status})
 		if err != nil {
 		if err != nil {
@@ -116,8 +123,8 @@ func getContainersExport(srv *Server, w http.ResponseWriter, r *http.Request, va
 	name := vars["name"]
 	name := vars["name"]
 
 
 	if err := srv.ContainerExport(name, w); err != nil {
 	if err := srv.ContainerExport(name, w); err != nil {
-		Debugf("%s", err.Error())
-		//return nil, err
+		utils.Debugf("%s", err.Error())
+		return err
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -127,11 +134,13 @@ func getImagesJson(srv *Server, w http.ResponseWriter, r *http.Request, vars map
 		return err
 		return err
 	}
 	}
 
 
-	all := r.Form.Get("all") == "1"
+	all, err := getBoolParam(r.Form.Get("all"))
+	if err != nil {
+		return err
+	}
 	filter := r.Form.Get("filter")
 	filter := r.Form.Get("filter")
-	only_ids := r.Form.Get("only_ids") == "1"
 
 
-	outs, err := srv.Images(all, only_ids, filter)
+	outs, err := srv.Images(all, filter)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -198,9 +207,10 @@ func getContainersPs(srv *Server, w http.ResponseWriter, r *http.Request, vars m
 	if err := parseForm(r); err != nil {
 	if err := parseForm(r); err != nil {
 		return err
 		return err
 	}
 	}
-	all := r.Form.Get("all") == "1"
-	trunc_cmd := r.Form.Get("trunc_cmd") != "0"
-	only_ids := r.Form.Get("only_ids") == "1"
+	all, err := getBoolParam(r.Form.Get("all"))
+	if err != nil {
+		return err
+	}
 	since := r.Form.Get("since")
 	since := r.Form.Get("since")
 	before := r.Form.Get("before")
 	before := r.Form.Get("before")
 	n, err := strconv.Atoi(r.Form.Get("limit"))
 	n, err := strconv.Atoi(r.Form.Get("limit"))
@@ -208,7 +218,7 @@ func getContainersPs(srv *Server, w http.ResponseWriter, r *http.Request, vars m
 		n = -1
 		n = -1
 	}
 	}
 
 
-	outs := srv.Containers(all, trunc_cmd, only_ids, n, since, before)
+	outs := srv.Containers(all, n, since, before)
 	b, err := json.Marshal(outs)
 	b, err := json.Marshal(outs)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
@@ -227,7 +237,10 @@ func postImagesTag(srv *Server, w http.ResponseWriter, r *http.Request, vars map
 		return fmt.Errorf("Missing parameter")
 		return fmt.Errorf("Missing parameter")
 	}
 	}
 	name := vars["name"]
 	name := vars["name"]
-	force := r.Form.Get("force") == "1"
+	force, err := getBoolParam(r.Form.Get("force"))
+	if err != nil {
+		return err
+	}
 
 
 	if err := srv.ContainerTag(name, repo, tag, force); err != nil {
 	if err := srv.ContainerTag(name, repo, tag, force); err != nil {
 		return err
 		return err
@@ -242,7 +255,7 @@ func postCommit(srv *Server, w http.ResponseWriter, r *http.Request, vars map[st
 	}
 	}
 	config := &Config{}
 	config := &Config{}
 	if err := json.NewDecoder(r.Body).Decode(config); err != nil {
 	if err := json.NewDecoder(r.Body).Decode(config); err != nil {
-		Debugf("%s", err.Error())
+		utils.Debugf("%s", err.Error())
 	}
 	}
 	repo := r.Form.Get("repo")
 	repo := r.Form.Get("repo")
 	tag := r.Form.Get("tag")
 	tag := r.Form.Get("tag")
@@ -270,23 +283,17 @@ func postImagesCreate(srv *Server, w http.ResponseWriter, r *http.Request, vars
 
 
 	src := r.Form.Get("fromSrc")
 	src := r.Form.Get("fromSrc")
 	image := r.Form.Get("fromImage")
 	image := r.Form.Get("fromImage")
-	repo := r.Form.Get("repo")
 	tag := r.Form.Get("tag")
 	tag := r.Form.Get("tag")
+	repo := r.Form.Get("repo")
 
 
-	in, out, err := hijackServer(w)
-	if err != nil {
-		return err
-	}
-	defer in.Close()
-	fmt.Fprintf(out, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
 	if image != "" { //pull
 	if image != "" { //pull
 		registry := r.Form.Get("registry")
 		registry := r.Form.Get("registry")
-		if err := srv.ImagePull(image, tag, registry, out); err != nil {
-			fmt.Fprintf(out, "Error: %s\n", err)
+		if err := srv.ImagePull(image, tag, registry, w); err != nil {
+			return err
 		}
 		}
 	} else { //import
 	} else { //import
-		if err := srv.ImageImport(src, repo, tag, in, out); err != nil {
-			fmt.Fprintf(out, "Error: %s\n", err)
+		if err := srv.ImageImport(src, repo, tag, r.Body, w); err != nil {
+			return err
 		}
 		}
 	}
 	}
 	return nil
 	return nil
@@ -322,15 +329,9 @@ func postImagesInsert(srv *Server, w http.ResponseWriter, r *http.Request, vars
 	}
 	}
 	name := vars["name"]
 	name := vars["name"]
 
 
-	in, out, err := hijackServer(w)
-	if err != nil {
+	if err := srv.ImageInsert(name, url, path, w); err != nil {
 		return err
 		return err
 	}
 	}
-	defer in.Close()
-	fmt.Fprintf(out, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
-	if err := srv.ImageInsert(name, url, path, out); err != nil {
-		fmt.Fprintf(out, "Error: %s\n", err)
-	}
 	return nil
 	return nil
 }
 }
 
 
@@ -338,7 +339,6 @@ func postImagesPush(srv *Server, w http.ResponseWriter, r *http.Request, vars ma
 	if err := parseForm(r); err != nil {
 	if err := parseForm(r); err != nil {
 		return err
 		return err
 	}
 	}
-
 	registry := r.Form.Get("registry")
 	registry := r.Form.Get("registry")
 
 
 	if vars == nil {
 	if vars == nil {
@@ -346,28 +346,9 @@ func postImagesPush(srv *Server, w http.ResponseWriter, r *http.Request, vars ma
 	}
 	}
 	name := vars["name"]
 	name := vars["name"]
 
 
-	in, out, err := hijackServer(w)
-	if err != nil {
+	if err := srv.ImagePush(name, registry, w); err != nil {
 		return err
 		return err
 	}
 	}
-	defer in.Close()
-	fmt.Fprintf(out, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
-	if err := srv.ImagePush(name, registry, out); err != nil {
-		fmt.Fprintln(out, "Error: %s\n", err)
-	}
-	return nil
-}
-
-func postBuild(srv *Server, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	in, out, err := hijackServer(w)
-	if err != nil {
-		return err
-	}
-	defer in.Close()
-	fmt.Fprintf(out, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
-	if err := srv.ImageCreateFromFile(in, out); err != nil {
-		fmt.Fprintln(out, "Error: %s\n", err)
-	}
 	return nil
 	return nil
 }
 }
 
 
@@ -428,7 +409,10 @@ func deleteContainers(srv *Server, w http.ResponseWriter, r *http.Request, vars
 		return fmt.Errorf("Missing parameter")
 		return fmt.Errorf("Missing parameter")
 	}
 	}
 	name := vars["name"]
 	name := vars["name"]
-	removeVolume := r.Form.Get("v") == "1"
+	removeVolume, err := getBoolParam(r.Form.Get("v"))
+	if err != nil {
+		return err
+	}
 
 
 	if err := srv.ContainerDestroy(name, removeVolume); err != nil {
 	if err := srv.ContainerDestroy(name, removeVolume); err != nil {
 		return err
 		return err
@@ -503,11 +487,27 @@ func postContainersAttach(srv *Server, w http.ResponseWriter, r *http.Request, v
 	if err := parseForm(r); err != nil {
 	if err := parseForm(r); err != nil {
 		return err
 		return err
 	}
 	}
-	logs := r.Form.Get("logs") == "1"
-	stream := r.Form.Get("stream") == "1"
-	stdin := r.Form.Get("stdin") == "1"
-	stdout := r.Form.Get("stdout") == "1"
-	stderr := r.Form.Get("stderr") == "1"
+	logs, err := getBoolParam(r.Form.Get("logs"))
+	if err != nil {
+		return err
+	}
+	stream, err := getBoolParam(r.Form.Get("stream"))
+	if err != nil {
+		return err
+	}
+	stdin, err := getBoolParam(r.Form.Get("stdin"))
+	if err != nil {
+		return err
+	}
+	stdout, err := getBoolParam(r.Form.Get("stdout"))
+	if err != nil {
+		return err
+	}
+	stderr, err := getBoolParam(r.Form.Get("stderr"))
+	if err != nil {
+		return err
+	}
+
 	if vars == nil {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 		return fmt.Errorf("Missing parameter")
 	}
 	}
@@ -562,6 +562,29 @@ func getImagesByName(srv *Server, w http.ResponseWriter, r *http.Request, vars m
 	return nil
 	return nil
 }
 }
 
 
+func postImagesGetCache(srv *Server, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	apiConfig := &ApiImageConfig{}
+	if err := json.NewDecoder(r.Body).Decode(apiConfig); err != nil {
+		return err
+	}
+
+	image, err := srv.ImageGetCached(apiConfig.Id, apiConfig.Config)
+	if err != nil {
+		return err
+	}
+	if image == nil {
+		w.WriteHeader(http.StatusNotFound)
+		return nil
+	}
+	apiId := &ApiId{Id: image.Id}
+	b, err := json.Marshal(apiId)
+	if err != nil {
+		return err
+	}
+	writeJson(w, b)
+	return nil
+}
+
 func ListenAndServe(addr string, srv *Server, logging bool) error {
 func ListenAndServe(addr string, srv *Server, logging bool) error {
 	r := mux.NewRouter()
 	r := mux.NewRouter()
 	log.Printf("Listening for HTTP on %s\n", addr)
 	log.Printf("Listening for HTTP on %s\n", addr)
@@ -584,11 +607,11 @@ func ListenAndServe(addr string, srv *Server, logging bool) error {
 		"POST": {
 		"POST": {
 			"/auth":                         postAuth,
 			"/auth":                         postAuth,
 			"/commit":                       postCommit,
 			"/commit":                       postCommit,
-			"/build":                        postBuild,
 			"/images/create":                postImagesCreate,
 			"/images/create":                postImagesCreate,
 			"/images/{name:.*}/insert":      postImagesInsert,
 			"/images/{name:.*}/insert":      postImagesInsert,
 			"/images/{name:.*}/push":        postImagesPush,
 			"/images/{name:.*}/push":        postImagesPush,
 			"/images/{name:.*}/tag":         postImagesTag,
 			"/images/{name:.*}/tag":         postImagesTag,
+			"/images/getCache":              postImagesGetCache,
 			"/containers/create":            postContainersCreate,
 			"/containers/create":            postContainersCreate,
 			"/containers/{name:.*}/kill":    postContainersKill,
 			"/containers/{name:.*}/kill":    postContainersKill,
 			"/containers/{name:.*}/restart": postContainersRestart,
 			"/containers/{name:.*}/restart": postContainersRestart,
@@ -605,20 +628,20 @@ func ListenAndServe(addr string, srv *Server, logging bool) error {
 
 
 	for method, routes := range m {
 	for method, routes := range m {
 		for route, fct := range routes {
 		for route, fct := range routes {
-			Debugf("Registering %s, %s", method, route)
+			utils.Debugf("Registering %s, %s", method, route)
 			// NOTE: scope issue, make sure the variables are local and won't be changed
 			// NOTE: scope issue, make sure the variables are local and won't be changed
 			localRoute := route
 			localRoute := route
 			localMethod := method
 			localMethod := method
 			localFct := fct
 			localFct := fct
 			r.Path(localRoute).Methods(localMethod).HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 			r.Path(localRoute).Methods(localMethod).HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				Debugf("Calling %s %s", localMethod, localRoute)
+				utils.Debugf("Calling %s %s", localMethod, localRoute)
 				if logging {
 				if logging {
 					log.Println(r.Method, r.RequestURI)
 					log.Println(r.Method, r.RequestURI)
 				}
 				}
 				if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
 				if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
 					userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
 					userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
 					if len(userAgent) == 2 && userAgent[1] != VERSION {
 					if len(userAgent) == 2 && userAgent[1] != VERSION {
-						Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], VERSION)
+						utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], VERSION)
 					}
 					}
 				}
 				}
 				if err := localFct(srv, w, r, mux.Vars(r)); err != nil {
 				if err := localFct(srv, w, r, mux.Vars(r)); err != nil {

+ 5 - 0
api_params.go

@@ -68,3 +68,8 @@ type ApiWait struct {
 type ApiAuth struct {
 type ApiAuth struct {
 	Status string
 	Status string
 }
 }
+
+type ApiImageConfig struct {
+	Id string
+	*Config
+}

+ 122 - 74
api_test.go

@@ -6,6 +6,8 @@ import (
 	"bytes"
 	"bytes"
 	"encoding/json"
 	"encoding/json"
 	"github.com/dotcloud/docker/auth"
 	"github.com/dotcloud/docker/auth"
+	"github.com/dotcloud/docker/registry"
+	"github.com/dotcloud/docker/utils"
 	"io"
 	"io"
 	"net"
 	"net"
 	"net/http"
 	"net/http"
@@ -23,7 +25,10 @@ func TestGetAuth(t *testing.T) {
 	}
 	}
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
-	srv := &Server{runtime: runtime}
+	srv := &Server{
+		runtime:  runtime,
+		registry: registry.NewRegistry(runtime.root),
+	}
 
 
 	r := httptest.NewRecorder()
 	r := httptest.NewRecorder()
 
 
@@ -46,13 +51,14 @@ func TestGetAuth(t *testing.T) {
 	if err := postAuth(srv, r, req, nil); err != nil {
 	if err := postAuth(srv, r, req, nil); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
+
 	if r.Code != http.StatusOK && r.Code != 0 {
 	if r.Code != http.StatusOK && r.Code != 0 {
 		t.Fatalf("%d OK or 0 expected, received %d\n", http.StatusOK, r.Code)
 		t.Fatalf("%d OK or 0 expected, received %d\n", http.StatusOK, r.Code)
 	}
 	}
 
 
-	if runtime.authConfig.Username != authConfig.Username ||
-		runtime.authConfig.Password != authConfig.Password ||
-		runtime.authConfig.Email != authConfig.Email {
+	newAuthConfig := srv.registry.GetAuthConfig()
+	if newAuthConfig.Username != authConfig.Username ||
+		newAuthConfig.Email != authConfig.Email {
 		t.Fatalf("The auth configuration hasn't been set correctly")
 		t.Fatalf("The auth configuration hasn't been set correctly")
 	}
 	}
 }
 }
@@ -115,8 +121,8 @@ func TestGetImagesJson(t *testing.T) {
 
 
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
-	// only_ids=0&all=0
-	req, err := http.NewRequest("GET", "/images/json?only_ids=0&all=0", nil)
+	// all=0
+	req, err := http.NewRequest("GET", "/images/json?all=0", nil)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -142,8 +148,8 @@ func TestGetImagesJson(t *testing.T) {
 
 
 	r2 := httptest.NewRecorder()
 	r2 := httptest.NewRecorder()
 
 
-	// only_ids=1&all=1
-	req2, err := http.NewRequest("GET", "/images/json?only_ids=1&all=1", nil)
+	// all=1
+	req2, err := http.NewRequest("GET", "/images/json?all=true", nil)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -161,12 +167,8 @@ func TestGetImagesJson(t *testing.T) {
 		t.Errorf("Excepted 1 image, %d found", len(images2))
 		t.Errorf("Excepted 1 image, %d found", len(images2))
 	}
 	}
 
 
-	if images2[0].Repository != "" {
-		t.Errorf("Excepted no image Repository, %s found", images2[0].Repository)
-	}
-
-	if images2[0].Id != GetTestImage(runtime).ShortId() {
-		t.Errorf("Retrieved image Id differs, expected %s, received %s", GetTestImage(runtime).ShortId(), images2[0].Id)
+	if images2[0].Id != GetTestImage(runtime).Id {
+		t.Errorf("Retrieved image Id differs, expected %s, received %s", GetTestImage(runtime).Id, images2[0].Id)
 	}
 	}
 
 
 	r3 := httptest.NewRecorder()
 	r3 := httptest.NewRecorder()
@@ -189,6 +191,24 @@ func TestGetImagesJson(t *testing.T) {
 	if len(images3) != 0 {
 	if len(images3) != 0 {
 		t.Errorf("Excepted 1 image, %d found", len(images3))
 		t.Errorf("Excepted 1 image, %d found", len(images3))
 	}
 	}
+
+	r4 := httptest.NewRecorder()
+
+	// all=foobar
+	req4, err := http.NewRequest("GET", "/images/json?all=foobar", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = getImagesJson(srv, r4, req4, nil)
+	if err == nil {
+		t.Fatalf("Error expected, received none")
+	}
+
+	httpError(r4, err)
+	if r4.Code != http.StatusBadRequest {
+		t.Fatalf("%d Bad Request expected, received %d\n", http.StatusBadRequest, r4.Code)
+	}
 }
 }
 
 
 func TestGetImagesViz(t *testing.T) {
 func TestGetImagesViz(t *testing.T) {
@@ -226,7 +246,10 @@ func TestGetImagesSearch(t *testing.T) {
 	}
 	}
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
-	srv := &Server{runtime: runtime}
+	srv := &Server{
+		runtime:  runtime,
+		registry: registry.NewRegistry(runtime.root),
+	}
 
 
 	r := httptest.NewRecorder()
 	r := httptest.NewRecorder()
 
 
@@ -329,8 +352,8 @@ func TestGetContainersPs(t *testing.T) {
 	if len(containers) != 1 {
 	if len(containers) != 1 {
 		t.Fatalf("Excepted %d container, %d found", 1, len(containers))
 		t.Fatalf("Excepted %d container, %d found", 1, len(containers))
 	}
 	}
-	if containers[0].Id != container.ShortId() {
-		t.Fatalf("Container ID mismatch. Expected: %s, received: %s\n", container.ShortId(), containers[0].Id)
+	if containers[0].Id != container.Id {
+		t.Fatalf("Container ID mismatch. Expected: %s, received: %s\n", container.Id, containers[0].Id)
 	}
 	}
 }
 }
 
 
@@ -480,13 +503,16 @@ func TestPostAuth(t *testing.T) {
 	}
 	}
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
-	srv := &Server{runtime: runtime}
+	srv := &Server{
+		runtime:  runtime,
+		registry: registry.NewRegistry(runtime.root),
+	}
 
 
 	authConfigOrig := &auth.AuthConfig{
 	authConfigOrig := &auth.AuthConfig{
 		Username: "utest",
 		Username: "utest",
 		Email:    "utest@yopmail.com",
 		Email:    "utest@yopmail.com",
 	}
 	}
-	runtime.authConfig = authConfigOrig
+	srv.registry.ResetClient(authConfigOrig)
 
 
 	r := httptest.NewRecorder()
 	r := httptest.NewRecorder()
 	if err := getAuth(srv, r, nil, nil); err != nil {
 	if err := getAuth(srv, r, nil, nil); err != nil {
@@ -552,56 +578,6 @@ func TestPostCommit(t *testing.T) {
 	}
 	}
 }
 }
 
 
-func TestPostBuild(t *testing.T) {
-	runtime, err := newTestRuntime()
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer nuke(runtime)
-
-	srv := &Server{runtime: runtime}
-
-	stdin, stdinPipe := io.Pipe()
-	stdout, stdoutPipe := io.Pipe()
-
-	c1 := make(chan struct{})
-	go func() {
-		defer close(c1)
-		r := &hijackTester{
-			ResponseRecorder: httptest.NewRecorder(),
-			in:               stdin,
-			out:              stdoutPipe,
-		}
-
-		if err := postBuild(srv, r, nil, nil); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	// Acknowledge hijack
-	setTimeout(t, "hijack acknowledge timed out", 2*time.Second, func() {
-		stdout.Read([]byte{})
-		stdout.Read(make([]byte, 4096))
-	})
-
-	setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("from docker-ut\n", "FROM docker-ut", stdout, stdinPipe, 15); err != nil {
-			t.Fatal(err)
-		}
-	})
-
-	// Close pipes (client disconnects)
-	if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
-		t.Fatal(err)
-	}
-
-	// Wait for build to finish, the client disconnected, therefore, Build finished his job
-	setTimeout(t, "Waiting for CmdBuild timed out", 2*time.Second, func() {
-		<-c1
-	})
-
-}
-
 func TestPostImagesCreate(t *testing.T) {
 func TestPostImagesCreate(t *testing.T) {
 	// FIXME: Use the staging in order to perform tests
 	// FIXME: Use the staging in order to perform tests
 
 
@@ -668,10 +644,82 @@ func TestPostImagesCreate(t *testing.T) {
 	// })
 	// })
 }
 }
 
 
-// func TestPostImagesInsert(t *testing.T) {
-// 	//FIXME: Implement this test (or remove this endpoint)
-// 	t.Log("Test not implemented")
-// }
+func TestPostImagesInsert(t *testing.T) {
+	// runtime, err := newTestRuntime()
+	// if err != nil {
+	// 	t.Fatal(err)
+	// }
+	// defer nuke(runtime)
+
+	// srv := &Server{runtime: runtime}
+
+	// stdin, stdinPipe := io.Pipe()
+	// stdout, stdoutPipe := io.Pipe()
+
+	// // Attach to it
+	// c1 := make(chan struct{})
+	// go func() {
+	// 	defer close(c1)
+	// 	r := &hijackTester{
+	// 		ResponseRecorder: httptest.NewRecorder(),
+	// 		in:               stdin,
+	// 		out:              stdoutPipe,
+	// 	}
+
+	// 	req, err := http.NewRequest("POST", "/images/"+unitTestImageName+"/insert?path=%2Ftest&url=https%3A%2F%2Fraw.github.com%2Fdotcloud%2Fdocker%2Fmaster%2FREADME.md", bytes.NewReader([]byte{}))
+	// 	if err != nil {
+	// 		t.Fatal(err)
+	// 	}
+	// 	if err := postContainersCreate(srv, r, req, nil); err != nil {
+	// 		t.Fatal(err)
+	// 	}
+	// }()
+
+	// // Acknowledge hijack
+	// setTimeout(t, "hijack acknowledge timed out", 5*time.Second, func() {
+	// 	stdout.Read([]byte{})
+	// 	stdout.Read(make([]byte, 4096))
+	// })
+
+	// id := ""
+	// setTimeout(t, "Waiting for imagesInsert output", 10*time.Second, func() {
+	// 	for {
+	// 		reader := bufio.NewReader(stdout)
+	// 		id, err = reader.ReadString('\n')
+	// 		if err != nil {
+	// 			t.Fatal(err)
+	// 		}
+	// 	}
+	// })
+
+	// // Close pipes (client disconnects)
+	// if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
+	// 	t.Fatal(err)
+	// }
+
+	// // Wait for attach to finish, the client disconnected, therefore, Attach finished his job
+	// setTimeout(t, "Waiting for CmdAttach timed out", 2*time.Second, func() {
+	// 	<-c1
+	// })
+
+	// img, err := srv.runtime.repositories.LookupImage(id)
+	// if err != nil {
+	// 	t.Fatalf("New image %s expected but not found", id)
+	// }
+
+	// layer, err := img.layer()
+	// if err != nil {
+	// 	t.Fatal(err)
+	// }
+
+	// if _, err := os.Stat(path.Join(layer, "test")); err != nil {
+	// 	t.Fatalf("The test file has not been found")
+	// }
+
+	// if err := srv.runtime.graph.Delete(img.Id); err != nil {
+	// 	t.Fatal(err)
+	// }
+}
 
 
 func TestPostImagesPush(t *testing.T) {
 func TestPostImagesPush(t *testing.T) {
 	//FIXME: Use staging in order to perform tests
 	//FIXME: Use staging in order to perform tests
@@ -815,7 +863,7 @@ func TestPostContainersCreate(t *testing.T) {
 
 
 	if _, err := os.Stat(path.Join(container.rwPath(), "test")); err != nil {
 	if _, err := os.Stat(path.Join(container.rwPath(), "test")); err != nil {
 		if os.IsNotExist(err) {
 		if os.IsNotExist(err) {
-			Debugf("Err: %s", err)
+			utils.Debugf("Err: %s", err)
 			t.Fatalf("The test file has not been created")
 			t.Fatalf("The test file has not been created")
 		}
 		}
 		t.Fatal(err)
 		t.Fatal(err)

+ 11 - 4
auth/auth.go

@@ -15,13 +15,13 @@ import (
 const CONFIGFILE = ".dockercfg"
 const CONFIGFILE = ".dockercfg"
 
 
 // the registry server we want to login against
 // the registry server we want to login against
-const INDEX_SERVER = "https://index.docker.io"
+const INDEX_SERVER = "https://index.docker.io/v1"
 
 
 type AuthConfig struct {
 type AuthConfig struct {
 	Username string `json:"username"`
 	Username string `json:"username"`
 	Password string `json:"password"`
 	Password string `json:"password"`
 	Email    string `json:"email"`
 	Email    string `json:"email"`
-	rootPath string `json:-`
+	rootPath string
 }
 }
 
 
 func NewAuthConfig(username, password, email, rootPath string) *AuthConfig {
 func NewAuthConfig(username, password, email, rootPath string) *AuthConfig {
@@ -33,6 +33,13 @@ func NewAuthConfig(username, password, email, rootPath string) *AuthConfig {
 	}
 	}
 }
 }
 
 
+func IndexServerAddress() string {
+	if os.Getenv("DOCKER_INDEX_URL") != "" {
+		return os.Getenv("DOCKER_INDEX_URL") + "/v1"
+	}
+	return INDEX_SERVER
+}
+
 // create a base64 encoded auth string to store in config
 // create a base64 encoded auth string to store in config
 func EncodeAuth(authConfig *AuthConfig) string {
 func EncodeAuth(authConfig *AuthConfig) string {
 	authStr := authConfig.Username + ":" + authConfig.Password
 	authStr := authConfig.Username + ":" + authConfig.Password
@@ -119,7 +126,7 @@ func Login(authConfig *AuthConfig) (string, error) {
 
 
 	// using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status.
 	// using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status.
 	b := strings.NewReader(string(jsonBody))
 	b := strings.NewReader(string(jsonBody))
-	req1, err := http.Post(INDEX_SERVER+"/v1/users/", "application/json; charset=utf-8", b)
+	req1, err := http.Post(IndexServerAddress()+"/users/", "application/json; charset=utf-8", b)
 	if err != nil {
 	if err != nil {
 		return "", fmt.Errorf("Server Error: %s", err)
 		return "", fmt.Errorf("Server Error: %s", err)
 	}
 	}
@@ -139,7 +146,7 @@ func Login(authConfig *AuthConfig) (string, error) {
 			"Please check your e-mail for a confirmation link.")
 			"Please check your e-mail for a confirmation link.")
 	} else if reqStatusCode == 400 {
 	} else if reqStatusCode == 400 {
 		if string(reqBody) == "\"Username or email already exists\"" {
 		if string(reqBody) == "\"Username or email already exists\"" {
-			req, err := http.NewRequest("GET", INDEX_SERVER+"/v1/users/", nil)
+			req, err := http.NewRequest("GET", IndexServerAddress()+"/users/", nil)
 			req.SetBasicAuth(authConfig.Username, authConfig.Password)
 			req.SetBasicAuth(authConfig.Username, authConfig.Password)
 			resp, err := client.Do(req)
 			resp, err := client.Do(req)
 			if err != nil {
 			if err != nil {

+ 50 - 0
auth/auth_test.go

@@ -1,6 +1,10 @@
 package auth
 package auth
 
 
 import (
 import (
+	"crypto/rand"
+	"encoding/hex"
+	"os"
+	"strings"
 	"testing"
 	"testing"
 )
 )
 
 
@@ -21,3 +25,49 @@ func TestEncodeAuth(t *testing.T) {
 		t.Fatal("AuthString encoding isn't correct.")
 		t.Fatal("AuthString encoding isn't correct.")
 	}
 	}
 }
 }
+
+func TestLogin(t *testing.T) {
+	os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
+	defer os.Setenv("DOCKER_INDEX_URL", "")
+	authConfig := NewAuthConfig("unittester", "surlautrerivejetattendrai", "noise+unittester@dotcloud.com", "/tmp")
+	status, err := Login(authConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if status != "Login Succeeded\n" {
+		t.Fatalf("Expected status \"Login Succeeded\", found \"%s\" instead", status)
+	}
+}
+
+func TestCreateAccount(t *testing.T) {
+	os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
+	defer os.Setenv("DOCKER_INDEX_URL", "")
+	tokenBuffer := make([]byte, 16)
+	_, err := rand.Read(tokenBuffer)
+	if err != nil {
+		t.Fatal(err)
+	}
+	token := hex.EncodeToString(tokenBuffer)[:12]
+	username := "ut" + token
+	authConfig := NewAuthConfig(username, "test42", "docker-ut+"+token+"@example.com", "/tmp")
+	status, err := Login(authConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expectedStatus := "Account created. Please use the confirmation link we sent" +
+		" to your e-mail to activate it.\n"
+	if status != expectedStatus {
+		t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status)
+	}
+
+	status, err = Login(authConfig)
+	if err == nil {
+		t.Fatalf("Expected error but found nil instead")
+	}
+
+	expectedError := "Login: Account is not Active"
+
+	if !strings.Contains(err.Error(), expectedError) {
+		t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err.Error())
+	}
+}

+ 0 - 20
buildbot/README.rst

@@ -1,20 +0,0 @@
-Buildbot
-========
-
-Buildbot is a continuous integration system designed to automate the
-build/test cycle. By automatically rebuilding and testing the tree each time
-something has changed, build problems are pinpointed quickly, before other
-developers are inconvenienced by the failure.
-
-When running 'make hack' at the docker root directory, it spawns a virtual
-machine in the background running a buildbot instance and adds a git
-post-commit hook that automatically run docker tests for you.
-
-You can check your buildbot instance at http://192.168.33.21:8010/waterfall
-
-
-Buildbot dependencies
----------------------
-
-vagrant, virtualbox packages and python package requests
-

+ 0 - 28
buildbot/Vagrantfile

@@ -1,28 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-$BUILDBOT_IP = '192.168.33.21'
-
-def v10(config)
-  config.vm.box = "quantal64_3.5.0-25"
-  config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
-  config.vm.share_folder 'v-data', '/data/docker', File.dirname(__FILE__) + '/..'
-  config.vm.network :hostonly, $BUILDBOT_IP
-
-  # Ensure puppet is installed on the instance
-  config.vm.provision :shell, :inline => 'apt-get -qq update; apt-get install -y puppet'
-
-  config.vm.provision :puppet do |puppet|
-    puppet.manifests_path = '.'
-    puppet.manifest_file  = 'buildbot.pp'
-    puppet.options = ['--templatedir','.']
-  end
-end
-
-Vagrant::VERSION < '1.1.0' and Vagrant::Config.run do |config|
-  v10(config)
-end
-
-Vagrant::VERSION >= '1.1.0' and Vagrant.configure('1') do |config|
-  v10(config)
-end

+ 0 - 43
buildbot/buildbot-cfg/buildbot-cfg.sh

@@ -1,43 +0,0 @@
-#!/bin/bash
-
-# Auto setup of buildbot configuration. Package installation is being done
-# on buildbot.pp
-# Dependencies: buildbot, buildbot-slave, supervisor
-
-SLAVE_NAME='buildworker'
-SLAVE_SOCKET='localhost:9989'
-BUILDBOT_PWD='pass-docker'
-USER='vagrant'
-ROOT_PATH='/data/buildbot'
-DOCKER_PATH='/data/docker'
-BUILDBOT_CFG="$DOCKER_PATH/buildbot/buildbot-cfg"
-IP=$(grep BUILDBOT_IP /data/docker/buildbot/Vagrantfile | awk -F "'" '{ print $2; }')
-
-function run { su $USER -c "$1"; }
-
-export PATH=/bin:sbin:/usr/bin:/usr/sbin:/usr/local/bin
-
-# Exit if buildbot has already been installed
-[ -d "$ROOT_PATH" ] && exit 0
-
-# Setup buildbot
-run "mkdir -p ${ROOT_PATH}"
-cd ${ROOT_PATH}
-run "buildbot create-master master"
-run "cp $BUILDBOT_CFG/master.cfg master"
-run "sed -i 's/localhost/$IP/' master/master.cfg"
-run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD"
-
-# Allow buildbot subprocesses (docker tests) to properly run in containers,
-# in particular with docker -u
-run "sed -i 's/^umask = None/umask = 000/' ${ROOT_PATH}/slave/buildbot.tac"
-
-# Setup supervisor
-cp $BUILDBOT_CFG/buildbot.conf /etc/supervisor/conf.d/buildbot.conf
-sed -i "s/^chmod=0700.*0700./chmod=0770\nchown=root:$USER/" /etc/supervisor/supervisord.conf
-kill -HUP `pgrep -f "/usr/bin/python /usr/bin/supervisord"`
-
-# Add git hook
-cp $BUILDBOT_CFG/post-commit $DOCKER_PATH/.git/hooks
-sed -i "s/localhost/$IP/" $DOCKER_PATH/.git/hooks/post-commit
-

+ 0 - 32
buildbot/buildbot.pp

@@ -1,32 +0,0 @@
-node default {
-    $USER = 'vagrant'
-    $ROOT_PATH = '/data/buildbot'
-    $DOCKER_PATH = '/data/docker'
-
-    exec {'apt_update': command => '/usr/bin/apt-get update' }
-    Package { require => Exec['apt_update'] }
-    group {'puppet': ensure => 'present'}
-
-    # Install dependencies
-    Package { ensure => 'installed' }
-    package { ['python-dev','python-pip','supervisor','lxc','bsdtar','git','golang']: }
-
-    file{[ '/data' ]:
-        owner => $USER, group => $USER, ensure => 'directory' }
-
-    file {'/var/tmp/requirements.txt':
-        content => template('requirements.txt') }
-
-    exec {'requirements':
-        require => [ Package['python-dev'], Package['python-pip'],
-            File['/var/tmp/requirements.txt'] ],
-        cwd     => '/var/tmp',
-        command => "/bin/sh -c '(/usr/bin/pip install -r requirements.txt;
-            rm /var/tmp/requirements.txt)'" }
-
-    exec {'buildbot-cfg-sh':
-        require => [ Package['supervisor'], Exec['requirements']],
-        path    => '/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin',
-        cwd     => '/data',
-        command => "$DOCKER_PATH/buildbot/buildbot-cfg/buildbot-cfg.sh" }
-}

+ 4 - 349
builder.go

@@ -1,13 +1,9 @@
 package docker
 package docker
 
 
 import (
 import (
-	"bufio"
-	"encoding/json"
 	"fmt"
 	"fmt"
-	"io"
 	"os"
 	"os"
 	"path"
 	"path"
-	"strings"
 	"time"
 	"time"
 )
 )
 
 
@@ -15,6 +11,9 @@ type Builder struct {
 	runtime      *Runtime
 	runtime      *Runtime
 	repositories *TagStore
 	repositories *TagStore
 	graph        *Graph
 	graph        *Graph
+
+	config *Config
+	image  *Image
 }
 }
 
 
 func NewBuilder(runtime *Runtime) *Builder {
 func NewBuilder(runtime *Runtime) *Builder {
@@ -25,42 +24,6 @@ func NewBuilder(runtime *Runtime) *Builder {
 	}
 	}
 }
 }
 
 
-func (builder *Builder) mergeConfig(userConf, imageConf *Config) {
-	if userConf.Hostname != "" {
-		userConf.Hostname = imageConf.Hostname
-	}
-	if userConf.User != "" {
-		userConf.User = imageConf.User
-	}
-	if userConf.Memory == 0 {
-		userConf.Memory = imageConf.Memory
-	}
-	if userConf.MemorySwap == 0 {
-		userConf.MemorySwap = imageConf.MemorySwap
-	}
-	if userConf.PortSpecs == nil || len(userConf.PortSpecs) == 0 {
-		userConf.PortSpecs = imageConf.PortSpecs
-	}
-	if !userConf.Tty {
-		userConf.Tty = userConf.Tty
-	}
-	if !userConf.OpenStdin {
-		userConf.OpenStdin = imageConf.OpenStdin
-	}
-	if !userConf.StdinOnce {
-		userConf.StdinOnce = imageConf.StdinOnce
-	}
-	if userConf.Env == nil || len(userConf.Env) == 0 {
-		userConf.Env = imageConf.Env
-	}
-	if userConf.Cmd == nil || len(userConf.Cmd) == 0 {
-		userConf.Cmd = imageConf.Cmd
-	}
-	if userConf.Dns == nil || len(userConf.Dns) == 0 {
-		userConf.Dns = imageConf.Dns
-	}
-}
-
 func (builder *Builder) Create(config *Config) (*Container, error) {
 func (builder *Builder) Create(config *Config) (*Container, error) {
 	// Lookup image
 	// Lookup image
 	img, err := builder.repositories.LookupImage(config.Image)
 	img, err := builder.repositories.LookupImage(config.Image)
@@ -69,7 +32,7 @@ func (builder *Builder) Create(config *Config) (*Container, error) {
 	}
 	}
 
 
 	if img.Config != nil {
 	if img.Config != nil {
-		builder.mergeConfig(config, img.Config)
+		MergeConfig(config, img.Config)
 	}
 	}
 
 
 	if config.Cmd == nil || len(config.Cmd) == 0 {
 	if config.Cmd == nil || len(config.Cmd) == 0 {
@@ -153,311 +116,3 @@ func (builder *Builder) Commit(container *Container, repository, tag, comment, a
 	}
 	}
 	return img, nil
 	return img, nil
 }
 }
-
-func (builder *Builder) clearTmp(containers, images map[string]struct{}) {
-	for c := range containers {
-		tmp := builder.runtime.Get(c)
-		builder.runtime.Destroy(tmp)
-		Debugf("Removing container %s", c)
-	}
-	for i := range images {
-		builder.runtime.graph.Delete(i)
-		Debugf("Removing image %s", i)
-	}
-}
-
-func (builder *Builder) getCachedImage(image *Image, config *Config) (*Image, error) {
-	// Retrieve all images
-	images, err := builder.graph.All()
-	if err != nil {
-		return nil, err
-	}
-
-	// Store the tree in a map of map (map[parentId][childId])
-	imageMap := make(map[string]map[string]struct{})
-	for _, img := range images {
-		if _, exists := imageMap[img.Parent]; !exists {
-			imageMap[img.Parent] = make(map[string]struct{})
-		}
-		imageMap[img.Parent][img.Id] = struct{}{}
-	}
-
-	// Loop on the children of the given image and check the config
-	for elem := range imageMap[image.Id] {
-		img, err := builder.graph.Get(elem)
-		if err != nil {
-			return nil, err
-		}
-		if CompareConfig(&img.ContainerConfig, config) {
-			return img, nil
-		}
-	}
-	return nil, nil
-}
-
-func (builder *Builder) Build(dockerfile io.Reader, stdout io.Writer) (*Image, error) {
-	var (
-		image, base   *Image
-		config        *Config
-		maintainer    string
-		env           map[string]string   = make(map[string]string)
-		tmpContainers map[string]struct{} = make(map[string]struct{})
-		tmpImages     map[string]struct{} = make(map[string]struct{})
-	)
-	defer builder.clearTmp(tmpContainers, tmpImages)
-
-	file := bufio.NewReader(dockerfile)
-	for {
-		line, err := file.ReadString('\n')
-		if err != nil {
-			if err == io.EOF {
-				break
-			}
-			return nil, err
-		}
-		line = strings.Replace(strings.TrimSpace(line), "	", " ", 1)
-		// Skip comments and empty line
-		if len(line) == 0 || line[0] == '#' {
-			continue
-		}
-		tmp := strings.SplitN(line, " ", 2)
-		if len(tmp) != 2 {
-			return nil, fmt.Errorf("Invalid Dockerfile format")
-		}
-		instruction := strings.Trim(tmp[0], " ")
-		arguments := strings.Trim(tmp[1], " ")
-		switch strings.ToLower(instruction) {
-		case "from":
-			fmt.Fprintf(stdout, "FROM %s\n", arguments)
-			image, err = builder.runtime.repositories.LookupImage(arguments)
-			if err != nil {
-				if builder.runtime.graph.IsNotExist(err) {
-
-					var tag, remote string
-					if strings.Contains(arguments, ":") {
-						remoteParts := strings.Split(arguments, ":")
-						tag = remoteParts[1]
-						remote = remoteParts[0]
-					} else {
-						remote = arguments
-					}
-
-					if err := builder.runtime.graph.PullRepository(stdout, remote, tag, builder.runtime.repositories, builder.runtime.authConfig); err != nil {
-						return nil, err
-					}
-
-					image, err = builder.runtime.repositories.LookupImage(arguments)
-					if err != nil {
-						return nil, err
-					}
-				} else {
-					return nil, err
-				}
-			}
-			config = &Config{}
-
-			break
-		case "maintainer":
-			fmt.Fprintf(stdout, "MAINTAINER %s\n", arguments)
-			maintainer = arguments
-			break
-		case "run":
-			fmt.Fprintf(stdout, "RUN %s\n", arguments)
-			if image == nil {
-				return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
-			}
-			config, _, err := ParseRun([]string{image.Id, "/bin/sh", "-c", arguments}, builder.runtime.capabilities)
-			if err != nil {
-				return nil, err
-			}
-
-			for key, value := range env {
-				config.Env = append(config.Env, fmt.Sprintf("%s=%s", key, value))
-			}
-
-			if cache, err := builder.getCachedImage(image, config); err != nil {
-				return nil, err
-			} else if cache != nil {
-				image = cache
-				fmt.Fprintf(stdout, "===> %s\n", image.ShortId())
-				break
-			}
-
-			Debugf("Env -----> %v ------ %v\n", config.Env, env)
-
-			// Create the container and start it
-			c, err := builder.Create(config)
-			if err != nil {
-				return nil, err
-			}
-
-			if os.Getenv("DEBUG") != "" {
-				out, _ := c.StdoutPipe()
-				err2, _ := c.StderrPipe()
-				go io.Copy(os.Stdout, out)
-				go io.Copy(os.Stdout, err2)
-			}
-
-			if err := c.Start(); err != nil {
-				return nil, err
-			}
-			tmpContainers[c.Id] = struct{}{}
-
-			// Wait for it to finish
-			if result := c.Wait(); result != 0 {
-				return nil, fmt.Errorf("!!! '%s' return non-zero exit code '%d'. Aborting.", arguments, result)
-			}
-
-			// Commit the container
-			base, err = builder.Commit(c, "", "", "", maintainer, nil)
-			if err != nil {
-				return nil, err
-			}
-			tmpImages[base.Id] = struct{}{}
-
-			fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
-
-			// use the base as the new image
-			image = base
-
-			break
-		case "env":
-			tmp := strings.SplitN(arguments, " ", 2)
-			if len(tmp) != 2 {
-				return nil, fmt.Errorf("Invalid ENV format")
-			}
-			key := strings.Trim(tmp[0], " ")
-			value := strings.Trim(tmp[1], " ")
-			fmt.Fprintf(stdout, "ENV %s %s\n", key, value)
-			env[key] = value
-			if image != nil {
-				fmt.Fprintf(stdout, "===> %s\n", image.ShortId())
-			} else {
-				fmt.Fprintf(stdout, "===> <nil>\n")
-			}
-			break
-		case "cmd":
-			fmt.Fprintf(stdout, "CMD %s\n", arguments)
-
-			// Create the container and start it
-			c, err := builder.Create(&Config{Image: image.Id, Cmd: []string{"", ""}})
-			if err != nil {
-				return nil, err
-			}
-			if err := c.Start(); err != nil {
-				return nil, err
-			}
-			tmpContainers[c.Id] = struct{}{}
-
-			cmd := []string{}
-			if err := json.Unmarshal([]byte(arguments), &cmd); err != nil {
-				return nil, err
-			}
-			config.Cmd = cmd
-
-			// Commit the container
-			base, err = builder.Commit(c, "", "", "", maintainer, config)
-			if err != nil {
-				return nil, err
-			}
-			tmpImages[base.Id] = struct{}{}
-
-			fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
-			image = base
-			break
-		case "expose":
-			ports := strings.Split(arguments, " ")
-
-			fmt.Fprintf(stdout, "EXPOSE %v\n", ports)
-			if image == nil {
-				return nil, fmt.Errorf("Please provide a source image with `from` prior to copy")
-			}
-
-			// Create the container and start it
-			c, err := builder.Create(&Config{Image: image.Id, Cmd: []string{"", ""}})
-			if err != nil {
-				return nil, err
-			}
-			if err := c.Start(); err != nil {
-				return nil, err
-			}
-			tmpContainers[c.Id] = struct{}{}
-
-			config.PortSpecs = append(ports, config.PortSpecs...)
-
-			// Commit the container
-			base, err = builder.Commit(c, "", "", "", maintainer, config)
-			if err != nil {
-				return nil, err
-			}
-			tmpImages[base.Id] = struct{}{}
-
-			fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
-			image = base
-			break
-		case "insert":
-			if image == nil {
-				return nil, fmt.Errorf("Please provide a source image with `from` prior to copy")
-			}
-			tmp = strings.SplitN(arguments, " ", 2)
-			if len(tmp) != 2 {
-				return nil, fmt.Errorf("Invalid INSERT format")
-			}
-			sourceUrl := strings.Trim(tmp[0], " ")
-			destPath := strings.Trim(tmp[1], " ")
-			fmt.Fprintf(stdout, "COPY %s to %s in %s\n", sourceUrl, destPath, base.ShortId())
-
-			file, err := Download(sourceUrl, stdout)
-			if err != nil {
-				return nil, err
-			}
-			defer file.Body.Close()
-
-			config, _, err := ParseRun([]string{base.Id, "echo", "insert", sourceUrl, destPath}, builder.runtime.capabilities)
-			if err != nil {
-				return nil, err
-			}
-			c, err := builder.Create(config)
-			if err != nil {
-				return nil, err
-			}
-
-			if err := c.Start(); err != nil {
-				return nil, err
-			}
-
-			// Wait for echo to finish
-			if result := c.Wait(); result != 0 {
-				return nil, fmt.Errorf("!!! '%s' return non-zero exit code '%d'. Aborting.", arguments, result)
-			}
-
-			if err := c.Inject(file.Body, destPath); err != nil {
-				return nil, err
-			}
-
-			base, err = builder.Commit(c, "", "", "", maintainer, nil)
-			if err != nil {
-				return nil, err
-			}
-			fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
-
-			image = base
-
-			break
-		default:
-			fmt.Fprintf(stdout, "Skipping unknown instruction %s\n", strings.ToUpper(instruction))
-		}
-	}
-	if image != nil {
-		// The build is successful, keep the temporary containers and images
-		for i := range tmpImages {
-			delete(tmpImages, i)
-		}
-		for i := range tmpContainers {
-			delete(tmpContainers, i)
-		}
-		fmt.Fprintf(stdout, "Build finished. image id: %s\n", image.ShortId())
-		return image, nil
-	}
-	return nil, fmt.Errorf("An error occured during the build\n")
-}

+ 311 - 0
builder_client.go

@@ -0,0 +1,311 @@
+package docker
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"github.com/dotcloud/docker/utils"
+	"io"
+	"net/url"
+	"os"
+	"reflect"
+	"strings"
+)
+
+// BuilderClient drives a Dockerfile build against a Docker daemon over the
+// remote HTTP API (via DockerCli). Build consumes a Dockerfile stream and
+// returns the id of the final image produced.
+type BuilderClient interface {
+	Build(io.Reader) (string, error)
+	CmdFrom(string) error
+	CmdRun(string) error
+}
+
+// builderClient is the API-backed implementation of BuilderClient. It keeps
+// the build state accumulated while walking the Dockerfile line by line.
+type builderClient struct {
+	cli *DockerCli
+
+	// image is the id of the current base layer; updated after FROM and
+	// after every successful commit.
+	image      string
+	maintainer string
+	config     *Config
+
+	// Containers/images created during the build; cleared on success,
+	// deleted by clearTmp on failure.
+	tmpContainers map[string]struct{}
+	tmpImages     map[string]struct{}
+
+	// needCommit marks that config-only instructions (MAINTAINER, ENV,
+	// CMD, ...) are pending a final commit at the end of Build.
+	needCommit bool
+}
+
+// clearTmp deletes the given intermediate containers and images through the
+// API. Cleanup is best-effort: a failed DELETE is only logged, and the
+// "Removing ..." message is emitted regardless of whether the call succeeded.
+func (b *builderClient) clearTmp(containers, images map[string]struct{}) {
+	for c := range containers {
+		if _, _, err := b.cli.call("DELETE", "/containers/"+c, nil); err != nil {
+			utils.Debugf("%s", err)
+		}
+		utils.Debugf("Removing container %s", c)
+	}
+	for i := range images {
+		if _, _, err := b.cli.call("DELETE", "/images/"+i, nil); err != nil {
+			utils.Debugf("%s", err)
+		}
+		utils.Debugf("Removing image %s", i)
+	}
+}
+
+// CmdFrom handles the FROM instruction: it resolves name to an image id and
+// stores it as the build's base image. If the image is not known locally
+// (404), it is pulled first (optionally split as "repo:tag") and the
+// metadata lookup is retried.
+func (b *builderClient) CmdFrom(name string) error {
+	obj, statusCode, err := b.cli.call("GET", "/images/"+name+"/json", nil)
+	if statusCode == 404 {
+
+		remote := name
+		var tag string
+		if strings.Contains(remote, ":") {
+			// NOTE(review): Split on every ":" but only parts [0] and [1]
+			// are used — a name with multiple colons loses everything
+			// after the second; SplitN(remote, ":", 2) would be safer.
+			remoteParts := strings.Split(remote, ":")
+			tag = remoteParts[1]
+			remote = remoteParts[0]
+		}
+		// Pull progress goes to stdout only when DEBUG is set; otherwise
+		// it is discarded.
+		var out io.Writer
+		if os.Getenv("DEBUG") != "" {
+			out = os.Stdout
+		} else {
+			out = &utils.NopWriter{}
+		}
+		if err := b.cli.stream("POST", "/images/create?fromImage="+remote+"&tag="+tag, nil, out); err != nil {
+			return err
+		}
+		// Re-fetch metadata after the pull; this reassigns obj and err,
+		// so a successful retry clears the original 404 error.
+		obj, _, err = b.cli.call("GET", "/images/"+name+"/json", nil)
+		if err != nil {
+			return err
+		}
+	}
+	if err != nil {
+		return err
+	}
+
+	img := &ApiId{}
+	if err := json.Unmarshal(obj, img); err != nil {
+		return err
+	}
+	b.image = img.Id
+	utils.Debugf("Using image %s", b.image)
+	return nil
+}
+
+// CmdMaintainer handles the MAINTAINER instruction: it records the author
+// used for subsequent commits and flags that a final commit is needed.
+func (b *builderClient) CmdMaintainer(name string) error {
+	b.needCommit = true
+	b.maintainer = name
+	return nil
+}
+
+// CmdRun handles the RUN instruction: args is executed as `/bin/sh -c args`
+// on top of the current image. The daemon's build cache is consulted first
+// (/images/getCache); on a hit the cached image id is adopted without
+// running anything, otherwise a container is run and committed.
+func (b *builderClient) CmdRun(args string) error {
+	if b.image == "" {
+		return fmt.Errorf("Please provide a source image with `from` prior to run")
+	}
+	config, _, err := ParseRun([]string{b.image, "/bin/sh", "-c", args}, nil)
+	if err != nil {
+		return err
+	}
+
+	// Save Cmd/Env so the RUN command does not leak into the image config;
+	// they are restored after the container has executed.
+	cmd, env := b.config.Cmd, b.config.Env
+	b.config.Cmd = nil
+	MergeConfig(b.config, config)
+
+	// A 404 here means "cache miss", not a failure.
+	body, statusCode, err := b.cli.call("POST", "/images/getCache", &ApiImageConfig{Id: b.image, Config: b.config})
+	if err != nil {
+		if statusCode != 404 {
+			return err
+		}
+	}
+	if statusCode != 404 {
+		apiId := &ApiId{}
+		if err := json.Unmarshal(body, apiId); err != nil {
+			return err
+		}
+		utils.Debugf("Use cached version")
+		// NOTE(review): on this cache-hit path the saved cmd/env are never
+		// restored into b.config, unlike the miss path below — confirm
+		// whether that is intentional.
+		b.image = apiId.Id
+		return nil
+	}
+	cid, err := b.run()
+	if err != nil {
+		return err
+	}
+	b.config.Cmd, b.config.Env = cmd, env
+	return b.commit(cid)
+}
+
+// CmdEnv handles the ENV instruction ("KEY VALUE"): it sets or replaces the
+// KEY=VALUE entry in the image config's environment. The change is applied
+// only to in-memory config; needCommit defers the commit to end of build.
+func (b *builderClient) CmdEnv(args string) error {
+	b.needCommit = true
+	tmp := strings.SplitN(args, " ", 2)
+	if len(tmp) != 2 {
+		return fmt.Errorf("Invalid ENV format")
+	}
+	key := strings.Trim(tmp[0], " ")
+	value := strings.Trim(tmp[1], " ")
+
+	// Overwrite an existing KEY= entry in place rather than appending a
+	// duplicate.
+	for i, elem := range b.config.Env {
+		if strings.HasPrefix(elem, key+"=") {
+			b.config.Env[i] = key + "=" + value
+			return nil
+		}
+	}
+	b.config.Env = append(b.config.Env, key+"="+value)
+	return nil
+}
+
+// CmdCmd handles the CMD instruction. args is tried first as a JSON array
+// (exec form, e.g. ["ls", "-l"]); if that fails to parse, it falls back to
+// the shell form `/bin/sh -c args`.
+func (b *builderClient) CmdCmd(args string) error {
+	b.needCommit = true
+	var cmd []string
+	if err := json.Unmarshal([]byte(args), &cmd); err != nil {
+		utils.Debugf("Error unmarshalling: %s, using /bin/sh -c", err)
+		b.config.Cmd = []string{"/bin/sh", "-c", args}
+	} else {
+		b.config.Cmd = cmd
+	}
+	return nil
+}
+
+// CmdExpose handles the EXPOSE instruction: the space-separated port specs
+// are prepended to the config's PortSpecs.
+// NOTE(review): unlike the other config instructions this does not set
+// needCommit — confirm whether EXPOSE alone is meant to produce a layer.
+func (b *builderClient) CmdExpose(args string) error {
+	ports := strings.Split(args, " ")
+	b.config.PortSpecs = append(ports, b.config.PortSpecs...)
+	return nil
+}
+
+// CmdInsert handles the INSERT instruction. It is intentionally unsupported
+// in the client-side builder for now and always returns an error.
+func (b *builderClient) CmdInsert(args string) error {
+	// FIXME: Reimplement this once the remove_hijack branch gets merged.
+	// We need to retrieve the resulting Id
+	return fmt.Errorf("INSERT not implemented")
+}
+
+// run creates a container from the current config on top of b.image, starts
+// it, registers it as a temporary container, and blocks until it exits.
+// It returns the container id; a non-zero exit status is reported as an
+// error.
+func (b *builderClient) run() (string, error) {
+	if b.image == "" {
+		return "", fmt.Errorf("Please provide a source image with `from` prior to run")
+	}
+	b.config.Image = b.image
+	body, _, err := b.cli.call("POST", "/containers/create", b.config)
+	if err != nil {
+		return "", err
+	}
+
+	apiRun := &ApiRun{}
+	if err := json.Unmarshal(body, apiRun); err != nil {
+		return "", err
+	}
+	// Surface daemon-side warnings (e.g. config issues) without failing.
+	for _, warning := range apiRun.Warnings {
+		fmt.Fprintln(os.Stderr, "WARNING: ", warning)
+	}
+
+	//start the container
+	_, _, err = b.cli.call("POST", "/containers/"+apiRun.Id+"/start", nil)
+	if err != nil {
+		return "", err
+	}
+	// Registered after a successful start so clearTmp can remove it if the
+	// build later fails.
+	b.tmpContainers[apiRun.Id] = struct{}{}
+
+	// Wait for it to finish
+	body, _, err = b.cli.call("POST", "/containers/"+apiRun.Id+"/wait", nil)
+	if err != nil {
+		return "", err
+	}
+	apiWait := &ApiWait{}
+	if err := json.Unmarshal(body, apiWait); err != nil {
+		return "", err
+	}
+	if apiWait.StatusCode != 0 {
+		return "", fmt.Errorf("The command %v returned a non-zero code: %d", b.config.Cmd, apiWait.StatusCode)
+	}
+
+	return apiRun.Id, nil
+}
+
+// commit snapshots container id into a new image (author = b.maintainer),
+// records it as a temporary image, and advances b.image to the new id.
+// When id is empty — i.e. only config changes are pending — a throwaway
+// container running `true` is created first so there is something to commit.
+func (b *builderClient) commit(id string) error {
+	if b.image == "" {
+		return fmt.Errorf("Please provide a source image with `from` prior to run")
+	}
+	b.config.Image = b.image
+
+	if id == "" {
+		// Temporarily swap in a no-op command; restored below.
+		cmd := b.config.Cmd
+		b.config.Cmd = []string{"true"}
+		if cid, err := b.run(); err != nil {
+			return err
+		} else {
+			id = cid
+		}
+		b.config.Cmd = cmd
+	}
+
+	// Commit the container
+	v := url.Values{}
+	v.Set("container", id)
+	v.Set("author", b.maintainer)
+
+	body, _, err := b.cli.call("POST", "/commit?"+v.Encode(), b.config)
+	if err != nil {
+		return err
+	}
+	apiId := &ApiId{}
+	if err := json.Unmarshal(body, apiId); err != nil {
+		return err
+	}
+	b.tmpImages[apiId.Id] = struct{}{}
+	b.image = apiId.Id
+	b.needCommit = false
+	return nil
+}
+
+// Build reads dockerfile line by line and dispatches each instruction via
+// reflection to the matching Cmd<Instruction> method (e.g. "run" -> CmdRun).
+// On success the temporary containers/images are kept (the maps are emptied
+// so the deferred clearTmp removes nothing) and the final image id is
+// returned; on failure clearTmp deletes every intermediate artifact.
+func (b *builderClient) Build(dockerfile io.Reader) (string, error) {
+	defer b.clearTmp(b.tmpContainers, b.tmpImages)
+	file := bufio.NewReader(dockerfile)
+	for {
+		line, err := file.ReadString('\n')
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			// NOTE(review): a final line lacking a trailing newline comes
+			// back with err == io.EOF and is silently discarded here.
+			return "", err
+		}
+		// Normalize the first tab between instruction and arguments to a
+		// space (the Replace pattern below is a literal tab character).
+		line = strings.Replace(strings.TrimSpace(line), "	", " ", 1)
+		// Skip comments and empty line
+		if len(line) == 0 || line[0] == '#' {
+			continue
+		}
+		tmp := strings.SplitN(line, " ", 2)
+		if len(tmp) != 2 {
+			return "", fmt.Errorf("Invalid Dockerfile format")
+		}
+		instruction := strings.ToLower(strings.Trim(tmp[0], " "))
+		arguments := strings.Trim(tmp[1], " ")
+
+		fmt.Printf("%s %s (%s)\n", strings.ToUpper(instruction), arguments, b.image)
+
+		// "run" -> "CmdRun", "from" -> "CmdFrom", etc.
+		method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
+		if !exists {
+			// NOTE(review): missing `continue` — when the instruction is
+			// unknown, method is the zero reflect.Method and the Call
+			// below will panic instead of skipping the line.
+			fmt.Printf("Skipping unknown instruction %s\n", strings.ToUpper(instruction))
+		}
+		ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
+		if ret != nil {
+			return "", ret.(error)
+		}
+
+		fmt.Printf("===> %v\n", b.image)
+	}
+	// Flush any pending config-only instructions (ENV, CMD, MAINTAINER...).
+	if b.needCommit {
+		if err := b.commit(""); err != nil {
+			return "", err
+		}
+	}
+	if b.image != "" {
+		// The build is successful, keep the temporary containers and images
+		for i := range b.tmpImages {
+			delete(b.tmpImages, i)
+		}
+		for i := range b.tmpContainers {
+			delete(b.tmpContainers, i)
+		}
+		fmt.Printf("Build finished. image id: %s\n", b.image)
+		return b.image, nil
+	}
+	return "", fmt.Errorf("An error occured during the build\n")
+}
+
+// NewBuilderClient returns a BuilderClient that talks to the Docker daemon
+// at addr:port, with empty build state ready for a fresh Build call.
+func NewBuilderClient(addr string, port int) BuilderClient {
+	return &builderClient{
+		cli:           NewDockerCli(addr, port),
+		config:        &Config{},
+		tmpContainers: make(map[string]struct{}),
+		tmpImages:     make(map[string]struct{}),
+	}
+}

+ 0 - 88
builder_test.go

@@ -1,88 +0,0 @@
-package docker
-
-import (
-	"strings"
-	"testing"
-)
-
-const Dockerfile = `
-# VERSION		0.1
-# DOCKER-VERSION	0.2
-
-from   ` + unitTestImageName + `
-run    sh -c 'echo root:testpass > /tmp/passwd'
-run    mkdir -p /var/run/sshd
-insert https://raw.github.com/dotcloud/docker/master/CHANGELOG.md /tmp/CHANGELOG.md
-`
-
-func TestBuild(t *testing.T) {
-	runtime, err := newTestRuntime()
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer nuke(runtime)
-
-	builder := NewBuilder(runtime)
-
-	img, err := builder.Build(strings.NewReader(Dockerfile), &nopWriter{})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	container, err := builder.Create(
-		&Config{
-			Image: img.Id,
-			Cmd:   []string{"cat", "/tmp/passwd"},
-		},
-	)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer runtime.Destroy(container)
-
-	output, err := container.Output()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if string(output) != "root:testpass\n" {
-		t.Fatalf("Unexpected output. Read '%s', expected '%s'", output, "root:testpass\n")
-	}
-
-	container2, err := builder.Create(
-		&Config{
-			Image: img.Id,
-			Cmd:   []string{"ls", "-d", "/var/run/sshd"},
-		},
-	)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer runtime.Destroy(container2)
-
-	output, err = container2.Output()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if string(output) != "/var/run/sshd\n" {
-		t.Fatal("/var/run/sshd has not been created")
-	}
-
-	container3, err := builder.Create(
-		&Config{
-			Image: img.Id,
-			Cmd:   []string{"cat", "/tmp/CHANGELOG.md"},
-		},
-	)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer runtime.Destroy(container3)
-
-	output, err = container3.Output()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(output) == 0 {
-		t.Fatal("/tmp/CHANGELOG.md has not been copied")
-	}
-}

+ 207 - 166
commands.go

@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"fmt"
 	"github.com/dotcloud/docker/auth"
 	"github.com/dotcloud/docker/auth"
 	"github.com/dotcloud/docker/term"
 	"github.com/dotcloud/docker/term"
+	"github.com/dotcloud/docker/utils"
 	"io"
 	"io"
 	"io/ioutil"
 	"io/ioutil"
 	"net"
 	"net"
@@ -15,6 +16,7 @@ import (
 	"net/url"
 	"net/url"
 	"os"
 	"os"
 	"path/filepath"
 	"path/filepath"
+	"reflect"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
 	"text/tabwriter"
 	"text/tabwriter"
@@ -29,88 +31,66 @@ var (
 )
 )
 
 
 func ParseCommands(args ...string) error {
 func ParseCommands(args ...string) error {
-
-	cmds := map[string]func(args ...string) error{
-		"attach":  CmdAttach,
-		"build":   CmdBuild,
-		"commit":  CmdCommit,
-		"diff":    CmdDiff,
-		"export":  CmdExport,
-		"images":  CmdImages,
-		"info":    CmdInfo,
-		"insert":  CmdInsert,
-		"inspect": CmdInspect,
-		"import":  CmdImport,
-		"history": CmdHistory,
-		"kill":    CmdKill,
-		"login":   CmdLogin,
-		"logs":    CmdLogs,
-		"port":    CmdPort,
-		"ps":      CmdPs,
-		"pull":    CmdPull,
-		"push":    CmdPush,
-		"restart": CmdRestart,
-		"rm":      CmdRm,
-		"rmi":     CmdRmi,
-		"run":     CmdRun,
-		"tag":     CmdTag,
-		"search":  CmdSearch,
-		"start":   CmdStart,
-		"stop":    CmdStop,
-		"version": CmdVersion,
-		"wait":    CmdWait,
-	}
+	cli := NewDockerCli("0.0.0.0", 4243)
 
 
 	if len(args) > 0 {
 	if len(args) > 0 {
-		cmd, exists := cmds[args[0]]
+		methodName := "Cmd" + strings.ToUpper(args[0][:1]) + strings.ToLower(args[0][1:])
+		method, exists := reflect.TypeOf(cli).MethodByName(methodName)
 		if !exists {
 		if !exists {
 			fmt.Println("Error: Command not found:", args[0])
 			fmt.Println("Error: Command not found:", args[0])
-			return cmdHelp(args...)
+			return cli.CmdHelp(args...)
+		}
+		ret := method.Func.CallSlice([]reflect.Value{
+			reflect.ValueOf(cli),
+			reflect.ValueOf(args[1:]),
+		})[0].Interface()
+		if ret == nil {
+			return nil
 		}
 		}
-		return cmd(args[1:]...)
+		return ret.(error)
 	}
 	}
-	return cmdHelp(args...)
+	return cli.CmdHelp(args...)
 }
 }
 
 
-func cmdHelp(args ...string) error {
+func (cli *DockerCli) CmdHelp(args ...string) error {
 	help := "Usage: docker COMMAND [arg...]\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n"
 	help := "Usage: docker COMMAND [arg...]\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n"
-	for _, cmd := range [][]string{
-		{"attach", "Attach to a running container"},
-		{"build", "Build a container from Dockerfile via stdin"},
-		{"commit", "Create a new image from a container's changes"},
-		{"diff", "Inspect changes on a container's filesystem"},
-		{"export", "Stream the contents of a container as a tar archive"},
-		{"history", "Show the history of an image"},
-		{"images", "List images"},
-		{"import", "Create a new filesystem image from the contents of a tarball"},
-		{"info", "Display system-wide information"},
-		{"insert", "Insert a file in an image"},
-		{"inspect", "Return low-level information on a container"},
-		{"kill", "Kill a running container"},
-		{"login", "Register or Login to the docker registry server"},
-		{"logs", "Fetch the logs of a container"},
-		{"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"},
-		{"ps", "List containers"},
-		{"pull", "Pull an image or a repository from the docker registry server"},
-		{"push", "Push an image or a repository to the docker registry server"},
-		{"restart", "Restart a running container"},
-		{"rm", "Remove a container"},
-		{"rmi", "Remove an image"},
-		{"run", "Run a command in a new container"},
-		{"search", "Search for an image in the docker index"},
-		{"start", "Start a stopped container"},
-		{"stop", "Stop a running container"},
-		{"tag", "Tag an image into a repository"},
-		{"version", "Show the docker version information"},
-		{"wait", "Block until a container stops, then print its exit code"},
+	for cmd, description := range map[string]string{
+		"attach":  "Attach to a running container",
+		"build":   "Build a container from Dockerfile or via stdin",
+		"commit":  "Create a new image from a container's changes",
+		"diff":    "Inspect changes on a container's filesystem",
+		"export":  "Stream the contents of a container as a tar archive",
+		"history": "Show the history of an image",
+		"images":  "List images",
+		"import":  "Create a new filesystem image from the contents of a tarball",
+		"info":    "Display system-wide information",
+		"insert":  "Insert a file in an image",
+		"inspect": "Return low-level information on a container",
+		"kill":    "Kill a running container",
+		"login":   "Register or Login to the docker registry server",
+		"logs":    "Fetch the logs of a container",
+		"port":    "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT",
+		"ps":      "List containers",
+		"pull":    "Pull an image or a repository from the docker registry server",
+		"push":    "Push an image or a repository to the docker registry server",
+		"restart": "Restart a running container",
+		"rm":      "Remove a container",
+		"rmi":     "Remove an image",
+		"run":     "Run a command in a new container",
+		"search":  "Search for an image in the docker index",
+		"start":   "Start a stopped container",
+		"stop":    "Stop a running container",
+		"tag":     "Tag an image into a repository",
+		"version": "Show the docker version information",
+		"wait":    "Block until a container stops, then print its exit code",
 	} {
 	} {
-		help += fmt.Sprintf("    %-10.10s%s\n", cmd[0], cmd[1])
+		help += fmt.Sprintf("    %-10.10s%s\n", cmd, description)
 	}
 	}
 	fmt.Println(help)
 	fmt.Println(help)
 	return nil
 	return nil
 }
 }
 
 
-func CmdInsert(args ...string) error {
+func (cli *DockerCli) CmdInsert(args ...string) error {
 	cmd := Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH")
 	cmd := Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -124,28 +104,44 @@ func CmdInsert(args ...string) error {
 	v.Set("url", cmd.Arg(1))
 	v.Set("url", cmd.Arg(1))
 	v.Set("path", cmd.Arg(2))
 	v.Set("path", cmd.Arg(2))
 
 
-	err := hijack("POST", "/images/"+cmd.Arg(0)+"?"+v.Encode(), false)
+	err := cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, os.Stdout)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
-func CmdBuild(args ...string) error {
-	cmd := Subcmd("build", "-", "Build an image from Dockerfile via stdin")
+func (cli *DockerCli) CmdBuild(args ...string) error {
+	cmd := Subcmd("build", "-|Dockerfile", "Build an image from Dockerfile or via stdin")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
 	}
 	}
+	var (
+		file io.ReadCloser
+		err  error
+	)
 
 
-	err := hijack("POST", "/build", false)
-	if err != nil {
+	if cmd.NArg() == 0 {
+		file, err = os.Open("Dockerfile")
+		if err != nil {
+			return err
+		}
+	} else if cmd.Arg(0) == "-" {
+		file = os.Stdin
+	} else {
+		file, err = os.Open(cmd.Arg(0))
+		if err != nil {
+			return err
+		}
+	}
+	if _, err := NewBuilderClient("0.0.0.0", 4243).Build(file); err != nil {
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
 // 'docker login': login / register a user to registry service.
 // 'docker login': login / register a user to registry service.
-func CmdLogin(args ...string) error {
+func (cli *DockerCli) CmdLogin(args ...string) error {
 	var readStringOnRawTerminal = func(stdin io.Reader, stdout io.Writer, echo bool) string {
 	var readStringOnRawTerminal = func(stdin io.Reader, stdout io.Writer, echo bool) string {
 		char := make([]byte, 1)
 		char := make([]byte, 1)
 		buffer := make([]byte, 64)
 		buffer := make([]byte, 64)
@@ -188,11 +184,11 @@ func CmdLogin(args ...string) error {
 		return readStringOnRawTerminal(stdin, stdout, false)
 		return readStringOnRawTerminal(stdin, stdout, false)
 	}
 	}
 
 
-	oldState, err := SetRawTerminal()
+	oldState, err := term.SetRawTerminal()
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	} else {
 	} else {
-		defer RestoreTerminal(oldState)
+		defer term.RestoreTerminal(oldState)
 	}
 	}
 
 
 	cmd := Subcmd("login", "", "Register or Login to the docker registry server")
 	cmd := Subcmd("login", "", "Register or Login to the docker registry server")
@@ -200,7 +196,7 @@ func CmdLogin(args ...string) error {
 		return nil
 		return nil
 	}
 	}
 
 
-	body, _, err := call("GET", "/auth", nil)
+	body, _, err := cli.call("GET", "/auth", nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -241,7 +237,7 @@ func CmdLogin(args ...string) error {
 	out.Password = password
 	out.Password = password
 	out.Email = email
 	out.Email = email
 
 
-	body, _, err = call("POST", "/auth", out)
+	body, _, err = cli.call("POST", "/auth", out)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -252,14 +248,14 @@ func CmdLogin(args ...string) error {
 		return err
 		return err
 	}
 	}
 	if out2.Status != "" {
 	if out2.Status != "" {
-		RestoreTerminal(oldState)
+		term.RestoreTerminal(oldState)
 		fmt.Print(out2.Status)
 		fmt.Print(out2.Status)
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
 // 'docker wait': block until a container stops
 // 'docker wait': block until a container stops
-func CmdWait(args ...string) error {
+func (cli *DockerCli) CmdWait(args ...string) error {
 	cmd := Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.")
 	cmd := Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -269,7 +265,7 @@ func CmdWait(args ...string) error {
 		return nil
 		return nil
 	}
 	}
 	for _, name := range cmd.Args() {
 	for _, name := range cmd.Args() {
-		body, _, err := call("POST", "/containers/"+name+"/wait", nil)
+		body, _, err := cli.call("POST", "/containers/"+name+"/wait", nil)
 		if err != nil {
 		if err != nil {
 			fmt.Printf("%s", err)
 			fmt.Printf("%s", err)
 		} else {
 		} else {
@@ -285,17 +281,20 @@ func CmdWait(args ...string) error {
 }
 }
 
 
 // 'docker version': show version information
 // 'docker version': show version information
-func CmdVersion(args ...string) error {
+func (cli *DockerCli) CmdVersion(args ...string) error {
 	cmd := Subcmd("version", "", "Show the docker version information.")
 	cmd := Subcmd("version", "", "Show the docker version information.")
+	fmt.Println(len(args))
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
 	}
 	}
+
+	fmt.Println(cmd.NArg())
 	if cmd.NArg() > 0 {
 	if cmd.NArg() > 0 {
 		cmd.Usage()
 		cmd.Usage()
 		return nil
 		return nil
 	}
 	}
 
 
-	body, _, err := call("GET", "/version", nil)
+	body, _, err := cli.call("GET", "/version", nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -303,7 +302,7 @@ func CmdVersion(args ...string) error {
 	var out ApiVersion
 	var out ApiVersion
 	err = json.Unmarshal(body, &out)
 	err = json.Unmarshal(body, &out)
 	if err != nil {
 	if err != nil {
-		Debugf("Error unmarshal: body: %s, err: %s\n", body, err)
+		utils.Debugf("Error unmarshal: body: %s, err: %s\n", body, err)
 		return err
 		return err
 	}
 	}
 	fmt.Println("Version:", out.Version)
 	fmt.Println("Version:", out.Version)
@@ -319,7 +318,7 @@ func CmdVersion(args ...string) error {
 }
 }
 
 
 // 'docker info': display system-wide information.
 // 'docker info': display system-wide information.
-func CmdInfo(args ...string) error {
+func (cli *DockerCli) CmdInfo(args ...string) error {
 	cmd := Subcmd("info", "", "Display system-wide information")
 	cmd := Subcmd("info", "", "Display system-wide information")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -329,7 +328,7 @@ func CmdInfo(args ...string) error {
 		return nil
 		return nil
 	}
 	}
 
 
-	body, _, err := call("GET", "/info", nil)
+	body, _, err := cli.call("GET", "/info", nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -347,7 +346,7 @@ func CmdInfo(args ...string) error {
 	return nil
 	return nil
 }
 }
 
 
-func CmdStop(args ...string) error {
+func (cli *DockerCli) CmdStop(args ...string) error {
 	cmd := Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container")
 	cmd := Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container")
 	nSeconds := cmd.Int("t", 10, "wait t seconds before killing the container")
 	nSeconds := cmd.Int("t", 10, "wait t seconds before killing the container")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
@@ -362,7 +361,7 @@ func CmdStop(args ...string) error {
 	v.Set("t", strconv.Itoa(*nSeconds))
 	v.Set("t", strconv.Itoa(*nSeconds))
 
 
 	for _, name := range cmd.Args() {
 	for _, name := range cmd.Args() {
-		_, _, err := call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil)
+		_, _, err := cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil)
 		if err != nil {
 		if err != nil {
 			fmt.Printf("%s", err)
 			fmt.Printf("%s", err)
 		} else {
 		} else {
@@ -372,7 +371,7 @@ func CmdStop(args ...string) error {
 	return nil
 	return nil
 }
 }
 
 
-func CmdRestart(args ...string) error {
+func (cli *DockerCli) CmdRestart(args ...string) error {
 	cmd := Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container")
 	cmd := Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container")
 	nSeconds := cmd.Int("t", 10, "wait t seconds before killing the container")
 	nSeconds := cmd.Int("t", 10, "wait t seconds before killing the container")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
@@ -387,7 +386,7 @@ func CmdRestart(args ...string) error {
 	v.Set("t", strconv.Itoa(*nSeconds))
 	v.Set("t", strconv.Itoa(*nSeconds))
 
 
 	for _, name := range cmd.Args() {
 	for _, name := range cmd.Args() {
-		_, _, err := call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil)
+		_, _, err := cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil)
 		if err != nil {
 		if err != nil {
 			fmt.Printf("%s", err)
 			fmt.Printf("%s", err)
 		} else {
 		} else {
@@ -397,7 +396,7 @@ func CmdRestart(args ...string) error {
 	return nil
 	return nil
 }
 }
 
 
-func CmdStart(args ...string) error {
+func (cli *DockerCli) CmdStart(args ...string) error {
 	cmd := Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
 	cmd := Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -408,7 +407,7 @@ func CmdStart(args ...string) error {
 	}
 	}
 
 
 	for _, name := range args {
 	for _, name := range args {
-		_, _, err := call("POST", "/containers/"+name+"/start", nil)
+		_, _, err := cli.call("POST", "/containers/"+name+"/start", nil)
 		if err != nil {
 		if err != nil {
 			fmt.Printf("%s", err)
 			fmt.Printf("%s", err)
 		} else {
 		} else {
@@ -418,7 +417,7 @@ func CmdStart(args ...string) error {
 	return nil
 	return nil
 }
 }
 
 
-func CmdInspect(args ...string) error {
+func (cli *DockerCli) CmdInspect(args ...string) error {
 	cmd := Subcmd("inspect", "CONTAINER|IMAGE", "Return low-level information on a container/image")
 	cmd := Subcmd("inspect", "CONTAINER|IMAGE", "Return low-level information on a container/image")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -427,9 +426,9 @@ func CmdInspect(args ...string) error {
 		cmd.Usage()
 		cmd.Usage()
 		return nil
 		return nil
 	}
 	}
-	obj, _, err := call("GET", "/containers/"+cmd.Arg(0)+"/json", nil)
+	obj, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil)
 	if err != nil {
 	if err != nil {
-		obj, _, err = call("GET", "/images/"+cmd.Arg(0)+"/json", nil)
+		obj, _, err = cli.call("GET", "/images/"+cmd.Arg(0)+"/json", nil)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
@@ -445,7 +444,7 @@ func CmdInspect(args ...string) error {
 	return nil
 	return nil
 }
 }
 
 
-func CmdPort(args ...string) error {
+func (cli *DockerCli) CmdPort(args ...string) error {
 	cmd := Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT")
 	cmd := Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -455,7 +454,7 @@ func CmdPort(args ...string) error {
 		return nil
 		return nil
 	}
 	}
 
 
-	body, _, err := call("GET", "/containers/"+cmd.Arg(0)+"/json", nil)
+	body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -474,7 +473,7 @@ func CmdPort(args ...string) error {
 }
 }
 
 
 // 'docker rmi IMAGE' removes all images with the name IMAGE
 // 'docker rmi IMAGE' removes all images with the name IMAGE
-func CmdRmi(args ...string) error {
+func (cli *DockerCli) CmdRmi(args ...string) error {
 	cmd := Subcmd("rmi", "IMAGE [IMAGE...]", "Remove an image")
 	cmd := Subcmd("rmi", "IMAGE [IMAGE...]", "Remove an image")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -485,7 +484,7 @@ func CmdRmi(args ...string) error {
 	}
 	}
 
 
 	for _, name := range cmd.Args() {
 	for _, name := range cmd.Args() {
-		_, _, err := call("DELETE", "/images/"+name, nil)
+		_, _, err := cli.call("DELETE", "/images/"+name, nil)
 		if err != nil {
 		if err != nil {
 			fmt.Printf("%s", err)
 			fmt.Printf("%s", err)
 		} else {
 		} else {
@@ -495,7 +494,7 @@ func CmdRmi(args ...string) error {
 	return nil
 	return nil
 }
 }
 
 
-func CmdHistory(args ...string) error {
+func (cli *DockerCli) CmdHistory(args ...string) error {
 	cmd := Subcmd("history", "IMAGE", "Show the history of an image")
 	cmd := Subcmd("history", "IMAGE", "Show the history of an image")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -505,7 +504,7 @@ func CmdHistory(args ...string) error {
 		return nil
 		return nil
 	}
 	}
 
 
-	body, _, err := call("GET", "/images/"+cmd.Arg(0)+"/history", nil)
+	body, _, err := cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -519,13 +518,13 @@ func CmdHistory(args ...string) error {
 	fmt.Fprintln(w, "ID\tCREATED\tCREATED BY")
 	fmt.Fprintln(w, "ID\tCREATED\tCREATED BY")
 
 
 	for _, out := range outs {
 	for _, out := range outs {
-		fmt.Fprintf(w, "%s\t%s ago\t%s\n", out.Id, HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.CreatedBy)
+		fmt.Fprintf(w, "%s\t%s ago\t%s\n", out.Id, utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.CreatedBy)
 	}
 	}
 	w.Flush()
 	w.Flush()
 	return nil
 	return nil
 }
 }
 
 
-func CmdRm(args ...string) error {
+func (cli *DockerCli) CmdRm(args ...string) error {
 	cmd := Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove a container")
 	cmd := Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove a container")
 	v := cmd.Bool("v", false, "Remove the volumes associated to the container")
 	v := cmd.Bool("v", false, "Remove the volumes associated to the container")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
@@ -540,7 +539,7 @@ func CmdRm(args ...string) error {
 		val.Set("v", "1")
 		val.Set("v", "1")
 	}
 	}
 	for _, name := range cmd.Args() {
 	for _, name := range cmd.Args() {
-		_, _, err := call("DELETE", "/containers/"+name+"?"+val.Encode(), nil)
+		_, _, err := cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil)
 		if err != nil {
 		if err != nil {
 			fmt.Printf("%s", err)
 			fmt.Printf("%s", err)
 		} else {
 		} else {
@@ -551,7 +550,7 @@ func CmdRm(args ...string) error {
 }
 }
 
 
 // 'docker kill NAME' kills a running container
 // 'docker kill NAME' kills a running container
-func CmdKill(args ...string) error {
+func (cli *DockerCli) CmdKill(args ...string) error {
 	cmd := Subcmd("kill", "CONTAINER [CONTAINER...]", "Kill a running container")
 	cmd := Subcmd("kill", "CONTAINER [CONTAINER...]", "Kill a running container")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -562,7 +561,7 @@ func CmdKill(args ...string) error {
 	}
 	}
 
 
 	for _, name := range args {
 	for _, name := range args {
-		_, _, err := call("POST", "/containers/"+name+"/kill", nil)
+		_, _, err := cli.call("POST", "/containers/"+name+"/kill", nil)
 		if err != nil {
 		if err != nil {
 			fmt.Printf("%s", err)
 			fmt.Printf("%s", err)
 		} else {
 		} else {
@@ -572,7 +571,7 @@ func CmdKill(args ...string) error {
 	return nil
 	return nil
 }
 }
 
 
-func CmdImport(args ...string) error {
+func (cli *DockerCli) CmdImport(args ...string) error {
 	cmd := Subcmd("import", "URL|- [REPOSITORY [TAG]]", "Create a new filesystem image from the contents of a tarball")
 	cmd := Subcmd("import", "URL|- [REPOSITORY [TAG]]", "Create a new filesystem image from the contents of a tarball")
 
 
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
@@ -588,14 +587,14 @@ func CmdImport(args ...string) error {
 	v.Set("tag", tag)
 	v.Set("tag", tag)
 	v.Set("fromSrc", src)
 	v.Set("fromSrc", src)
 
 
-	err := hijack("POST", "/images/create?"+v.Encode(), false)
+	err := cli.stream("POST", "/images/create?"+v.Encode(), os.Stdin, os.Stdout)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
-func CmdPush(args ...string) error {
+func (cli *DockerCli) CmdPush(args ...string) error {
 	cmd := Subcmd("push", "[OPTION] NAME", "Push an image or a repository to the registry")
 	cmd := Subcmd("push", "[OPTION] NAME", "Push an image or a repository to the registry")
 	registry := cmd.String("registry", "", "Registry host to push the image to")
 	registry := cmd.String("registry", "", "Registry host to push the image to")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
@@ -608,7 +607,7 @@ func CmdPush(args ...string) error {
 		return nil
 		return nil
 	}
 	}
 
 
-	body, _, err := call("GET", "/auth", nil)
+	body, _, err := cli.call("GET", "/auth", nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -621,11 +620,11 @@ func CmdPush(args ...string) error {
 
 
 	// If the login failed AND we're using the index, abort
 	// If the login failed AND we're using the index, abort
 	if *registry == "" && out.Username == "" {
 	if *registry == "" && out.Username == "" {
-		if err := CmdLogin(args...); err != nil {
+		if err := cli.CmdLogin(args...); err != nil {
 			return err
 			return err
 		}
 		}
 
 
-		body, _, err = call("GET", "/auth", nil)
+		body, _, err = cli.call("GET", "/auth", nil)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
@@ -645,13 +644,13 @@ func CmdPush(args ...string) error {
 
 
 	v := url.Values{}
 	v := url.Values{}
 	v.Set("registry", *registry)
 	v.Set("registry", *registry)
-	if err := hijack("POST", "/images/"+name+"/push?"+v.Encode(), false); err != nil {
+	if err := cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), nil, os.Stdout); err != nil {
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
-func CmdPull(args ...string) error {
+func (cli *DockerCli) CmdPull(args ...string) error {
 	cmd := Subcmd("pull", "NAME", "Pull an image or a repository from the registry")
 	cmd := Subcmd("pull", "NAME", "Pull an image or a repository from the registry")
 	tag := cmd.String("t", "", "Download tagged image in repository")
 	tag := cmd.String("t", "", "Download tagged image in repository")
 	registry := cmd.String("registry", "", "Registry to download from. Necessary if image is pulled by ID")
 	registry := cmd.String("registry", "", "Registry to download from. Necessary if image is pulled by ID")
@@ -676,17 +675,18 @@ func CmdPull(args ...string) error {
 	v.Set("tag", *tag)
 	v.Set("tag", *tag)
 	v.Set("registry", *registry)
 	v.Set("registry", *registry)
 
 
-	if err := hijack("POST", "/images/create?"+v.Encode(), false); err != nil {
+	if err := cli.stream("POST", "/images/create?"+v.Encode(), nil, os.Stdout); err != nil {
 		return err
 		return err
 	}
 	}
 
 
 	return nil
 	return nil
 }
 }
 
 
-func CmdImages(args ...string) error {
+func (cli *DockerCli) CmdImages(args ...string) error {
 	cmd := Subcmd("images", "[OPTIONS] [NAME]", "List images")
 	cmd := Subcmd("images", "[OPTIONS] [NAME]", "List images")
 	quiet := cmd.Bool("q", false, "only show numeric IDs")
 	quiet := cmd.Bool("q", false, "only show numeric IDs")
 	all := cmd.Bool("a", false, "show all images")
 	all := cmd.Bool("a", false, "show all images")
+	noTrunc := cmd.Bool("notrunc", false, "Don't truncate output")
 	flViz := cmd.Bool("viz", false, "output graph in graphviz format")
 	flViz := cmd.Bool("viz", false, "output graph in graphviz format")
 
 
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
@@ -698,7 +698,7 @@ func CmdImages(args ...string) error {
 	}
 	}
 
 
 	if *flViz {
 	if *flViz {
-		body, _, err := call("GET", "/images/viz", false)
+		body, _, err := cli.call("GET", "/images/viz", false)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
@@ -708,14 +708,11 @@ func CmdImages(args ...string) error {
 		if cmd.NArg() == 1 {
 		if cmd.NArg() == 1 {
 			v.Set("filter", cmd.Arg(0))
 			v.Set("filter", cmd.Arg(0))
 		}
 		}
-		if *quiet {
-			v.Set("only_ids", "1")
-		}
 		if *all {
 		if *all {
 			v.Set("all", "1")
 			v.Set("all", "1")
 		}
 		}
 
 
-		body, _, err := call("GET", "/images/json?"+v.Encode(), nil)
+		body, _, err := cli.call("GET", "/images/json?"+v.Encode(), nil)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
@@ -732,10 +729,32 @@ func CmdImages(args ...string) error {
 		}
 		}
 
 
 		for _, out := range outs {
 		for _, out := range outs {
+			if out.Repository == "" {
+				out.Repository = "<none>"
+			}
+			if out.Tag == "" {
+				out.Tag = "<none>"
+			}
+
 			if !*quiet {
 			if !*quiet {
-				fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s (virtual %s)\n", out.Repository, out.Tag, out.Id, HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), HumanSize(out.Size), HumanSize(out.ParentSize))
+				fmt.Fprintf(w, "%s\t%s\t", out.Repository, out.Tag)
+				if *noTrunc {
+					fmt.Fprintf(w, "%s\t", out.Id)
+				} else {
+					fmt.Fprintf(w, "%s\t", utils.TruncateId(out.Id))
+				}
+				fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))))
+				if out.ParentSize > 0 {
+					fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.Size), utils.HumanSize(out.ParentSize))
+				} else {
+					fmt.Fprintf(w, "%s\n", utils.HumanSize(out.Size))
+				}
 			} else {
 			} else {
-				fmt.Fprintln(w, out.Id)
+				if *noTrunc {
+					fmt.Fprintln(w, out.Id)
+				} else {
+					fmt.Fprintln(w, utils.TruncateId(out.Id))
+				}
 			}
 			}
 		}
 		}
 
 
@@ -746,7 +765,7 @@ func CmdImages(args ...string) error {
 	return nil
 	return nil
 }
 }
 
 
-func CmdPs(args ...string) error {
+func (cli *DockerCli) CmdPs(args ...string) error {
 	cmd := Subcmd("ps", "[OPTIONS]", "List containers")
 	cmd := Subcmd("ps", "[OPTIONS]", "List containers")
 	quiet := cmd.Bool("q", false, "Only display numeric IDs")
 	quiet := cmd.Bool("q", false, "Only display numeric IDs")
 	all := cmd.Bool("a", false, "Show all containers. Only running containers are shown by default.")
 	all := cmd.Bool("a", false, "Show all containers. Only running containers are shown by default.")
@@ -763,15 +782,9 @@ func CmdPs(args ...string) error {
 	if *last == -1 && *nLatest {
 	if *last == -1 && *nLatest {
 		*last = 1
 		*last = 1
 	}
 	}
-	if *quiet {
-		v.Set("only_ids", "1")
-	}
 	if *all {
 	if *all {
 		v.Set("all", "1")
 		v.Set("all", "1")
 	}
 	}
-	if *noTrunc {
-		v.Set("trunc_cmd", "0")
-	}
 	if *last != -1 {
 	if *last != -1 {
 		v.Set("limit", strconv.Itoa(*last))
 		v.Set("limit", strconv.Itoa(*last))
 	}
 	}
@@ -782,7 +795,7 @@ func CmdPs(args ...string) error {
 		v.Set("before", *before)
 		v.Set("before", *before)
 	}
 	}
 
 
-	body, _, err := call("GET", "/containers/ps?"+v.Encode(), nil)
+	body, _, err := cli.call("GET", "/containers/ps?"+v.Encode(), nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -799,14 +812,22 @@ func CmdPs(args ...string) error {
 
 
 	for _, out := range outs {
 	for _, out := range outs {
 		if !*quiet {
 		if !*quiet {
-			fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\t", out.Id, out.Image, out.Command, out.Status, HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.Ports)
+			if *noTrunc {
+				fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\t", out.Id, out.Image, out.Command, out.Status, utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.Ports)
+			} else {
+				fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\t", utils.TruncateId(out.Id), out.Image, utils.Trunc(out.Command, 20), out.Status, utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.Ports)
+			}
 			if out.SizeRootFs > 0 {
 			if out.SizeRootFs > 0 {
-				fmt.Fprintf(w, "%s (virtual %s)\n", HumanSize(out.SizeRw), HumanSize(out.SizeRootFs))
+				fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.SizeRw), utils.HumanSize(out.SizeRootFs))
 			} else {
 			} else {
-				fmt.Fprintf(w, "%s\n", HumanSize(out.SizeRw))
+				fmt.Fprintf(w, "%s\n", utils.HumanSize(out.SizeRw))
 			}
 			}
 		} else {
 		} else {
-			fmt.Fprintln(w, out.Id)
+			if *noTrunc {
+				fmt.Fprintln(w, out.Id)
+			} else {
+				fmt.Fprintln(w, utils.TruncateId(out.Id))
+			}
 		}
 		}
 	}
 	}
 
 
@@ -816,7 +837,7 @@ func CmdPs(args ...string) error {
 	return nil
 	return nil
 }
 }
 
 
-func CmdCommit(args ...string) error {
+func (cli *DockerCli) CmdCommit(args ...string) error {
 	cmd := Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY [TAG]]", "Create a new image from a container's changes")
 	cmd := Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY [TAG]]", "Create a new image from a container's changes")
 	flComment := cmd.String("m", "", "Commit message")
 	flComment := cmd.String("m", "", "Commit message")
 	flAuthor := cmd.String("author", "", "Author (eg. \"John Hannibal Smith <hannibal@a-team.com>\"")
 	flAuthor := cmd.String("author", "", "Author (eg. \"John Hannibal Smith <hannibal@a-team.com>\"")
@@ -843,7 +864,7 @@ func CmdCommit(args ...string) error {
 			return err
 			return err
 		}
 		}
 	}
 	}
-	body, _, err := call("POST", "/commit?"+v.Encode(), config)
+	body, _, err := cli.call("POST", "/commit?"+v.Encode(), config)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -858,7 +879,7 @@ func CmdCommit(args ...string) error {
 	return nil
 	return nil
 }
 }
 
 
-func CmdExport(args ...string) error {
+func (cli *DockerCli) CmdExport(args ...string) error {
 	cmd := Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive")
 	cmd := Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -869,13 +890,13 @@ func CmdExport(args ...string) error {
 		return nil
 		return nil
 	}
 	}
 
 
-	if err := stream("GET", "/containers/"+cmd.Arg(0)+"/export"); err != nil {
+	if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, os.Stdout); err != nil {
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
-func CmdDiff(args ...string) error {
+func (cli *DockerCli) CmdDiff(args ...string) error {
 	cmd := Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem")
 	cmd := Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -885,7 +906,7 @@ func CmdDiff(args ...string) error {
 		return nil
 		return nil
 	}
 	}
 
 
-	body, _, err := call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil)
+	body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -901,7 +922,7 @@ func CmdDiff(args ...string) error {
 	return nil
 	return nil
 }
 }
 
 
-func CmdLogs(args ...string) error {
+func (cli *DockerCli) CmdLogs(args ...string) error {
 	cmd := Subcmd("logs", "CONTAINER", "Fetch the logs of a container")
 	cmd := Subcmd("logs", "CONTAINER", "Fetch the logs of a container")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -916,13 +937,13 @@ func CmdLogs(args ...string) error {
 	v.Set("stdout", "1")
 	v.Set("stdout", "1")
 	v.Set("stderr", "1")
 	v.Set("stderr", "1")
 
 
-	if err := hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), false); err != nil {
+	if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), false); err != nil {
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
-func CmdAttach(args ...string) error {
+func (cli *DockerCli) CmdAttach(args ...string) error {
 	cmd := Subcmd("attach", "CONTAINER", "Attach to a running container")
 	cmd := Subcmd("attach", "CONTAINER", "Attach to a running container")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -932,7 +953,7 @@ func CmdAttach(args ...string) error {
 		return nil
 		return nil
 	}
 	}
 
 
-	body, _, err := call("GET", "/containers/"+cmd.Arg(0)+"/json", nil)
+	body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -949,13 +970,13 @@ func CmdAttach(args ...string) error {
 	v.Set("stderr", "1")
 	v.Set("stderr", "1")
 	v.Set("stdin", "1")
 	v.Set("stdin", "1")
 
 
-	if err := hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty); err != nil {
+	if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty); err != nil {
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
-func CmdSearch(args ...string) error {
+func (cli *DockerCli) CmdSearch(args ...string) error {
 	cmd := Subcmd("search", "NAME", "Search the docker index for images")
 	cmd := Subcmd("search", "NAME", "Search the docker index for images")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -967,7 +988,7 @@ func CmdSearch(args ...string) error {
 
 
 	v := url.Values{}
 	v := url.Values{}
 	v.Set("term", cmd.Arg(0))
 	v.Set("term", cmd.Arg(0))
-	body, _, err := call("GET", "/images/search?"+v.Encode(), nil)
+	body, _, err := cli.call("GET", "/images/search?"+v.Encode(), nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -1048,7 +1069,7 @@ func (opts PathOpts) Set(val string) error {
 	return nil
 	return nil
 }
 }
 
 
-func CmdTag(args ...string) error {
+func (cli *DockerCli) CmdTag(args ...string) error {
 	cmd := Subcmd("tag", "[OPTIONS] IMAGE REPOSITORY [TAG]", "Tag an image into a repository")
 	cmd := Subcmd("tag", "[OPTIONS] IMAGE REPOSITORY [TAG]", "Tag an image into a repository")
 	force := cmd.Bool("f", false, "Force")
 	force := cmd.Bool("f", false, "Force")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
@@ -1069,13 +1090,13 @@ func CmdTag(args ...string) error {
 		v.Set("force", "1")
 		v.Set("force", "1")
 	}
 	}
 
 
-	if _, _, err := call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil); err != nil {
+	if _, _, err := cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil); err != nil {
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
-func CmdRun(args ...string) error {
+func (cli *DockerCli) CmdRun(args ...string) error {
 	config, cmd, err := ParseRun(args, nil)
 	config, cmd, err := ParseRun(args, nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
@@ -1086,16 +1107,16 @@ func CmdRun(args ...string) error {
 	}
 	}
 
 
 	//create the container
 	//create the container
-	body, statusCode, err := call("POST", "/containers/create", config)
+	body, statusCode, err := cli.call("POST", "/containers/create", config)
 	//if image not found try to pull it
 	//if image not found try to pull it
 	if statusCode == 404 {
 	if statusCode == 404 {
 		v := url.Values{}
 		v := url.Values{}
 		v.Set("fromImage", config.Image)
 		v.Set("fromImage", config.Image)
-		err = hijack("POST", "/images/create?"+v.Encode(), false)
+		err = cli.stream("POST", "/images/create?"+v.Encode(), nil, os.Stderr)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
-		body, _, err = call("POST", "/containers/create", config)
+		body, _, err = cli.call("POST", "/containers/create", config)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
@@ -1130,13 +1151,13 @@ func CmdRun(args ...string) error {
 	}
 	}
 
 
 	//start the container
 	//start the container
-	_, _, err = call("POST", "/containers/"+out.Id+"/start", nil)
+	_, _, err = cli.call("POST", "/containers/"+out.Id+"/start", nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
 
 
 	if config.AttachStdin || config.AttachStdout || config.AttachStderr {
 	if config.AttachStdin || config.AttachStdout || config.AttachStderr {
-		if err := hijack("POST", "/containers/"+out.Id+"/attach?"+v.Encode(), config.Tty); err != nil {
+		if err := cli.hijack("POST", "/containers/"+out.Id+"/attach?"+v.Encode(), config.Tty); err != nil {
 			return err
 			return err
 		}
 		}
 	}
 	}
@@ -1146,7 +1167,7 @@ func CmdRun(args ...string) error {
 	return nil
 	return nil
 }
 }
 
 
-func call(method, path string, data interface{}) ([]byte, int, error) {
+func (cli *DockerCli) call(method, path string, data interface{}) ([]byte, int, error) {
 	var params io.Reader
 	var params io.Reader
 	if data != nil {
 	if data != nil {
 		buf, err := json.Marshal(data)
 		buf, err := json.Marshal(data)
@@ -1156,7 +1177,7 @@ func call(method, path string, data interface{}) ([]byte, int, error) {
 		params = bytes.NewBuffer(buf)
 		params = bytes.NewBuffer(buf)
 	}
 	}
 
 
-	req, err := http.NewRequest(method, "http://0.0.0.0:4243"+path, params)
+	req, err := http.NewRequest(method, fmt.Sprintf("http://%s:%d", cli.host, cli.port)+path, params)
 	if err != nil {
 	if err != nil {
 		return nil, -1, err
 		return nil, -1, err
 	}
 	}
@@ -1184,8 +1205,11 @@ func call(method, path string, data interface{}) ([]byte, int, error) {
 	return body, resp.StatusCode, nil
 	return body, resp.StatusCode, nil
 }
 }
 
 
-func stream(method, path string) error {
-	req, err := http.NewRequest(method, "http://0.0.0.0:4243"+path, nil)
+func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer) error {
+	if (method == "POST" || method == "PUT") && in == nil {
+		in = bytes.NewReader([]byte{})
+	}
+	req, err := http.NewRequest(method, fmt.Sprintf("http://%s:%d%s", cli.host, cli.port, path), in)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -1201,19 +1225,27 @@ func stream(method, path string) error {
 		return err
 		return err
 	}
 	}
 	defer resp.Body.Close()
 	defer resp.Body.Close()
-	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
+	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return err
+		}
+		return fmt.Errorf("error: %s", body)
+	}
+
+	if _, err := io.Copy(out, resp.Body); err != nil {
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
-func hijack(method, path string, setRawTerminal bool) error {
+func (cli *DockerCli) hijack(method, path string, setRawTerminal bool) error {
 	req, err := http.NewRequest(method, path, nil)
 	req, err := http.NewRequest(method, path, nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
 	req.Header.Set("Content-Type", "plain/text")
 	req.Header.Set("Content-Type", "plain/text")
-	dial, err := net.Dial("tcp", "0.0.0.0:4243")
+	dial, err := net.Dial("tcp", fmt.Sprintf("%s:%d", cli.host, cli.port))
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -1224,20 +1256,20 @@ func hijack(method, path string, setRawTerminal bool) error {
 	rwc, br := clientconn.Hijack()
 	rwc, br := clientconn.Hijack()
 	defer rwc.Close()
 	defer rwc.Close()
 
 
-	receiveStdout := Go(func() error {
+	receiveStdout := utils.Go(func() error {
 		_, err := io.Copy(os.Stdout, br)
 		_, err := io.Copy(os.Stdout, br)
 		return err
 		return err
 	})
 	})
 
 
 	if setRawTerminal && term.IsTerminal(int(os.Stdin.Fd())) && os.Getenv("NORAW") == "" {
 	if setRawTerminal && term.IsTerminal(int(os.Stdin.Fd())) && os.Getenv("NORAW") == "" {
-		if oldState, err := SetRawTerminal(); err != nil {
+		if oldState, err := term.SetRawTerminal(); err != nil {
 			return err
 			return err
 		} else {
 		} else {
-			defer RestoreTerminal(oldState)
+			defer term.RestoreTerminal(oldState)
 		}
 		}
 	}
 	}
 
 
-	sendStdin := Go(func() error {
+	sendStdin := utils.Go(func() error {
 		_, err := io.Copy(rwc, os.Stdin)
 		_, err := io.Copy(rwc, os.Stdin)
 		if err := rwc.(*net.TCPConn).CloseWrite(); err != nil {
 		if err := rwc.(*net.TCPConn).CloseWrite(); err != nil {
 			fmt.Fprintf(os.Stderr, "Couldn't send EOF: %s\n", err)
 			fmt.Fprintf(os.Stderr, "Couldn't send EOF: %s\n", err)
@@ -1266,3 +1298,12 @@ func Subcmd(name, signature, description string) *flag.FlagSet {
 	}
 	}
 	return flags
 	return flags
 }
 }
+
+func NewDockerCli(host string, port int) *DockerCli {
+	return &DockerCli{host, port}
+}
+
+type DockerCli struct {
+	host string
+	port int
+}

+ 1 - 0
commands_test.go

@@ -413,6 +413,7 @@ func TestAttachDisconnect(t *testing.T) {
 	container, err := NewBuilder(runtime).Create(
 	container, err := NewBuilder(runtime).Create(
 		&Config{
 		&Config{
 			Image:     GetTestImage(runtime).Id,
 			Image:     GetTestImage(runtime).Id,
+			CpuShares: 1000,
 			Memory:    33554432,
 			Memory:    33554432,
 			Cmd:       []string{"/bin/cat"},
 			Cmd:       []string{"/bin/cat"},
 			OpenStdin: true,
 			OpenStdin: true,

+ 40 - 35
container.go

@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"encoding/json"
 	"flag"
 	"flag"
 	"fmt"
 	"fmt"
+	"github.com/dotcloud/docker/utils"
 	"github.com/kr/pty"
 	"github.com/kr/pty"
 	"io"
 	"io"
 	"io/ioutil"
 	"io/ioutil"
@@ -40,8 +41,8 @@ type Container struct {
 	ResolvConfPath string
 	ResolvConfPath string
 
 
 	cmd       *exec.Cmd
 	cmd       *exec.Cmd
-	stdout    *writeBroadcaster
-	stderr    *writeBroadcaster
+	stdout    *utils.WriteBroadcaster
+	stderr    *utils.WriteBroadcaster
 	stdin     io.ReadCloser
 	stdin     io.ReadCloser
 	stdinPipe io.WriteCloser
 	stdinPipe io.WriteCloser
 	ptyMaster io.Closer
 	ptyMaster io.Closer
@@ -57,6 +58,7 @@ type Config struct {
 	User         string
 	User         string
 	Memory       int64 // Memory limit (in bytes)
 	Memory       int64 // Memory limit (in bytes)
 	MemorySwap   int64 // Total memory usage (memory + swap); set `-1' to disable swap
 	MemorySwap   int64 // Total memory usage (memory + swap); set `-1' to disable swap
+	CpuShares    int64 // CPU shares (relative weight vs. other containers)
 	AttachStdin  bool
 	AttachStdin  bool
 	AttachStdout bool
 	AttachStdout bool
 	AttachStderr bool
 	AttachStderr bool
@@ -92,6 +94,8 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *flag.FlagSet
 		*flMemory = 0
 		*flMemory = 0
 	}
 	}
 
 
+	flCpuShares := cmd.Int64("c", 0, "CPU shares (relative weight)")
+
 	var flPorts ListOpts
 	var flPorts ListOpts
 	cmd.Var(&flPorts, "p", "Expose a container's port to the host (use 'docker port' to see the actual mapping)")
 	cmd.Var(&flPorts, "p", "Expose a container's port to the host (use 'docker port' to see the actual mapping)")
 
 
@@ -138,6 +142,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *flag.FlagSet
 		Tty:          *flTty,
 		Tty:          *flTty,
 		OpenStdin:    *flStdin,
 		OpenStdin:    *flStdin,
 		Memory:       *flMemory,
 		Memory:       *flMemory,
+		CpuShares:    *flCpuShares,
 		AttachStdin:  flAttach.Get("stdin"),
 		AttachStdin:  flAttach.Get("stdin"),
 		AttachStdout: flAttach.Get("stdout"),
 		AttachStdout: flAttach.Get("stdout"),
 		AttachStderr: flAttach.Get("stderr"),
 		AttachStderr: flAttach.Get("stderr"),
@@ -248,9 +253,9 @@ func (container *Container) startPty() error {
 	// Copy the PTYs to our broadcasters
 	// Copy the PTYs to our broadcasters
 	go func() {
 	go func() {
 		defer container.stdout.CloseWriters()
 		defer container.stdout.CloseWriters()
-		Debugf("[startPty] Begin of stdout pipe")
+		utils.Debugf("[startPty] Begin of stdout pipe")
 		io.Copy(container.stdout, ptyMaster)
 		io.Copy(container.stdout, ptyMaster)
-		Debugf("[startPty] End of stdout pipe")
+		utils.Debugf("[startPty] End of stdout pipe")
 	}()
 	}()
 
 
 	// stdin
 	// stdin
@@ -259,9 +264,9 @@ func (container *Container) startPty() error {
 		container.cmd.SysProcAttr = &syscall.SysProcAttr{Setctty: true, Setsid: true}
 		container.cmd.SysProcAttr = &syscall.SysProcAttr{Setctty: true, Setsid: true}
 		go func() {
 		go func() {
 			defer container.stdin.Close()
 			defer container.stdin.Close()
-			Debugf("[startPty] Begin of stdin pipe")
+			utils.Debugf("[startPty] Begin of stdin pipe")
 			io.Copy(ptyMaster, container.stdin)
 			io.Copy(ptyMaster, container.stdin)
-			Debugf("[startPty] End of stdin pipe")
+			utils.Debugf("[startPty] End of stdin pipe")
 		}()
 		}()
 	}
 	}
 	if err := container.cmd.Start(); err != nil {
 	if err := container.cmd.Start(); err != nil {
@@ -281,9 +286,9 @@ func (container *Container) start() error {
 		}
 		}
 		go func() {
 		go func() {
 			defer stdin.Close()
 			defer stdin.Close()
-			Debugf("Begin of stdin pipe [start]")
+			utils.Debugf("Begin of stdin pipe [start]")
 			io.Copy(stdin, container.stdin)
 			io.Copy(stdin, container.stdin)
-			Debugf("End of stdin pipe [start]")
+			utils.Debugf("End of stdin pipe [start]")
 		}()
 		}()
 	}
 	}
 	return container.cmd.Start()
 	return container.cmd.Start()
@@ -300,8 +305,8 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 			errors <- err
 			errors <- err
 		} else {
 		} else {
 			go func() {
 			go func() {
-				Debugf("[start] attach stdin\n")
-				defer Debugf("[end] attach stdin\n")
+				utils.Debugf("[start] attach stdin\n")
+				defer utils.Debugf("[end] attach stdin\n")
 				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
 				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
 				if cStdout != nil {
 				if cStdout != nil {
 					defer cStdout.Close()
 					defer cStdout.Close()
@@ -313,12 +318,12 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 					defer cStdin.Close()
 					defer cStdin.Close()
 				}
 				}
 				if container.Config.Tty {
 				if container.Config.Tty {
-					_, err = CopyEscapable(cStdin, stdin)
+					_, err = utils.CopyEscapable(cStdin, stdin)
 				} else {
 				} else {
 					_, err = io.Copy(cStdin, stdin)
 					_, err = io.Copy(cStdin, stdin)
 				}
 				}
 				if err != nil {
 				if err != nil {
-					Debugf("[error] attach stdin: %s\n", err)
+					utils.Debugf("[error] attach stdin: %s\n", err)
 				}
 				}
 				// Discard error, expecting pipe error
 				// Discard error, expecting pipe error
 				errors <- nil
 				errors <- nil
@@ -332,8 +337,8 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 		} else {
 		} else {
 			cStdout = p
 			cStdout = p
 			go func() {
 			go func() {
-				Debugf("[start] attach stdout\n")
-				defer Debugf("[end]  attach stdout\n")
+				utils.Debugf("[start] attach stdout\n")
+				defer utils.Debugf("[end]  attach stdout\n")
 				// If we are in StdinOnce mode, then close stdin
 				// If we are in StdinOnce mode, then close stdin
 				if container.Config.StdinOnce {
 				if container.Config.StdinOnce {
 					if stdin != nil {
 					if stdin != nil {
@@ -345,7 +350,7 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 				}
 				}
 				_, err := io.Copy(stdout, cStdout)
 				_, err := io.Copy(stdout, cStdout)
 				if err != nil {
 				if err != nil {
-					Debugf("[error] attach stdout: %s\n", err)
+					utils.Debugf("[error] attach stdout: %s\n", err)
 				}
 				}
 				errors <- err
 				errors <- err
 			}()
 			}()
@@ -358,8 +363,8 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 		} else {
 		} else {
 			cStderr = p
 			cStderr = p
 			go func() {
 			go func() {
-				Debugf("[start] attach stderr\n")
-				defer Debugf("[end]  attach stderr\n")
+				utils.Debugf("[start] attach stderr\n")
+				defer utils.Debugf("[end]  attach stderr\n")
 				// If we are in StdinOnce mode, then close stdin
 				// If we are in StdinOnce mode, then close stdin
 				if container.Config.StdinOnce {
 				if container.Config.StdinOnce {
 					if stdin != nil {
 					if stdin != nil {
@@ -371,13 +376,13 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 				}
 				}
 				_, err := io.Copy(stderr, cStderr)
 				_, err := io.Copy(stderr, cStderr)
 				if err != nil {
 				if err != nil {
-					Debugf("[error] attach stderr: %s\n", err)
+					utils.Debugf("[error] attach stderr: %s\n", err)
 				}
 				}
 				errors <- err
 				errors <- err
 			}()
 			}()
 		}
 		}
 	}
 	}
-	return Go(func() error {
+	return utils.Go(func() error {
 		if cStdout != nil {
 		if cStdout != nil {
 			defer cStdout.Close()
 			defer cStdout.Close()
 		}
 		}
@@ -387,14 +392,14 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 		// FIXME: how do clean up the stdin goroutine without the unwanted side effect
 		// FIXME: how do clean up the stdin goroutine without the unwanted side effect
 		// of closing the passed stdin? Add an intermediary io.Pipe?
 		// of closing the passed stdin? Add an intermediary io.Pipe?
 		for i := 0; i < nJobs; i += 1 {
 		for i := 0; i < nJobs; i += 1 {
-			Debugf("Waiting for job %d/%d\n", i+1, nJobs)
+			utils.Debugf("Waiting for job %d/%d\n", i+1, nJobs)
 			if err := <-errors; err != nil {
 			if err := <-errors; err != nil {
-				Debugf("Job %d returned error %s. Aborting all jobs\n", i+1, err)
+				utils.Debugf("Job %d returned error %s. Aborting all jobs\n", i+1, err)
 				return err
 				return err
 			}
 			}
-			Debugf("Job %d completed successfully\n", i+1)
+			utils.Debugf("Job %d completed successfully\n", i+1)
 		}
 		}
-		Debugf("All jobs completed successfully\n")
+		utils.Debugf("All jobs completed successfully\n")
 		return nil
 		return nil
 	})
 	})
 }
 }
@@ -552,13 +557,13 @@ func (container *Container) StdinPipe() (io.WriteCloser, error) {
 func (container *Container) StdoutPipe() (io.ReadCloser, error) {
 func (container *Container) StdoutPipe() (io.ReadCloser, error) {
 	reader, writer := io.Pipe()
 	reader, writer := io.Pipe()
 	container.stdout.AddWriter(writer)
 	container.stdout.AddWriter(writer)
-	return newBufReader(reader), nil
+	return utils.NewBufReader(reader), nil
 }
 }
 
 
 func (container *Container) StderrPipe() (io.ReadCloser, error) {
 func (container *Container) StderrPipe() (io.ReadCloser, error) {
 	reader, writer := io.Pipe()
 	reader, writer := io.Pipe()
 	container.stderr.AddWriter(writer)
 	container.stderr.AddWriter(writer)
-	return newBufReader(reader), nil
+	return utils.NewBufReader(reader), nil
 }
 }
 
 
 func (container *Container) allocateNetwork() error {
 func (container *Container) allocateNetwork() error {
@@ -606,20 +611,20 @@ func (container *Container) waitLxc() error {
 
 
 func (container *Container) monitor() {
 func (container *Container) monitor() {
 	// Wait for the program to exit
 	// Wait for the program to exit
-	Debugf("Waiting for process")
+	utils.Debugf("Waiting for process")
 
 
 	// If the command does not exists, try to wait via lxc
 	// If the command does not exists, try to wait via lxc
 	if container.cmd == nil {
 	if container.cmd == nil {
 		if err := container.waitLxc(); err != nil {
 		if err := container.waitLxc(); err != nil {
-			Debugf("%s: Process: %s", container.Id, err)
+			utils.Debugf("%s: Process: %s", container.Id, err)
 		}
 		}
 	} else {
 	} else {
 		if err := container.cmd.Wait(); err != nil {
 		if err := container.cmd.Wait(); err != nil {
 			// Discard the error as any signals or non 0 returns will generate an error
 			// Discard the error as any signals or non 0 returns will generate an error
-			Debugf("%s: Process: %s", container.Id, err)
+			utils.Debugf("%s: Process: %s", container.Id, err)
 		}
 		}
 	}
 	}
-	Debugf("Process finished")
+	utils.Debugf("Process finished")
 
 
 	var exitCode int = -1
 	var exitCode int = -1
 	if container.cmd != nil {
 	if container.cmd != nil {
@@ -630,19 +635,19 @@ func (container *Container) monitor() {
 	container.releaseNetwork()
 	container.releaseNetwork()
 	if container.Config.OpenStdin {
 	if container.Config.OpenStdin {
 		if err := container.stdin.Close(); err != nil {
 		if err := container.stdin.Close(); err != nil {
-			Debugf("%s: Error close stdin: %s", container.Id, err)
+			utils.Debugf("%s: Error close stdin: %s", container.Id, err)
 		}
 		}
 	}
 	}
 	if err := container.stdout.CloseWriters(); err != nil {
 	if err := container.stdout.CloseWriters(); err != nil {
-		Debugf("%s: Error close stdout: %s", container.Id, err)
+		utils.Debugf("%s: Error close stdout: %s", container.Id, err)
 	}
 	}
 	if err := container.stderr.CloseWriters(); err != nil {
 	if err := container.stderr.CloseWriters(); err != nil {
-		Debugf("%s: Error close stderr: %s", container.Id, err)
+		utils.Debugf("%s: Error close stderr: %s", container.Id, err)
 	}
 	}
 
 
 	if container.ptyMaster != nil {
 	if container.ptyMaster != nil {
 		if err := container.ptyMaster.Close(); err != nil {
 		if err := container.ptyMaster.Close(); err != nil {
-			Debugf("%s: Error closing Pty master: %s", container.Id, err)
+			utils.Debugf("%s: Error closing Pty master: %s", container.Id, err)
 		}
 		}
 	}
 	}
 
 
@@ -759,7 +764,7 @@ func (container *Container) RwChecksum() (string, error) {
 	if err != nil {
 	if err != nil {
 		return "", err
 		return "", err
 	}
 	}
-	return HashData(rwData)
+	return utils.HashData(rwData)
 }
 }
 
 
 func (container *Container) Export() (Archive, error) {
 func (container *Container) Export() (Archive, error) {
@@ -830,7 +835,7 @@ func (container *Container) Unmount() error {
 // In case of a collision a lookup with Runtime.Get() will fail, and the caller
 // In case of a collision a lookup with Runtime.Get() will fail, and the caller
 // will need to use a langer prefix, or the full-length container Id.
 // will need to use a langer prefix, or the full-length container Id.
 func (container *Container) ShortId() string {
 func (container *Container) ShortId() string {
-	return TruncateId(container.Id)
+	return utils.TruncateId(container.Id)
 }
 }
 
 
 func (container *Container) logPath(name string) string {
 func (container *Container) logPath(name string) string {

+ 8 - 2
container_test.go

@@ -390,6 +390,7 @@ func TestStart(t *testing.T) {
 		&Config{
 		&Config{
 			Image:     GetTestImage(runtime).Id,
 			Image:     GetTestImage(runtime).Id,
 			Memory:    33554432,
 			Memory:    33554432,
+			CpuShares: 1000,
 			Cmd:       []string{"/bin/cat"},
 			Cmd:       []string{"/bin/cat"},
 			OpenStdin: true,
 			OpenStdin: true,
 		},
 		},
@@ -1063,12 +1064,17 @@ func TestLXCConfig(t *testing.T) {
 	memMin := 33554432
 	memMin := 33554432
 	memMax := 536870912
 	memMax := 536870912
 	mem := memMin + rand.Intn(memMax-memMin)
 	mem := memMin + rand.Intn(memMax-memMin)
+	// CPU shares as well
+	cpuMin := 100
+	cpuMax := 10000
+	cpu := cpuMin + rand.Intn(cpuMax-cpuMin)
 	container, err := NewBuilder(runtime).Create(&Config{
 	container, err := NewBuilder(runtime).Create(&Config{
 		Image: GetTestImage(runtime).Id,
 		Image: GetTestImage(runtime).Id,
 		Cmd:   []string{"/bin/true"},
 		Cmd:   []string{"/bin/true"},
 
 
-		Hostname: "foobar",
-		Memory:   int64(mem),
+		Hostname:  "foobar",
+		Memory:    int64(mem),
+		CpuShares: int64(cpu),
 	},
 	},
 	)
 	)
 	if err != nil {
 	if err != nil {

+ 2 - 1
docker/docker.go

@@ -4,6 +4,7 @@ import (
 	"flag"
 	"flag"
 	"fmt"
 	"fmt"
 	"github.com/dotcloud/docker"
 	"github.com/dotcloud/docker"
+	"github.com/dotcloud/docker/utils"
 	"io/ioutil"
 	"io/ioutil"
 	"log"
 	"log"
 	"os"
 	"os"
@@ -17,7 +18,7 @@ var (
 )
 )
 
 
 func main() {
 func main() {
-	if docker.SelfPath() == "/sbin/init" {
+	if utils.SelfPath() == "/sbin/init" {
 		// Running in init mode
 		// Running in init mode
 		docker.SysInit()
 		docker.SysInit()
 		return
 		return

+ 3 - 8
docs/Makefile

@@ -44,7 +44,7 @@ clean:
 	-rm -rf $(BUILDDIR)/*
 	-rm -rf $(BUILDDIR)/*
 
 
 docs:
 docs:
-	-rm -rf $(BUILDDIR)/*
+	#-rm -rf $(BUILDDIR)/*
 	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/html
 	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/html
 	@echo
 	@echo
 	@echo "Build finished. The documentation pages are now in $(BUILDDIR)/html."
 	@echo "Build finished. The documentation pages are now in $(BUILDDIR)/html."
@@ -59,18 +59,13 @@ site:
 connect:
 connect:
 	@echo connecting dotcloud to www.docker.io website, make sure to use user 1
 	@echo connecting dotcloud to www.docker.io website, make sure to use user 1
 	@cd _build/website/ ; \
 	@cd _build/website/ ; \
-	dotcloud list ; \
-	dotcloud connect dockerwebsite
+	dotcloud connect dockerwebsite ;
+	dotcloud list
 
 
 push:
 push:
 	@cd _build/website/ ; \
 	@cd _build/website/ ; \
 	dotcloud push
 	dotcloud push
 
 
-github-deploy: docs
-	rm -fr github-deploy
-	git clone ssh://git@github.com/dotcloud/docker github-deploy
-	cd github-deploy && git checkout -f gh-pages && git rm -r * && rsync -avH ../_build/html/ ./ && touch .nojekyll && echo "docker.io" > CNAME && git add * && git commit -m "Updating docs"
-
 $(VERSIONS):
 $(VERSIONS):
 	@echo "Hello world"
 	@echo "Hello world"
 
 

+ 0 - 0
docs/sources/.nojekyll


+ 0 - 1
docs/sources/CNAME

@@ -1 +0,0 @@
-docker.io

+ 17 - 15
docs/sources/remote-api/api.rst → docs/sources/api/docker_remote_api.rst

@@ -9,7 +9,7 @@ Docker Remote API
 
 
 - The Remote API is replacing rcli
 - The Remote API is replacing rcli
 - Default port in the docker deamon is 4243 
 - Default port in the docker deamon is 4243 
-- The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection in hijacked to transport stdout stdin and stderr
+- The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport stdout stdin and stderr
 
 
 2. Endpoints
 2. Endpoints
 ============
 ============
@@ -28,7 +28,7 @@ List containers
 
 
 	.. sourcecode:: http
 	.. sourcecode:: http
 
 
-	   GET /containers/ps?trunc_cmd=0&all=1&only_ids=0&before=8dfafdbc3a40 HTTP/1.1
+	   GET /containers/ps?all=1&before=8dfafdbc3a40 HTTP/1.1
 	   
 	   
 	**Example response**:
 	**Example response**:
 
 
@@ -68,13 +68,12 @@ List containers
 		}
 		}
 	   ]
 	   ]
  
  
-	:query only_ids: 1 or 0, Only display numeric IDs. Default 0
-	:query all: 1 or 0, Show all containers. Only running containers are shown by default
-	:query trunc_cmd: 1 or 0, Truncate output. Output is truncated by default  
+	:query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default
 	:query limit: Show ``limit`` last created containers, include non-running ones.
 	:query limit: Show ``limit`` last created containers, include non-running ones.
 	:query since: Show only containers created since Id, include non-running ones.
 	:query since: Show only containers created since Id, include non-running ones.
 	:query before: Show only containers created before Id, include non-running ones.
 	:query before: Show only containers created before Id, include non-running ones.
 	:statuscode 200: no error
 	:statuscode 200: no error
+	:statuscode 400: bad parameter
 	:statuscode 500: server error
 	:statuscode 500: server error
 
 
 
 
@@ -391,12 +390,13 @@ Attach to a container
 
 
 	   {{ STREAM }}
 	   {{ STREAM }}
 	   	
 	   	
-	:query logs: 1 or 0, return logs. Default 0
-	:query stream: 1 or 0, return stream. Default 0
-	:query stdin: 1 or 0, if stream=1, attach to stdin. Default 0
-	:query stdout: 1 or 0, if logs=1, return stdout log, if stream=1, attach to stdout. Default 0
-	:query stderr: 1 or 0, if logs=1, return stderr log, if stream=1, attach to stderr. Default 0
+	:query logs: 1/True/true or 0/False/false, return logs. Default false
+	:query stream: 1/True/true or 0/False/false, return stream. Default false
+	:query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false
+	:query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false
+	:query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false
 	:statuscode 200: no error
 	:statuscode 200: no error
+	:statuscode 400: bad parameter
 	:statuscode 404: no such container
 	:statuscode 404: no such container
 	:statuscode 500: server error
 	:statuscode 500: server error
 
 
@@ -447,8 +447,9 @@ Remove a container
 
 
 	   HTTP/1.1 204 OK
 	   HTTP/1.1 204 OK
 
 
-	:query v: 1 or 0, Remove the volumes associated to the container. Default 0
+	:query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false
         :statuscode 204: no error
         :statuscode 204: no error
+	:statuscode 400: bad parameter
         :statuscode 404: no such container
         :statuscode 404: no such container
         :statuscode 500: server error
         :statuscode 500: server error
 
 
@@ -467,7 +468,7 @@ List Images
 
 
 	.. sourcecode:: http
 	.. sourcecode:: http
 
 
-	   GET /images/json?all=0&only_ids=0 HTTP/1.1
+	   GET /images/json?all=0 HTTP/1.1
 
 
 	**Example response**:
 	**Example response**:
 
 
@@ -523,9 +524,9 @@ List Images
 	   base [style=invisible]
 	   base [style=invisible]
 	   }
 	   }
  
  
-	:query only_ids: 1 or 0, Only display numeric IDs. Default 0
-	:query all: 1 or 0, Show all containers. Only running containers are shown by default
+	:query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default
 	:statuscode 200: no error
 	:statuscode 200: no error
+	:statuscode 400: bad parameter
 	:statuscode 500: server error
 	:statuscode 500: server error
 
 
 
 
@@ -723,8 +724,9 @@ Tag an image into a repository
            HTTP/1.1 200 OK
            HTTP/1.1 200 OK
 
 
 	:query repo: The repository to tag in
 	:query repo: The repository to tag in
-	:query force: 1 or 0, default 0
+	:query force: 1/True/true or 0/False/false, default false
 	:statuscode 200: no error
 	:statuscode 200: no error
+	:statuscode 400: bad parameter
 	:statuscode 404: no such image
 	:statuscode 404: no such image
         :statuscode 500: server error
         :statuscode 500: server error
 
 

+ 17 - 0
docs/sources/api/index.rst

@@ -0,0 +1,17 @@
+:title: docker documentation
+:description: docker documentation
+:keywords:
+
+API's
+=============
+
+This following :
+
+.. toctree::
+  :maxdepth: 3
+
+  registry_api
+  index_search_api
+  docker_remote_api
+
+

+ 6 - 1
docs/sources/index/search.rst → docs/sources/api/index_search_api.rst

@@ -1,3 +1,8 @@
+:title: Docker Index documentation
+:description: Documentation for docker Index
+:keywords: docker, index, api
+
+
 =======================
 =======================
 Docker Index Search API
 Docker Index Search API
 =======================
 =======================
@@ -32,7 +37,7 @@ Search
            {"name": "base2", "description": "A base ubuntu64  image..."},
            {"name": "base2", "description": "A base ubuntu64  image..."},
          ]
          ]
        }
        }
-   
+
    :query q: what you want to search for
    :query q: what you want to search for
    :statuscode 200: no error
    :statuscode 200: no error
    :statuscode 500: server error
    :statuscode 500: server error

+ 17 - 12
docs/sources/registry/api.rst → docs/sources/api/registry_api.rst

@@ -1,3 +1,8 @@
+:title: docker Registry documentation
+:description: Documentation for docker Registry and Registry API
+:keywords: docker, registry, api, index
+
+
 ===================
 ===================
 Docker Registry API
 Docker Registry API
 ===================
 ===================
@@ -44,7 +49,7 @@ We expect that there will be multiple registries out there. To help to grasp the
 
 
 .. note::
 .. note::
 
 
-    Mirror registries and private registries which do not use the Index don’t even need to run the registry code. They can be implemented by any kind of transport implementing HTTP GET and PUT. Read-only registries can be powered by a simple static HTTP server. 
+    Mirror registries and private registries which do not use the Index don’t even need to run the registry code. They can be implemented by any kind of transport implementing HTTP GET and PUT. Read-only registries can be powered by a simple static HTTP server.
 
 
 .. note::
 .. note::
 
 
@@ -80,7 +85,7 @@ On top of being a runtime for LXC, Docker is the Registry client. It supports:
 5. Index returns true/false lettings registry know if it should proceed or error out
 5. Index returns true/false lettings registry know if it should proceed or error out
 6. Get the payload for all layers
 6. Get the payload for all layers
 
 
-It’s possible to run docker pull https://<registry>/repositories/samalba/busybox. In this case, docker bypasses the Index. However the security is not guaranteed (in case Registry A is corrupted) because there won’t be any checksum checks.
+It’s possible to run docker pull \https://<registry>/repositories/samalba/busybox. In this case, docker bypasses the Index. However the security is not guaranteed (in case Registry A is corrupted) because there won’t be any checksum checks.
 
 
 Currently registry redirects to s3 urls for downloads, going forward all downloads need to be streamed through the registry. The Registry will then abstract the calls to S3 by a top-level class which implements sub-classes for S3 and local storage.
 Currently registry redirects to s3 urls for downloads, going forward all downloads need to be streamed through the registry. The Registry will then abstract the calls to S3 by a top-level class which implements sub-classes for S3 and local storage.
 
 
@@ -107,7 +112,7 @@ API (pulling repository foo/bar):
         Jsonified checksums (see part 4.4.1)
         Jsonified checksums (see part 4.4.1)
 
 
 3. (Docker -> Registry) GET /v1/repositories/foo/bar/tags/latest
 3. (Docker -> Registry) GET /v1/repositories/foo/bar/tags/latest
-    **Headers**: 
+    **Headers**:
         Authorization: Token signature=123abc,repository=”foo/bar”,access=write
         Authorization: Token signature=123abc,repository=”foo/bar”,access=write
 
 
 4. (Registry -> Index) GET /v1/repositories/foo/bar/images
 4. (Registry -> Index) GET /v1/repositories/foo/bar/images
@@ -121,10 +126,10 @@ API (pulling repository foo/bar):
     **Action**:
     **Action**:
         ( Lookup token see if they have access to pull.)
         ( Lookup token see if they have access to pull.)
 
 
-        If good: 
+        If good:
             HTTP 200 OK
             HTTP 200 OK
             Index will invalidate the token
             Index will invalidate the token
-        If bad: 
+        If bad:
             HTTP 401 Unauthorized
             HTTP 401 Unauthorized
 
 
 5. (Docker -> Registry) GET /v1/images/928374982374/ancestry
 5. (Docker -> Registry) GET /v1/images/928374982374/ancestry
@@ -186,9 +191,9 @@ API (pushing repos foo/bar):
     **Headers**:
     **Headers**:
         Authorization: Token signature=123abc,repository=”foo/bar”,access=write
         Authorization: Token signature=123abc,repository=”foo/bar”,access=write
     **Action**::
     **Action**::
-        - Index: 
+        - Index:
             will invalidate the token.
             will invalidate the token.
-        - Registry: 
+        - Registry:
             grants a session (if token is approved) and fetches the images id
             grants a session (if token is approved) and fetches the images id
 
 
 5. (Docker -> Registry) PUT /v1/images/98765432_parent/json
 5. (Docker -> Registry) PUT /v1/images/98765432_parent/json
@@ -223,7 +228,7 @@ API (pushing repos foo/bar):
     **Body**:
     **Body**:
         (The image, id’s, tags and checksums)
         (The image, id’s, tags and checksums)
 
 
-        [{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”, 
+        [{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”,
         “checksum”: “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”}]
         “checksum”: “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”}]
 
 
     **Return** HTTP 204
     **Return** HTTP 204
@@ -240,8 +245,8 @@ API (pushing repos foo/bar):
 The Index has two main purposes (along with its fancy social features):
 The Index has two main purposes (along with its fancy social features):
 
 
 - Resolve short names (to avoid passing absolute URLs all the time)
 - Resolve short names (to avoid passing absolute URLs all the time)
-   - username/projectname -> https://registry.docker.io/users/<username>/repositories/<projectname>/
-   - team/projectname -> https://registry.docker.io/team/<team>/repositories/<projectname>/
+   - username/projectname -> \https://registry.docker.io/users/<username>/repositories/<projectname>/
+   - team/projectname -> \https://registry.docker.io/team/<team>/repositories/<projectname>/
 - Authenticate a user as a repos owner (for a central referenced repository)
 - Authenticate a user as a repos owner (for a central referenced repository)
 
 
 3.1 Without an Index
 3.1 Without an Index
@@ -296,7 +301,7 @@ POST /v1/users
     {"email": "sam@dotcloud.com", "password": "toto42", "username": "foobar"'}
     {"email": "sam@dotcloud.com", "password": "toto42", "username": "foobar"'}
 
 
 **Validation**:
 **Validation**:
-    - **username** : min 4 character, max 30 characters, all lowercase no special characters. 
+    - **username** : min 4 character, max 30 characters, all lowercase no special characters.
     - **password**: min 5 characters
     - **password**: min 5 characters
 
 
 **Valid**: return HTTP 200
 **Valid**: return HTTP 200
@@ -387,7 +392,7 @@ PUT /v1/repositories/<namespace>/<repo_name>/images
 
 
 **Body**:
 **Body**:
     [ {“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”, “checksum”: “sha256:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”} ]
     [ {“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”, “checksum”: “sha256:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”} ]
-    
+
 **Return** 204
 **Return** 204
 
 
 5. Chaining Registries
 5. Chaining Registries

+ 0 - 14
docs/sources/builder/index.rst

@@ -1,14 +0,0 @@
-:title: docker documentation
-:description: Documentation for docker builder
-:keywords: docker, builder, dockerfile
-
-
-Builder
-=======
-
-Contents:
-
-.. toctree::
-  :maxdepth: 2
-
-  basics

+ 2 - 2
docs/sources/commandline/cli.rst

@@ -4,7 +4,7 @@
 
 
 .. _cli:
 .. _cli:
 
 
-Command Line Interface
+Overview
 ======================
 ======================
 
 
 Docker Usage
 Docker Usage
@@ -24,7 +24,7 @@ Available Commands
 ~~~~~~~~~~~~~~~~~~
 ~~~~~~~~~~~~~~~~~~
 
 
 .. toctree::
 .. toctree::
-   :maxdepth: 1
+   :maxdepth: 2
 
 
    command/attach
    command/attach
    command/build
    command/build

+ 1 - 0
docs/sources/commandline/command/commit.rst

@@ -16,6 +16,7 @@ Full -run example::
 
 
     {"Hostname": "",
     {"Hostname": "",
      "User": "",
      "User": "",
+     "CpuShares": 0,
      "Memory": 0,
      "Memory": 0,
      "MemorySwap": 0,
      "MemorySwap": 0,
      "PortSpecs": ["22", "80", "443"],
      "PortSpecs": ["22", "80", "443"],

+ 1 - 0
docs/sources/commandline/command/run.rst

@@ -9,6 +9,7 @@
     Run a command in a new container
     Run a command in a new container
 
 
       -a=map[]: Attach to stdin, stdout or stderr.
       -a=map[]: Attach to stdin, stdout or stderr.
+      -c=0: CPU shares (relative weight)
       -d=false: Detached mode: leave the container running in the background
       -d=false: Detached mode: leave the container running in the background
       -e=[]: Set environment variables
       -e=[]: Set environment variables
       -h="": Container host name
       -h="": Container host name

+ 28 - 3
docs/sources/commandline/index.rst

@@ -9,8 +9,33 @@ Commands
 Contents:
 Contents:
 
 
 .. toctree::
 .. toctree::
-  :maxdepth: 3
+  :maxdepth: 1
 
 
-  basics
-  workingwithrepository
   cli
   cli
+  attach  <command/attach>
+  build   <command/build>
+  commit  <command/commit>
+  diff    <command/diff>
+  export  <command/export>
+  history <command/history>
+  images  <command/images>
+  import  <command/import>
+  info    <command/info>
+  inspect <command/inspect>
+  kill    <command/kill>
+  login   <command/login>
+  logs    <command/logs>
+  port    <command/port>
+  ps      <command/ps>
+  pull    <command/pull>
+  push    <command/push>
+  restart <command/restart>
+  rm      <command/rm>
+  rmi     <command/rmi>
+  run     <command/run>
+  search  <command/search>
+  start   <command/start>
+  stop    <command/stop>
+  tag     <command/tag>
+  version <command/version>
+  wait    <command/wait>

+ 0 - 42
docs/sources/commandline/workingwithrepository.rst

@@ -1,42 +0,0 @@
-.. _working_with_the_repository:
-
-Working with the repository
-============================
-
-Connecting to the repository
-----------------------------
-
-You create a user on the central docker repository by running
-
-.. code-block:: bash
-
-    docker login
-
-
-If your username does not exist it will prompt you to also enter a password and your e-mail address. It will then
-automatically log you in.
-
-
-Committing a container to a named image
----------------------------------------
-
-In order to commit to the repository it is required to have committed your container to an image with your namespace.
-
-.. code-block:: bash
-
-    # for example docker commit $CONTAINER_ID dhrp/kickassapp
-    docker commit <container_id> <your username>/<some_name>
-
-
-Pushing a container to the repository
------------------------------------------
-
-In order to push an image to the repository you need to have committed your container to a named image (see above)
-
-Now you can commit this image to the repository
-
-.. code-block:: bash
-
-    # for example docker push dhrp/kickassapp
-    docker push <image-name>
-

+ 0 - 0
docs/sources/static_files/lego_docker.jpg → docs/sources/concepts/images/lego_docker.jpg


+ 1 - 1
docs/sources/concepts/index.rst

@@ -12,6 +12,6 @@ Contents:
 .. toctree::
 .. toctree::
    :maxdepth: 1
    :maxdepth: 1
 
 
-   introduction
+   ../index
    buildingblocks
    buildingblocks
 
 

+ 1 - 3
docs/sources/concepts/introduction.rst

@@ -2,8 +2,6 @@
 :description: An introduction to docker and standard containers?
 :description: An introduction to docker and standard containers?
 :keywords: containers, lxc, concepts, explanation
 :keywords: containers, lxc, concepts, explanation
 
 
-.. _introduction:
-
 Introduction
 Introduction
 ============
 ============
 
 
@@ -20,7 +18,7 @@ Docker is a great building block for automating distributed systems: large-scale
 - **Isolation** docker isolates processes from each other and from the underlying host, using lightweight containers.
 - **Isolation** docker isolates processes from each other and from the underlying host, using lightweight containers.
 - **Repeatability** Because containers are isolated in their own filesystem, they behave the same regardless of where, when, and alongside what they run.
 - **Repeatability** Because containers are isolated in their own filesystem, they behave the same regardless of where, when, and alongside what they run.
 
 
-.. image:: http://www.docker.io/_static/lego_docker.jpg
+.. image:: images/lego_docker.jpg
 
 
 
 
 What is a Standard Container?
 What is a Standard Container?

+ 1 - 1
docs/sources/conf.py

@@ -41,7 +41,7 @@ html_add_permalinks = None
 
 
 
 
 # The master toctree document.
 # The master toctree document.
-master_doc = 'index'
+master_doc = 'toctree'
 
 
 # General information about the project.
 # General information about the project.
 project = u'Docker'
 project = u'Docker'

+ 1 - 1
docs/sources/contributing/devenvironment.rst

@@ -16,7 +16,7 @@ Instructions that have been verified to work on Ubuntu 12.10,
 
 
     mkdir -p $GOPATH/src/github.com/dotcloud
     mkdir -p $GOPATH/src/github.com/dotcloud
     cd $GOPATH/src/github.com/dotcloud
     cd $GOPATH/src/github.com/dotcloud
-    git clone git@github.com:dotcloud/docker.git
+    git clone git://github.com/dotcloud/docker.git
     cd docker
     cd docker
 
 
     go get -v github.com/dotcloud/docker/...
     go get -v github.com/dotcloud/docker/...

+ 0 - 2
docs/sources/dotcloud.yml

@@ -1,2 +0,0 @@
-www:
-  type: static

+ 1 - 1
docs/sources/examples/couchdb_data_volumes.rst

@@ -5,7 +5,7 @@
 .. _running_couchdb_service:
 .. _running_couchdb_service:
 
 
 Create a CouchDB service
 Create a CouchDB service
-======================
+========================
 
 
 .. include:: example_header.inc
 .. include:: example_header.inc
 
 

+ 2 - 2
docs/sources/examples/python_web_app.rst

@@ -58,7 +58,7 @@ Use the new image we just created and create a new container with network port 5
 .. code-block:: bash
 .. code-block:: bash
 
 
     docker logs $WEB_WORKER
     docker logs $WEB_WORKER
-     * Running on http://0.0.0.0:5000/
+     * Running on \http://0.0.0.0:5000/
 
 
 view the logs for the new container using the WEB_WORKER variable, and if everything worked as planned you should see the line "Running on http://0.0.0.0:5000/" in the log output.
 view the logs for the new container using the WEB_WORKER variable, and if everything worked as planned you should see the line "Running on http://0.0.0.0:5000/" in the log output.
 
 
@@ -70,7 +70,7 @@ lookup the public-facing port which is NAT-ed store the private port used by the
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    curl http://`hostname`:$WEB_PORT
+    curl \http://`hostname`:$WEB_PORT
       Hello world!
       Hello world!
 
 
 access the web app using curl. If everything worked as planned you should see the line "Hello world!" inside of your console.
 access the web app using curl. If everything worked as planned you should see the line "Hello world!" inside of your console.

+ 3 - 3
docs/sources/faq.rst

@@ -15,7 +15,7 @@ Most frequently asked questions.
 
 
 3. **Does Docker run on Mac OS X or Windows?**
 3. **Does Docker run on Mac OS X or Windows?**
 
 
-   Not at this time, Docker currently only runs on Linux, but you can use VirtualBox to run Docker in a virtual machine on your box, and get the best of both worlds. Check out the MacOSX_ and Windows_ intallation guides.
+   Not at this time, Docker currently only runs on Linux, but you can use VirtualBox to run Docker in a virtual machine on your box, and get the best of both worlds. Check out the MacOSX_ and Windows_ installation guides.
 
 
 4. **How do containers compare to virtual machines?**
 4. **How do containers compare to virtual machines?**
 
 
@@ -35,8 +35,8 @@ Most frequently asked questions.
     * `Ask questions on Stackoverflow`_
     * `Ask questions on Stackoverflow`_
     * `Join the conversation on Twitter`_
     * `Join the conversation on Twitter`_
 
 
-    .. _Windows: ../documentation/installation/windows.html
-    .. _MacOSX: ../documentation/installation/macos.html
+    .. _Windows: ../installation/windows/
+    .. _MacOSX: ../installation/vagrant/
     .. _the repo: http://www.github.com/dotcloud/docker
     .. _the repo: http://www.github.com/dotcloud/docker
     .. _IRC\: docker on freenode: irc://chat.freenode.net#docker
     .. _IRC\: docker on freenode: irc://chat.freenode.net#docker
     .. _Github: http://www.github.com/dotcloud/docker
     .. _Github: http://www.github.com/dotcloud/docker

+ 0 - 210
docs/sources/gettingstarted/index.html

@@ -1,210 +0,0 @@
-<!DOCTYPE html>
-<!--[if lt IE 7]>      <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
-<!--[if IE 7]>         <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
-<!--[if IE 8]>         <html class="no-js lt-ie9"> <![endif]-->
-<!--[if gt IE 8]><!-->
-<html class="no-js" xmlns="http://www.w3.org/1999/html" xmlns="http://www.w3.org/1999/html"> <!--<![endif]-->
-<head>
-    <meta charset="utf-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
-    <title>Docker - the Linux container runtime</title>
-
-    <meta name="description" content="Docker encapsulates heterogeneous payloads in standard containers">
-    <meta name="viewport" content="width=device-width">
-
-    <!-- twitter bootstrap -->
-    <link rel="stylesheet" href="../_static/css/bootstrap.min.css">
-    <link rel="stylesheet" href="../_static/css/bootstrap-responsive.min.css">
-
-    <!-- main style file -->
-    <link rel="stylesheet" href="../_static/css/main.css">
-
-    <!-- vendor scripts -->
-    <script src="../_static/js/vendor/jquery-1.9.1.min.js" type="text/javascript" ></script>
-    <script src="../_static/js/vendor/modernizr-2.6.2-respond-1.1.0.min.js" type="text/javascript" ></script>
-
-</head>
-
-
-<body>
-
-<div class="navbar navbar-fixed-top">
-    <div class="navbar-dotcloud">
-        <div class="container" style="text-align: center;">
-
-            <div style="float: right" class="pull-right">
-                <ul class="nav">
-                    <li><a href="../">Introduction</a></li>
-                    <li class="active"><a href="./">Getting started</a></li>
-                    <li class=""><a href="http://docs.docker.io/en/latest/concepts/containers/">Documentation</a></li>
-                </ul>
-
-                <div class="social links" style="float: right; margin-top: 14px; margin-left: 12px">
-                    <a class="twitter" href="http://twitter.com/getdocker">Twitter</a>
-                    <a class="github" href="https://github.com/dotcloud/docker/">GitHub</a>
-                </div>
-            </div>
-
-            <div style="margin-left: -12px; float: left;">
-                <a href="../index.html"><img style="margin-top: 12px; height: 38px" src="../_static/img/docker-letters-logo.gif"></a>
-            </div>
-        </div>
-    </div>
-</div>
-
-
-<div class="container">
-    <div class="row">
-        <div class="span12 titlebar"><h1 class="pageheader">GETTING STARTED</h1>
-        </div>
-    </div>
-
-</div>
-
-<div class="container">
-    <div class="alert alert-info">
-        <strong>Docker is still under heavy development.</strong> It should not yet be used in production. Check <a href="http://github.com/dotcloud/docker">the repo</a> for recent progress.
-    </div>
-    <div class="row">
-        <div class="span6">
-            <section class="contentblock">
-                <h2>
-                    <a name="installing-on-ubuntu-1204-and-1210" class="anchor" href="#installing-on-ubuntu-1204-and-1210"><span class="mini-icon mini-icon-link"></span>
-                    </a>Installing on Ubuntu</h2>
-
-                    <p><strong>Requirements</strong></p>
-                    <ul>
-                        <li>Ubuntu 12.04 (LTS) (64-bit)</li>
-                        <li> or Ubuntu 12.10 (quantal) (64-bit)</li>
-                    </ul>
-                <ol>
-                    <li>
-                    <p><strong>Install dependencies</strong></p>
-                    The linux-image-extra package is only needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
-                    <pre>sudo apt-get install linux-image-extra-`uname -r`</pre>
-
-
-                    </li>
-                    <li>
-                        <p><strong>Install Docker</strong></p>
-                        <p>Add the Ubuntu PPA (Personal Package Archive) sources to your apt sources list, update and install.</p>
-                        <p>You may see some warnings that the GPG keys cannot be verified.</p>
-                        <div class="highlight">
-                            <pre>sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >> /etc/apt/sources.list"</pre>
-                            <pre>sudo apt-get update</pre>
-                            <pre>sudo apt-get install lxc-docker</pre>
-                        </div>
-
-
-                    </li>
-
-                    <li>
-                        <p><strong>Run!</strong></p>
-
-                        <div class="highlight">
-                            <pre>docker run -i -t ubuntu /bin/bash</pre>
-                        </div>
-                    </li>
-                    Continue with the <a href="http://docs.docker.io/en/latest/examples/hello_world/">Hello world</a> example.
-                </ol>
-            </section>
-
-            <section class="contentblock">
-                <h2>Contributing to Docker</h2>
-
-                <p>Want to hack on Docker? Awesome! We have some <a href="http://docs.docker.io/en/latest/contributing/contributing/">instructions to get you started</a>. They are probably not perfect, please let us know if anything feels wrong or incomplete.</p>
-            </section>
-
-        </div>
-        <div class="span6">
-            <section class="contentblock">
-                <h2>Quick install on other operating systems</h2>
-                <p><strong>For other operating systems we recommend and provide a streamlined install with virtualbox,
-                    vagrant and an Ubuntu virtual machine.</strong></p>
-
-                <ul>
-                    <li><a href="http://docs.docker.io/en/latest/installation/vagrant/">Mac OS X and other linuxes</a></li>
-                    <li><a href="http://docs.docker.io/en/latest/installation/windows/">Windows</a></li>
-                </ul>
-
-            </section>
-
-            <section class="contentblock">
-                <h2>More resources</h2>
-                <ul>
-                    <li><a href="irc://chat.freenode.net#docker">IRC: docker on freenode</a></li>
-                    <li><a href="http://www.github.com/dotcloud/docker">Github</a></li>
-                    <li><a href="http://stackoverflow.com/tags/docker/">Ask questions on Stackoverflow</a></li>
-                    <li><a href="http://twitter.com/getdocker/">Join the conversation on Twitter</a></li>
-                </ul>
-            </section>
-
-
-            <section class="contentblock">
-                <div id="wufoo-z7x3p3">
-                    Fill out my <a href="http://dotclouddocker.wufoo.com/forms/z7x3p3">online form</a>.
-                </div>
-                <script type="text/javascript">var z7x3p3;(function(d, t) {
-                    var s = d.createElement(t), options = {
-                        'userName':'dotclouddocker',
-                        'formHash':'z7x3p3',
-                        'autoResize':true,
-                        'height':'577',
-                        'async':true,
-                        'header':'show'};
-                    s.src = ('https:' == d.location.protocol ? 'https://' : 'http://') + 'wufoo.com/scripts/embed/form.js';
-                    s.onload = s.onreadystatechange = function() {
-                        var rs = this.readyState; if (rs) if (rs != 'complete') if (rs != 'loaded') return;
-                        try { z7x3p3 = new WufooForm();z7x3p3.initialize(options);z7x3p3.display(); } catch (e) {}};
-                    var scr = d.getElementsByTagName(t)[0], par = scr.parentNode; par.insertBefore(s, scr);
-                })(document, 'script');</script>
-            </section>
-
-        </div>
-    </div>
-</div>
-
-
-<div class="container">
-    <footer id="footer" class="footer">
-        <div class="row">
-            <div class="span12 social">
-
-                Docker is a project by <a href="http://www.dotcloud.com">dotCloud</a>
-
-            </div>
-        </div>
-
-        <div class="row">
-            <div class="emptyspace" style="height: 40px">
-
-            </div>
-        </div>
-
-    </footer>
-</div>
-
-
-<!-- bootstrap javascipts -->
-<script src="../_static/js/vendor/bootstrap.min.js" type="text/javascript"></script>
-
-<!-- Google analytics -->
-<script type="text/javascript">
-
-    var _gaq = _gaq || [];
-    _gaq.push(['_setAccount', 'UA-6096819-11']);
-    _gaq.push(['_setDomainName', 'docker.io']);
-    _gaq.push(['_setAllowLinker', true]);
-    _gaq.push(['_trackPageview']);
-
-    (function() {
-        var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-        ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-        var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-    })();
-
-</script>
-
-
-</body>
-</html>

+ 0 - 314
docs/sources/index.html

@@ -1,314 +0,0 @@
-<!DOCTYPE html>
-<!--[if lt IE 7]>      <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
-<!--[if IE 7]>         <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
-<!--[if IE 8]>         <html class="no-js lt-ie9"> <![endif]-->
-<!--[if gt IE 8]><!-->
-<html class="no-js" xmlns="http://www.w3.org/1999/html"> <!--<![endif]-->
-<head>
-    <meta charset="utf-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
-    <meta name="google-site-verification" content="UxV66EKuPe87dgnH1sbrldrx6VsoWMrx5NjwkgUFxXI" />
-    <title>Docker - the Linux container engine</title>
-
-    <meta name="description" content="Docker encapsulates heterogeneous payloads in standard containers">
-    <meta name="viewport" content="width=device-width">
-
-    <!-- twitter bootstrap -->
-    <link rel="stylesheet" href="_static/css/bootstrap.min.css">
-    <link rel="stylesheet" href="_static/css/bootstrap-responsive.min.css">
-
-    <!-- main style file -->
-    <link rel="stylesheet" href="_static/css/main.css">
-
-    <!-- vendor scripts -->
-    <script src="_static/js/vendor/jquery-1.9.1.min.js" type="text/javascript" ></script>
-    <script src="_static/js/vendor/modernizr-2.6.2-respond-1.1.0.min.js" type="text/javascript" ></script>
-
-    <style>
-        .indexlabel {
-            float: left;
-            width: 150px;
-            display: block;
-            padding: 10px 20px 10px;
-            font-size: 20px;
-            font-weight: 200;
-            background-color: #a30000;
-            color: white;
-            height: 22px;
-        }
-        .searchbutton {
-            font-size: 20px;
-            height: 40px;
-        }
-
-        .debug {
-            border: 1px red dotted;
-        }
-
-    </style>
-
-</head>
-
-
-<body>
-
-<div class="navbar navbar-fixed-top">
-    <div class="navbar-dotcloud">
-        <div class="container" style="text-align: center;">
-
-            <div class="pull-right" >
-                <ul class="nav">
-                    <li class="active"><a href="./">Introduction</a></li>
-                    <li ><a href="gettingstarted/">Getting started</a></li>
-                    <li class=""><a href="http://docs.docker.io/en/latest/concepts/containers/">Documentation</a></li>
-                </ul>
-
-                <div class="social links" style="float: right; margin-top: 14px; margin-left: 12px">
-                    <a class="twitter" href="http://twitter.com/getdocker">Twitter</a>
-                    <a class="github" href="https://github.com/dotcloud/docker/">GitHub</a>
-                </div>
-            </div>
-        </div>
-    </div>
-</div>
-
-
-<div class="container" style="margin-top: 30px;">
-    <div class="row">
-
-        <div class="span12">
-            <section class="contentblock header">
-
-                <div class="span5" style="margin-bottom: 15px;">
-                    <div style="text-align: center;" >
-                        <img src="_static/img/docker_letters_500px.png">
-
-                        <h2>The Linux container engine</h2>
-                    </div>
-
-                    <div style="display: block; text-align: center; margin-top: 20px;">
-
-                        <h5>
-                            Docker is an open-source engine which automates the deployment of applications as highly portable, self-sufficient containers which are independent of hardware, language, framework, packaging system and hosting provider.
-                        </h5>
-
-                    </div>
-
-
-                    <div style="display: block; text-align: center; margin-top: 30px;">
-                        <a class="btn btn-custom btn-large" href="gettingstarted/">Let's get started</a>
-                    </div>
-
-                </div>
-
-                <div class="span6" >
-                    <div class="js-video" >
-                        <iframe width="600" height="360" src="http://www.youtube.com/embed/wW9CAH9nSLs?feature=player_detailpage&rel=0&modestbranding=1&start=11" frameborder="0" allowfullscreen></iframe>
-                    </div>
-                </div>
-
-                <br style="clear: both"/>
-            </section>
-        </div>
-    </div>
-</div>
-
-<div class="container">
-    <div class="row">
-
-        <div class="span6">
-            <section class="contentblock">
-                <h4>Heterogeneous payloads</h4>
-                <p>Any combination of binaries, libraries, configuration files, scripts, virtualenvs, jars, gems, tarballs, you name it. No more juggling between domain-specific tools. Docker can deploy and run them all.</p>
-                <h4>Any server</h4>
-                <p>Docker can run on any x64 machine with a modern linux kernel - whether it's a laptop, a bare metal server or a VM. This makes it perfect for multi-cloud deployments.</p>
-                <h4>Isolation</h4>
-                <p>Docker isolates processes from each other and from the underlying host, using lightweight containers.</p>
-                <h4>Repeatability</h4>
-                <p>Because each container is isolated in its own filesystem, they behave the same regardless of where, when, and alongside what they run.</p>
-            </section>
-        </div>
-        <div class="span6">
-            <section class="contentblock">
-                <h1>New! Docker Index</h1>
-                On the Docker Index you can find and explore pre-made container images. It allows you to share your images and download them.
-
-                <br><br>
-                <a href="https://index.docker.io" target="_blank">
-                    <div class="indexlabel">
-                        DOCKER index
-                    </div>
-                </a>
-                &nbsp;
-                <input type="button" class="searchbutton" type="submit" value="Search images"
-                       onClick="window.open('https://index.docker.io')" />
-
-            </section>
-            <section class="contentblock">
-                <div id="wufoo-z7x3p3">
-                    Fill out my <a href="http://dotclouddocker.wufoo.com/forms/z7x3p3">online form</a>.
-                </div>
-                <script type="text/javascript">var z7x3p3;(function(d, t) {
-                    var s = d.createElement(t), options = {
-                        'userName':'dotclouddocker',
-                        'formHash':'z7x3p3',
-                        'autoResize':true,
-                        'height':'577',
-                        'async':true,
-                        'header':'show'};
-                    s.src = ('https:' == d.location.protocol ? 'https://' : 'http://') + 'wufoo.com/scripts/embed/form.js';
-                    s.onload = s.onreadystatechange = function() {
-                        var rs = this.readyState; if (rs) if (rs != 'complete') if (rs != 'loaded') return;
-                        try { z7x3p3 = new WufooForm();z7x3p3.initialize(options);z7x3p3.display(); } catch (e) {}};
-                    var scr = d.getElementsByTagName(t)[0], par = scr.parentNode; par.insertBefore(s, scr);
-                })(document, 'script');</script>
-            </section>
-        </div>
-    </div>
-
-</div>
-
-<style>
-    .twitterblock {
-        min-height: 75px;
-    }
-
-    .twitterblock img {
-        float: left;
-        margin-right: 10px;
-    }
-
-</style>
-
-
-<div class="container">
-    <div class="row">
-        <div class="span6">
-            <section class="contentblock twitterblock">
-                <img src="https://twimg0-a.akamaihd.net/profile_images/2491994496/rbevyyq6ykp6bnoby2je_bigger.jpeg">
-                <em>John Willis @botchagalupe:</em> IMHO docker is to paas what chef was to Iaas 4 years ago
-            </section>
-        </div>
-        <div class="span6">
-            <section class="contentblock twitterblock">
-                <img src="https://twimg0-a.akamaihd.net/profile_images/3348427561/9d7f08f1e103a16c8debd169301b9944_bigger.jpeg">
-                <em>John Feminella ‏@superninjarobot:</em> So, @getdocker is pure excellence. If you've ever wished for arbitrary, PaaS-agnostic, lxc/aufs Linux containers, this is your jam!
-            </section>
-        </div>
-    </div>
-    <div class="row">
-        <div class="span6">
-            <section class="contentblock twitterblock">
-                <img src="https://si0.twimg.com/profile_images/3408403010/4496ccdd14e9b7285eca04c31a740207_bigger.jpeg">
-                <em>David Romulan ‏@destructuring:</em> I haven't had this much fun since AWS
-            </section>
-        </div>
-        <div class="span6">
-            <section class="contentblock twitterblock">
-                <img src="https://si0.twimg.com/profile_images/780893320/My_Avatar_bigger.jpg">
-                <em>Ricardo Gladwell ‏@rgladwell:</em> wow @getdocker is either amazing or totally stupid
-            </section>
-        </div>
-
-    </div>
-</div>
-
-<div class="container">
-    <div class="row">
-        <div class="span6">
-
-            <section class="contentblock">
-
-                <h2>Notable features</h2>
-
-                <ul>
-                    <li>Filesystem isolation: each process container runs in a completely separate root filesystem.</li>
-                    <li>Resource isolation: system resources like cpu and memory can be allocated differently to each process container, using cgroups.</li>
-                    <li>Network isolation: each process container runs in its own network namespace, with a virtual interface and IP address of its own.</li>
-                    <li>Copy-on-write: root filesystems are created using copy-on-write, which makes deployment extremeley fast, memory-cheap and disk-cheap.</li>
-                    <li>Logging: the standard streams (stdout/stderr/stdin) of each process container is collected and logged for real-time or batch retrieval.</li>
-                    <li>Change management: changes to a container's filesystem can be committed into a new image and re-used to create more containers. No templating or manual configuration required.</li>
-                    <li>Interactive shell: docker can allocate a pseudo-tty and attach to the standard input of any container, for example to run a throwaway interactive shell.</li>
-                </ul>
-
-                <h2>Under the hood</h2>
-
-                <p>Under the hood, Docker is built on the following components:</p>
-
-                <ul>
-                    <li>The <a href="http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c">cgroup</a> and <a href="http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part">namespacing</a> capabilities of the Linux kernel;</li>
-                    <li><a href="http://aufs.sourceforge.net/aufs.html">AUFS</a>, a powerful union filesystem with copy-on-write capabilities;</li>
-                    <li>The <a href="http://golang.org">Go</a> programming language;</li>
-                    <li><a href="http://lxc.sourceforge.net/">lxc</a>, a set of convenience scripts to simplify the creation of linux containers.</li>
-                </ul>
-
-                <h2>Who started it</h2>
-                <p>
-                    Docker is an open-source implementation of the deployment engine which powers <a href="http://dotcloud.com">dotCloud</a>, a popular Platform-as-a-Service.</p>
-
-                <p>It benefits directly from the experience accumulated over several years of large-scale operation and support of hundreds of thousands
-                    of applications and databases.
-                </p>
-
-            </section>
-        </div>
-
-        <div class="span6">
-
-
-            <section class="contentblock">
-                <h3 id="twitter">Twitter</h3>
-                <a class="twitter-timeline" href="https://twitter.com/getdocker" data-widget-id="312730839718957056">Tweets by @getdocker</a>
-                <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0];if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src="//platform.twitter.com/widgets.js";fjs.parentNode.insertBefore(js,fjs);}}(document,"script","twitter-wjs");</script>
-            </section>
-
-        </div>
-    </div>
-
-</div> <!-- end container -->
-
-
-<div class="container">
-    <footer id="footer" class="footer">
-        <div class="row">
-            <div class="span12">
-
-                Docker is a project by <a href="http://www.dotcloud.com">dotCloud</a>
-
-            </div>
-        </div>
-
-        <div class="row">
-            <div class="emptyspace" style="height: 40px">
-
-            </div>
-        </div>
-
-    </footer>
-</div>
-
-
-
-<!-- bootstrap javascipts -->
-<script src="_static/js/vendor/bootstrap.min.js" type="text/javascript"></script>
-
-<!-- Google analytics -->
-<script type="text/javascript">
-
-    var _gaq = _gaq || [];
-    _gaq.push(['_setAccount', 'UA-6096819-11']);
-    _gaq.push(['_setDomainName', 'docker.io']);
-    _gaq.push(['_setAllowLinker', true]);
-    _gaq.push(['_trackPageview']);
-
-    (function() {
-        var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-        ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-        var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-    })();
-
-</script>
-
-
-</body>
-</html>

+ 121 - 19
docs/sources/index.rst

@@ -1,25 +1,127 @@
-:title: docker documentation
-:description: docker documentation
-:keywords:
+:title: Introduction
+:description: An introduction to docker and standard containers?
+:keywords: containers, lxc, concepts, explanation
 
 
-Documentation
-=============
+.. _introduction:
 
 
-This documentation has the following resources:
+Introduction
+============
 
 
-.. toctree::
-   :maxdepth: 1
+Docker - The Linux container runtime
+------------------------------------
 
 
-   concepts/index
-   installation/index
-   examples/index
-   contributing/index
-   commandline/index
-   registry/index
-   index/index
-   builder/index
-   remote-api/index
-   faq
+Docker complements LXC with a high-level API which operates at the process level. It runs unix processes with strong guarantees of isolation and repeatability across servers.
 
 
+Docker is a great building block for automating distributed systems: large-scale web deployments, database clusters, continuous deployment systems, private PaaS, service-oriented architectures, etc.
+
+
+- **Heterogeneous payloads** Any combination of binaries, libraries, configuration files, scripts, virtualenvs, jars, gems, tarballs, you name it. No more juggling between domain-specific tools. Docker can deploy and run them all.
+- **Any server** Docker can run on any x64 machine with a modern linux kernel - whether it's a laptop, a bare metal server or a VM. This makes it perfect for multi-cloud deployments.
+- **Isolation** docker isolates processes from each other and from the underlying host, using lightweight containers.
+- **Repeatability** Because containers are isolated in their own filesystem, they behave the same regardless of where, when, and alongside what they run.
+
+.. image:: concepts/images/lego_docker.jpg
+
+
+What is a Standard Container?
+-----------------------------
+
+Docker defines a unit of software delivery called a Standard Container. The goal of a Standard Container is to encapsulate a software component and all its dependencies in
+a format that is self-describing and portable, so that any compliant runtime can run it without extra dependency, regardless of the underlying machine and the contents of the container.
+
+The spec for Standard Containers is currently work in progress, but it is very straightforward. It mostly defines 1) an image format, 2) a set of standard operations, and 3) an execution environment.
+
+A great analogy for this is the shipping container. Just like Standard Containers are a fundamental unit of software delivery, shipping containers (http://bricks.argz.com/ins/7823-1/12) are a fundamental unit of physical delivery.
+
+Standard operations
+~~~~~~~~~~~~~~~~~~~
+
+Just like shipping containers, Standard Containers define a set of STANDARD OPERATIONS. Shipping containers can be lifted, stacked, locked, loaded, unloaded and labelled. Similarly, standard containers can be started, stopped, copied, snapshotted, downloaded, uploaded and tagged.
+
+
+Content-agnostic
+~~~~~~~~~~~~~~~~~~~
+
+Just like shipping containers, Standard Containers are CONTENT-AGNOSTIC: all standard operations have the same effect regardless of the contents. A shipping container will be stacked in exactly the same way whether it contains Vietnamese powder coffee or spare Maserati parts. Similarly, Standard Containers are started or uploaded in the same way whether they contain a postgres database, a php application with its dependencies and application server, or Java build artifacts.
+
+
+Infrastructure-agnostic
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Both types of containers are INFRASTRUCTURE-AGNOSTIC: they can be transported to thousands of facilities around the world, and manipulated by a wide variety of equipment. A shipping container can be packed in a factory in Ukraine, transported by truck to the nearest routing center, stacked onto a train, loaded into a German boat by an Australian-built crane, stored in a warehouse at a US facility, etc. Similarly, a standard container can be bundled on my laptop, uploaded to S3, downloaded, run and snapshotted by a build server at Equinix in Virginia, uploaded to 10 staging servers in a home-made Openstack cluster, then sent to 30 production instances across 3 EC2 regions.
+
+
+Designed for automation
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Because they offer the same standard operations regardless of content and infrastructure, Standard Containers, just like their physical counterpart, are extremely well-suited for automation. In fact, you could say automation is their secret weapon.
+
+Many things that once required time-consuming and error-prone human effort can now be programmed. Before shipping containers, a bag of powder coffee was hauled, dragged, dropped, rolled and stacked by 10 different people in 10 different locations by the time it reached its destination. 1 out of 50 disappeared. 1 out of 20 was damaged. The process was slow, inefficient and cost a fortune - and was entirely different depending on the facility and the type of goods.
+
+Similarly, before Standard Containers, by the time a software component ran in production, it had been individually built, configured, bundled, documented, patched, vendored, templated, tweaked and instrumented by 10 different people on 10 different computers. Builds failed, libraries conflicted, mirrors crashed, post-it notes were lost, logs were misplaced, cluster updates were half-broken. The process was slow, inefficient and cost a fortune - and was entirely different depending on the language and infrastructure provider.
+
+
+Industrial-grade delivery
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are 17 million shipping containers in existence, packed with every physical good imaginable. Every single one of them can be loaded on the same boats, by the same cranes, in the same facilities, and sent anywhere in the World with incredible efficiency. It is embarrassing to think that a 30 ton shipment of coffee can safely travel half-way across the World in *less time* than it takes a software team to deliver its code from one datacenter to another sitting 10 miles away.
+
+With Standard Containers we can put an end to that embarrassment, by making INDUSTRIAL-GRADE DELIVERY of software a reality.
+
+
+Standard Container Specification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+(TODO)
+
+Image format
+~~~~~~~~~~~~
+
+Standard operations
+~~~~~~~~~~~~~~~~~~~
+
+-  Copy
+-  Run
+-  Stop
+-  Wait
+-  Commit
+-  Attach standard streams
+-  List filesystem changes
+-  ...
+
+Execution environment
+~~~~~~~~~~~~~~~~~~~~~
+
+Root filesystem
+^^^^^^^^^^^^^^^
+
+Environment variables
+^^^^^^^^^^^^^^^^^^^^^
+
+Process arguments
+^^^^^^^^^^^^^^^^^
+
+Networking
+^^^^^^^^^^
+
+Process namespacing
+^^^^^^^^^^^^^^^^^^^
+
+Resource limits
+^^^^^^^^^^^^^^^
+
+Process monitoring
+^^^^^^^^^^^^^^^^^^
+
+Logging
+^^^^^^^
+
+Signals
+^^^^^^^
+
+Pseudo-terminal allocation
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Security
+^^^^^^^^
 
 
-.. image:: http://www.docker.io/_static/lego_docker.jpg

+ 0 - 15
docs/sources/index/index.rst

@@ -1,15 +0,0 @@
-:title: Docker Index documentation
-:description: Documentation for docker Index
-:keywords: docker, index, api
-
-
-
-Index
-=====
-
-Contents:
-
-.. toctree::
-   :maxdepth: 2
-
-   search

+ 23 - 0
docs/sources/index/variable.rst

@@ -0,0 +1,23 @@
+=================================
+Docker Index Environment Variable
+=================================
+
+Variable
+--------
+
+.. code-block:: sh
+
+    DOCKER_INDEX_URL
+
+Setting this environment variable on the docker server will change the URL of the docker index.
+This address is used in commands such as ``docker login``, ``docker push`` and ``docker pull``.
+The docker daemon doesn't need to be restarted for this parameter to take effect.
+
+Example
+-------
+
+.. code-block:: sh
+
+    docker -d &
+    export DOCKER_INDEX_URL="https://index.docker.io"
+

+ 1 - 1
docs/sources/installation/amazon.rst

@@ -68,7 +68,7 @@ Docker can now be installed on Amazon EC2 with a single vagrant command. Vagrant
    If it stalls indefinitely on ``[default] Waiting for SSH to become available...``, Double check your default security
    If it stalls indefinitely on ``[default] Waiting for SSH to become available...``, Double check your default security
    zone on AWS includes rights to SSH (port 22) to your container.
    zone on AWS includes rights to SSH (port 22) to your container.
 
 
-   If you have an advanced AWS setup, you might want to have a look at the https://github.com/mitchellh/vagrant-aws
+   If you have an advanced AWS setup, you might want to have a look at https://github.com/mitchellh/vagrant-aws
 
 
 7. Connect to your machine
 7. Connect to your machine
 
 

+ 27 - 17
docs/sources/installation/binaries.rst

@@ -5,48 +5,58 @@ Binaries
 
 
   **Please note this project is currently under heavy development. It should not be used in production.**
   **Please note this project is currently under heavy development. It should not be used in production.**
 
 
+**This instruction set is meant for hackers who want to try out Docker on a variety of environments.**
 
 
 Right now, the officially supported distributions are:
 Right now, the officially supported distributions are:
 
 
-- Ubuntu 12.04 (precise LTS) (64-bit)
-- Ubuntu 12.10 (quantal) (64-bit)
+- :ref:`ubuntu_precise`
+- :ref:`ubuntu_raring`
 
 
 
 
-Install dependencies:
----------------------
+But we know people have had success running it under
+
+- Debian
+- Suse
+- :ref:`arch_linux`
+
 
 
-::
+Dependencies:
+-------------
 
 
-    sudo apt-get install lxc bsdtar
-    sudo apt-get install linux-image-extra-`uname -r`
+* 3.8 Kernel
+* AUFS filesystem support
+* lxc
+* bsdtar
 
 
-The linux-image-extra package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
 
 
-Install the docker binary:
+Get the docker binary:
+----------------------
 
 
-::
+.. code-block:: bash
 
 
     wget http://get.docker.io/builds/Linux/x86_64/docker-latest.tgz
     wget http://get.docker.io/builds/Linux/x86_64/docker-latest.tgz
     tar -xf docker-latest.tgz
     tar -xf docker-latest.tgz
-    sudo cp ./docker-latest/docker /usr/local/bin
-
-Note: docker currently only supports 64-bit Linux hosts.
 
 
 
 
 Run the docker daemon
 Run the docker daemon
 ---------------------
 ---------------------
 
 
-::
+.. code-block:: bash
 
 
-    sudo docker -d &
+    # start the docker in daemon mode from the directory you unpacked
+    sudo ./docker -d &
 
 
 
 
 Run your first container!
 Run your first container!
 -------------------------
 -------------------------
 
 
-::
+.. code-block:: bash
+
+    # check your docker version
+    ./docker version
 
 
-    docker run -i -t ubuntu /bin/bash
+    # run a container and open an interactive shell in the container
+    ./docker run -i -t ubuntu /bin/bash
 
 
 
 
 
 

+ 3 - 1
docs/sources/installation/index.rst

@@ -14,8 +14,10 @@ Contents:
 
 
    ubuntulinux
    ubuntulinux
    binaries
    binaries
-   archlinux
    vagrant
    vagrant
    windows
    windows
    amazon
    amazon
+   rackspace
+   archlinux
    upgrading
    upgrading
+   kernel

+ 149 - 0
docs/sources/installation/kernel.rst

@@ -0,0 +1,149 @@
+.. _kernel:
+
+Kernel Requirements
+===================
+
+  The officially supported kernel is the one recommended by the
+  :ref:`ubuntu_linux` installation path. It is the one that most developers
+  will use, and the one that receives the most attention from the core
+  contributors. If you decide to go with a different kernel and hit a bug,
+  please try to reproduce it with the official kernels first.
+
+If for some reason you cannot or do not want to use the "official" kernels,
+here is some technical background about the features (both optional and
+mandatory) that docker needs to run successfully.
+
+In short, you need kernel version 3.8 (or above), compiled to include
+`AUFS support <http://aufs.sourceforge.net/>`_. Of course, you need to
+enable cgroups and namespaces.
+
+
+Namespaces and Cgroups
+----------------------
+
+You need to enable namespaces and cgroups, to the extent of what is needed
+to run LXC containers. Technically, while namespaces have been introduced
+in the early 2.6 kernels, we do not advise to try any kernel before 2.6.32
+to run LXC containers. Note that 2.6.32 has some documented issues regarding
+network namespace setup and teardown; those issues are not a risk if you
+run containers in a private environment, but can lead to denial-of-service
+attacks if you want to run untrusted code in your containers. For more details,
+see `LP#720095 <https://bugs.launchpad.net/ubuntu/+source/linux/+bug/720095>`_.
+
+Kernels 2.6.38, and every version since 3.2, have been deployed successfully
+to run containerized production workloads. Feature-wise, there is no huge
+improvement between 2.6.38 and up to 3.6 (as far as docker is concerned!).
+
+Starting with version 3.7, the kernel has basic support for
+`Checkpoint/Restore In Userspace <http://criu.org/>`_, which is not used by
+docker at this point, but allows to suspend the state of a container to
+disk and resume it later.
+
+Version 3.8 provides improvements in stability, which are deemed necessary
+for the operation of docker. Versions 3.2 to 3.5 have been shown to
+exhibit a reproducible bug (for more details, see issue
+`#407 <https://github.com/dotcloud/docker/issues/407>`_).
+
+Version 3.8 also brings better support for the
+`setns() syscall <http://lwn.net/Articles/531381/>`_ -- but this should not
+be a concern since docker does not leverage this feature for now.
+
+If you want a technical overview about those concepts, you might
+want to check those articles on dotCloud's blog:
+`about namespaces <http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part>`_
+and `about cgroups <http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c>`_.
+
+
+Important Note About Pre-3.8 Kernels
+------------------------------------
+
+As mentioned above, kernels before 3.8 are not stable when used with docker.
+In some circumstances, you will experience kernel "oopses", or even crashes.
+The symptoms include:
+
+- a container being killed in the middle of an operation (e.g. an ``apt-get``
+  command doesn't complete);
+- kernel messages including mentioning calls to ``mntput`` or
+  ``d_hash_and_lookup``;
+- kernel crash causing the machine to freeze for a few minutes, or even
+  completely.
+
+While it is still possible to use older kernels for development, it is
+really not advised to do so.
+
+Docker checks the kernel version when it starts, and emits a warning if it
+detects something older than 3.8.
+
+See issue `#407 <https://github.com/dotcloud/docker/issues/407>`_ for details.
+
+
+Extra Cgroup Controllers
+------------------------
+
+Most control groups can be enabled or disabled individually. For instance,
+you can decide that you do not want to compile support for the CPU or memory
+controller. In some cases, the feature can be enabled or disabled at boot
+time. It is worth mentioning that some distributions (like Debian) disable
+"expensive" features, like the memory controller, because they can have
+a significant performance impact.
+
+In the specific case of the memory cgroup, docker will detect if the cgroup
+is available or not. If it's not, it will print a warning, and it won't
+use the feature. If you want to enable that feature -- read on!
+
+
+Memory and Swap Accounting on Debian/Ubuntu
+-------------------------------------------
+
+If you use Debian or Ubuntu kernels, and want to enable memory and swap
+accounting, you must add the following command-line parameters to your kernel::
+
+    cgroup_enable=memory swapaccount
+
+On Debian or Ubuntu systems, if you use the default GRUB bootloader, you can
+add those parameters by editing ``/etc/default/grub`` and extending
+``GRUB_CMDLINE_LINUX``. Look for the following line::
+
+    GRUB_CMDLINE_LINUX=""
+
+And replace it by the following one::
+
+    GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount"
+
+Then run ``update-grub``, and reboot.
+
+
+AUFS
+----
+
+Docker currently relies on AUFS, an unioning filesystem.
+While AUFS is included in the kernels built by the Debian and Ubuntu
+distributions, it is not part of the standard kernel. This means that if
+you decide to roll your own kernel, you will have to patch your
+kernel tree to add AUFS. The process is documented on
+`AUFS webpage <http://aufs.sourceforge.net/>`_.
+
+Note: the AUFS patch is fairly intrusive, but for the record, people have
+successfully applied GRSEC and AUFS together, to obtain hardened production
+kernels.
+
+If you want more information about that topic, there is an
+`article about AUFS on dotCloud's blog 
+<http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-34-a>`_.
+
+
+BTRFS, ZFS, OverlayFS...
+------------------------
+
+There is ongoing development on docker, to implement support for
+`BTRFS <http://en.wikipedia.org/wiki/Btrfs>`_
+(see github issue `#443 <https://github.com/dotcloud/docker/issues/443>`_).
+
+People have also showed interest for `ZFS <http://en.wikipedia.org/wiki/ZFS>`_
+(using e.g. `ZFS-on-Linux <http://zfsonlinux.org/>`_) and OverlayFS.
+The latter is functionally close to AUFS, and it might end up being included
+in the stock kernel; so it's a strong candidate!
+
+Would you like to `contribute
+<https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md>`_
+support for your favorite filesystem?

+ 91 - 0
docs/sources/installation/rackspace.rst

@@ -0,0 +1,91 @@
+===============
+Rackspace Cloud
+===============
+
+  Please note this is a community contributed installation path. The only 'official' installation is using the
+  :ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
+
+
+Installing Docker on Ubuntu provided by Rackspace is pretty straightforward, and you should mostly be able to follow the
+:ref:`ubuntu_linux` installation guide.
+
+**However, there is one caveat:**
+
+If you are using any linux not already shipping with the 3.8 kernel you will need to install it. And this is a little
+more difficult on Rackspace.
+
+Rackspace boots their servers using grub's menu.lst and does not like non 'virtual' packages (e.g. xen compatible)
+kernels there, although they do work. This makes ``update-grub`` to not have the expected result, and you need to
+set the kernel manually.
+
+**Do not attempt this on a production machine!**
+
+.. code-block:: bash
+
+    # update apt
+    apt-get update
+
+    # install the new kernel
+    apt-get install linux-generic-lts-raring
+
+
+Great, now you have kernel installed in /boot/, next is to make it boot next time.
+
+.. code-block:: bash
+
+    # find the exact names
+    find /boot/ -name '*3.8*'
+
+    # this should return some results
+
+
+Now you need to manually edit /boot/grub/menu.lst, you will find a section at the bottom with the existing options.
+Copy the top one and substitute the new kernel into that. Make sure the new kernel is on top, and double check kernel
+and initrd point to the right files.
+
+Take special care to double-check the kernel and initrd entries.
+
+.. code-block:: bash
+
+    # now edit /boot/grub/menu.lst
+    vi /boot/grub/menu.lst
+
+It will probably look something like this:
+
+::
+
+     ## ## End Default Options ##
+
+     title		Ubuntu 12.04.2 LTS, kernel 3.8.x generic
+     root		(hd0)
+     kernel		/boot/vmlinuz-3.8.0-19-generic root=/dev/xvda1 ro quiet splash console=hvc0
+     initrd		/boot/initrd.img-3.8.0-19-generic
+
+     title		Ubuntu 12.04.2 LTS, kernel 3.2.0-38-virtual
+     root		(hd0)
+     kernel		/boot/vmlinuz-3.2.0-38-virtual root=/dev/xvda1 ro quiet splash console=hvc0
+     initrd		/boot/initrd.img-3.2.0-38-virtual
+
+     title		Ubuntu 12.04.2 LTS, kernel 3.2.0-38-virtual (recovery mode)
+     root		(hd0)
+     kernel		/boot/vmlinuz-3.2.0-38-virtual root=/dev/xvda1 ro quiet splash  single
+     initrd		/boot/initrd.img-3.2.0-38-virtual
+
+
+Reboot server (either via command line or console)
+
+.. code-block:: bash
+
+   # reboot
+
+Verify the kernel was updated
+
+.. code-block:: bash
+
+    uname -a
+    # Linux docker-12-04 3.8.0-19-generic #30~precise1-Ubuntu SMP Wed May 1 22:26:36 UTC 2013 x86_64 x86_64 x86_64 GNU/Linux
+
+    # nice! 3.8.
+
+
+Now you can finish with the :ref:`ubuntu_linux` instructions.

+ 73 - 11
docs/sources/installation/ubuntulinux.rst

@@ -5,20 +5,39 @@ Ubuntu Linux
 
 
   **Please note this project is currently under heavy development. It should not be used in production.**
   **Please note this project is currently under heavy development. It should not be used in production.**
 
 
+Right now, the officially supported distributions are:
 
 
-Right now, the officially supported distributions are:
+- :ref:`ubuntu_precise`
+- :ref:`ubuntu_raring`
+
+Docker has the following dependencies
+
+* Linux kernel 3.8
+* AUFS file system support (we are working on BTRFS support as an alternative)
+
+.. _ubuntu_precise:
+
+Ubuntu Precise 12.04 (LTS) (64-bit)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This installation path should work at all times.
 
 
-- Ubuntu 12.04 (precise LTS) (64-bit)
-- Ubuntu 12.10 (quantal) (64-bit)
 
 
 Dependencies
 Dependencies
 ------------
 ------------
 
 
-The linux-image-extra package is only needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
+**Linux kernel 3.8**
+
+Due to a bug in LXC, Docker works best on the 3.8 kernel. Precise comes with a 3.2 kernel, so we need to upgrade it. The kernel we install comes with AUFS built in.
+
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-   sudo apt-get install linux-image-extra-`uname -r` lxc bsdtar
+   # install the backported kernel
+   sudo apt-get update && sudo apt-get install linux-image-3.8.0-19-generic
+
+   # reboot
+   sudo reboot
 
 
 
 
 Installation
 Installation
@@ -28,34 +47,77 @@ Docker is available as a Ubuntu PPA (Personal Package Archive),
 `hosted on launchpad  <https://launchpad.net/~dotcloud/+archive/lxc-docker>`_
 `hosted on launchpad  <https://launchpad.net/~dotcloud/+archive/lxc-docker>`_
 which makes installing Docker on Ubuntu very easy.
 which makes installing Docker on Ubuntu very easy.
 
 
+.. code-block:: bash
 
 
+   # Add the PPA sources to your apt sources list.
+   sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' > /etc/apt/sources.list.d/lxc-docker.list"
 
 
-Add the custom package sources to your apt sources list. Copy and paste the following lines at once.
+   # Update your sources, you will see a warning.
+   sudo apt-get update
+
+   # Install, you will see another warning that the package cannot be authenticated. Confirm install.
+   sudo apt-get install lxc-docker
+
+Verify it worked
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-   sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >> /etc/apt/sources.list"
+   # download the base 'ubuntu' container and run bash inside it while setting up an interactive shell
+   docker run -i -t ubuntu /bin/bash
+
+   # type 'exit' to exit
+
+
+**Done!**, now continue with the :ref:`hello_world` example.
 
 
+.. _ubuntu_raring:
 
 
-Update your sources. You will see a warning that GPG signatures cannot be verified.
+Ubuntu Raring 13.04 (64 bit)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Dependencies
+------------
+
+**AUFS filesystem support**
+
+Ubuntu Raring already comes with the 3.8 kernel, so we don't need to install it. However, not all systems
+have AUFS filesystem support enabled, so we need to install it.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
    sudo apt-get update
    sudo apt-get update
+   sudo apt-get install linux-image-extra-`uname -r`
+
+Installation
+------------
+
+Docker is available as a Ubuntu PPA (Personal Package Archive),
+`hosted on launchpad  <https://launchpad.net/~dotcloud/+archive/lxc-docker>`_
+which makes installing Docker on Ubuntu very easy.
 
 
 
 
-Now install it, you will see another warning that the package cannot be authenticated. Confirm install.
+Add the custom package sources to your apt sources list.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    curl get.docker.io | sudo sh -x
+   # add the sources to your apt
+   sudo add-apt-repository ppa:dotcloud/lxc-docker
+
+   # update
+   sudo apt-get update
+
+   # install
+   sudo apt-get install lxc-docker
 
 
 
 
 Verify it worked
 Verify it worked
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-   docker
+   # download the base 'ubuntu' container and run bash inside it while setting up an interactive shell
+   docker run -i -t ubuntu /bin/bash
+
+   # type exit to exit
 
 
 
 
 **Done!**, now continue with the :ref:`hello_world` example.
 **Done!**, now continue with the :ref:`hello_world` example.

+ 30 - 15
docs/sources/installation/upgrading.rst

@@ -3,38 +3,53 @@
 Upgrading
 Upgrading
 ============
 ============
 
 
-These instructions are for upgrading your Docker binary for when you had a custom (non package manager) installation.
-If you istalled docker using apt-get, use that to upgrade.
+**These instructions are for upgrading Docker**
 
 
 
 
-Get the latest docker binary:
+After normal installation
+-------------------------
 
 
-::
+If you installed Docker normally using apt-get or used Vagrant, use apt-get to upgrade.
 
 
-  wget http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-latest.tgz
+.. code-block:: bash
 
 
+   # update your sources list
+   sudo apt-get update
 
 
+   # install the latest
+   sudo apt-get install lxc-docker
 
 
-Unpack it to your current dir
 
 
-::
+After manual installation
+-------------------------
 
 
-   tar -xf docker-latest.tgz
+If you installed the Docker binary
+
+
+.. code-block:: bash
+
+   # kill the running docker daemon
+   killall docker
 
 
 
 
-Stop your current daemon. How you stop your daemon depends on how you started it.
+.. code-block:: bash
 
 
-- If you started the daemon manually (``sudo docker -d``), you can just kill the process: ``killall docker``
-- If the process was started using upstart (the ubuntu startup daemon), you may need to use that to stop it
+   # get the latest binary
+   wget http://get.docker.io/builds/Linux/x86_64/docker-latest.tgz
 
 
 
 
-Start docker in daemon mode (-d) and disconnect (&) starting ./docker will start the version in your current dir rather
-than the one in your PATH.
+.. code-block:: bash
+
+   # Unpack it to your current dir
+   tar -xf docker-latest.tgz
+
 
 
-Now start the daemon
+Start docker in daemon mode (-d) and disconnect (&) starting ./docker will start the version in your current dir rather than a version which
+might reside in your path.
 
 
-::
+.. code-block:: bash
 
 
+   # start the new version
    sudo ./docker -d &
    sudo ./docker -d &
 
 
 
 

+ 3 - 7
docs/sources/installation/vagrant.rst

@@ -1,14 +1,10 @@
 
 
 .. _install_using_vagrant:
 .. _install_using_vagrant:
 
 
-Using Vagrant
-=============
+Using Vagrant (Mac, Linux)
+==========================
 
 
-  Please note this is a community contributed installation path. The only 'official' installation is using the
-  :ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
-
-**Requirements:**
-This guide will setup a new virtual machine with docker installed on your computer. This works on most operating
+This guide will setup a new virtualbox virtual machine with docker installed on your computer. This works on most operating
 systems, including MacOX, Windows, Linux, FreeBSD and others. If you can install these and have at least 400Mb RAM
 systems, including MacOX, Windows, Linux, FreeBSD and others. If you can install these and have at least 400Mb RAM
 to spare you should be good.
 to spare you should be good.
 
 

+ 2 - 2
docs/sources/installation/windows.rst

@@ -3,8 +3,8 @@
 :keywords: Docker, Docker documentation, Windows, requirements, virtualbox, vagrant, git, ssh, putty, cygwin
 :keywords: Docker, Docker documentation, Windows, requirements, virtualbox, vagrant, git, ssh, putty, cygwin
 
 
 
 
-Windows (with Vagrant)
-======================
+Using Vagrant (Windows)
+=======================
 
 
   Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version
   Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version
   may be out of date because it depends on some binaries to be updated and published
   may be out of date because it depends on some binaries to be updated and published

+ 0 - 6
docs/sources/nginx.conf

@@ -1,6 +0,0 @@
-
-# rule to redirect original links created when hosted on github pages
-rewrite ^/documentation/(.*).html http://docs.docker.io/en/latest/$1/ permanent;
-
-# rewrite the stuff which was on the current page
-rewrite ^/gettingstarted.html$ /gettingstarted/ permanent;

+ 0 - 15
docs/sources/registry/index.rst

@@ -1,15 +0,0 @@
-:title: docker Registry documentation
-:description: Documentation for docker Registry and Registry API
-:keywords: docker, registry, api, index
-
-
-
-Registry
-========
-
-Contents:
-
-.. toctree::
-   :maxdepth: 2
-
-   api

+ 0 - 15
docs/sources/remote-api/index.rst

@@ -1,15 +0,0 @@
-:title: docker Remote API documentation
-:description: Documentation for docker Remote API
-:keywords: docker, rest, api, http
-
-
-
-Remote API
-==========
-
-Contents:
-
-.. toctree::
-   :maxdepth: 2
-
-   api

+ 22 - 0
docs/sources/toctree.rst

@@ -0,0 +1,22 @@
+:title: docker documentation
+:description: docker documentation
+:keywords:
+
+Documentation
+=============
+
+This documentation has the following resources:
+
+.. toctree::
+   :titlesonly:
+
+   concepts/index
+   installation/index
+   use/index
+   examples/index
+   commandline/index
+   contributing/index
+   api/index
+   faq
+
+.. image:: concepts/images/lego_docker.jpg

+ 2 - 2
docs/sources/commandline/basics.rst → docs/sources/use/basics.rst

@@ -76,8 +76,8 @@ Expose a service on a TCP port
   echo "Daemon received: $(docker logs $JOB)"
   echo "Daemon received: $(docker logs $JOB)"
 
 
 
 
-Committing (saving) an image
------------------------------
+Committing (saving) a container state
+-------------------------------------
 
 
 Save your containers state to a container image, so the state can be re-used.
 Save your containers state to a container image, so the state can be re-used.
 
 

+ 4 - 3
docs/sources/builder/basics.rst → docs/sources/use/builder.rst

@@ -107,8 +107,7 @@ The `ENV` instruction sets the environment variable `<key>` to the value
 functionally equivalent to prefixing the command with `<key>=<value>`
 functionally equivalent to prefixing the command with `<key>=<value>`
 
 
 .. note::
 .. note::
-    The environment variables are local to the Dockerfile, they will not persist
-    when a container is run from the resulting image.
+    The environment variables will persist when a container is run from the resulting image.
 
 
 2.7 INSERT
 2.7 INSERT
 ----------
 ----------
@@ -122,6 +121,8 @@ curl was installed within the image.
 .. note::
 .. note::
     The path must include the file name.
     The path must include the file name.
 
 
+.. note::
+    This instruction has been temporarily disabled.
 
 
 3. Dockerfile Examples
 3. Dockerfile Examples
 ======================
 ======================
@@ -179,4 +180,4 @@ curl was installed within the image.
     # Will output something like ===> 695d7793cbe4
     # Will output something like ===> 695d7793cbe4
 
 
     # You'll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with
     # You'll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with
-    # /oink.
+    # /oink.

+ 19 - 0
docs/sources/use/index.rst

@@ -0,0 +1,19 @@
+:title: docker documentation
+:description: -- todo: change me
+:keywords: todo: change me
+
+
+
+Use
+========
+
+Contents:
+
+.. toctree::
+   :maxdepth: 1
+
+   basics
+   workingwithrepository
+   builder
+   puppet
+

+ 109 - 0
docs/sources/use/puppet.rst

@@ -0,0 +1,109 @@
+
+.. _install_using_puppet:
+
+Using Puppet
+=============
+
+.. note::
+
+   Please note this is a community contributed installation path. The only 'official' installation is using the
+   :ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
+
+Requirements
+------------
+
+To use this guide you'll need a working installation of Puppet from `Puppetlabs <https://www.puppetlabs.com>`_ .
+
+The module also currently uses the official PPA so only works with Ubuntu.
+
+Installation
+------------
+
+The module is available on the `Puppet Forge <https://forge.puppetlabs.com/garethr/docker/>`_
+and can be installed using the built-in module tool.
+
+   .. code-block:: bash
+
+      puppet module install garethr/docker
+
+It can also be found on `GitHub <https://www.github.com/garethr/garethr-docker>`_ 
+if you would rather download the source.
+
+Usage
+-----
+
+The module provides a puppet class for installing docker and two defined types
+for managing images and containers.
+
+Installation
+~~~~~~~~~~~~
+
+   .. code-block:: ruby
+
+      include 'docker'
+
+Images
+~~~~~~
+
+The next step is probably to install a docker image, for this we have a
+defined type which can be used like so:
+
+   .. code-block:: ruby
+
+      docker::image { 'base': }
+
+This is equivalent to running:
+
+   .. code-block:: bash
+
+      docker pull base
+
+Note that it will only pull the image if an image of that name does not already exist.
+This is downloading a large binary so on first run can take a while.
+For that reason this define turns off the default 5 minute timeout
+for exec. Note that you can also remove images you no longer need with:
+
+   .. code-block:: ruby
+
+      docker::image { 'base':
+        ensure => 'absent',
+      }
+
+Containers
+~~~~~~~~~~
+
+Now you have an image you can run commands within a container managed by
+docker.
+
+   .. code-block:: ruby
+
+      docker::run { 'helloworld':
+        image   => 'base',
+        command => '/bin/sh -c "while true; do echo hello world; sleep 1; done"',
+      }
+
+This is equivalent to running the following command, but under upstart:
+
+   .. code-block:: bash
+
+      docker run -d base /bin/sh -c "while true; do echo hello world; sleep 1; done"
+
+Run also contains a number of optional parameters:
+
+   .. code-block:: ruby
+
+      docker::run { 'helloworld':
+        image        => 'base',
+        command      => '/bin/sh -c "while true; do echo hello world; sleep 1; done"',
+        ports        => ['4444', '4555'],
+        volumes      => ['/var/lib/counchdb', '/var/log'],
+        volumes_from => '6446ea52fbc9',
+        memory_limit => 10485760, # bytes 
+        username     => 'example',
+        hostname     => 'example.com',
+        env          => ['FOO=BAR', 'FOO2=BAR2'],
+        dns          => ['8.8.8.8', '8.8.4.4'],
+      }
+
+Note that ports, env, dns and volumes can be set with either a single string
+or as above with an array of values.

+ 75 - 0
docs/sources/use/workingwithrepository.rst

@@ -0,0 +1,75 @@
+.. _working_with_the_repository:
+
+Working with the repository
+============================
+
+
+Top-level repositories and user repositories
+--------------------------------------------
+
+Generally, there are two types of repositories: Top-level repositories which are controlled by the people behind
+Docker, and user repositories.
+
+* Top-level repositories can easily be recognized by not having a / (slash) in their name. These repositories can
+  generally be trusted.
+* User repositories always come in the form of <username>/<repo_name>. This is what your published images will look like.
+* User images are not checked, it is therefore up to you whether or not you trust the creator of this image.
+
+
+Find public images available on the index
+-----------------------------------------
+
+Search by name, namespace or description
+
+.. code-block:: bash
+
+    docker search <value>
+
+
+Download them simply by their name
+
+.. code-block:: bash
+
+    docker pull <value>
+
+
+Very similarly you can search for and browse the index online on https://index.docker.io
+
+
+Connecting to the repository
+----------------------------
+
+You can create a user on the central docker repository online, or by running
+
+.. code-block:: bash
+
+    docker login
+
+
+If your username does not exist it will prompt you to also enter a password and your e-mail address. It will then
+automatically log you in.
+
+
+Committing a container to a named image
+---------------------------------------
+
+In order to commit to the repository it is required to have committed your container to an image with your namespace.
+
+.. code-block:: bash
+
+    # for example docker commit $CONTAINER_ID dhrp/kickassapp
+    docker commit <container_id> <your username>/<some_name>
+
+
+Pushing a container to the repository
+-----------------------------------------
+
+In order to push an image to the repository you need to have committed your container to a named image (see above)
+
+Now you can commit this image to the repository
+
+.. code-block:: bash
+
+    # for example docker push dhrp/kickassapp
+    docker push <image-name>
+

+ 78 - 60
docs/theme/docker/layout.html

@@ -66,7 +66,7 @@
                 <ul class="nav">
                 <ul class="nav">
                     <li><a href="http://www.docker.io/">Introduction</a></li>
                     <li><a href="http://www.docker.io/">Introduction</a></li>
                     <li><a href="http://www.docker.io/gettingstarted/">Getting started</a></li>
                     <li><a href="http://www.docker.io/gettingstarted/">Getting started</a></li>
-                    <li class="active"><a href="{{ pathto('concepts/containers/', 1) }}">Documentation</a></li>
+                    <li class="active"><a href="http://docs.docker.io/en/latest/">Documentation</a></li>
                 </ul>
                 </ul>
                 <div class="social links" style="float: right; margin-top: 14px; margin-left: 12px">
                 <div class="social links" style="float: right; margin-top: 14px; margin-left: 12px">
                     <a class="twitter" href="http://twitter.com/getdocker">Twitter</a>
                     <a class="twitter" href="http://twitter.com/getdocker">Twitter</a>
@@ -155,69 +155,87 @@
 
 
 
 
   <!-- script which should be loaded after everything else -->
   <!-- script which should be loaded after everything else -->
-  <script type="text/javascript">
+<script type="text/javascript">
 
 
 
 
-      var shiftWindow = function() {
-          scrollBy(0, -70);
-          console.log("window shifted")
-      };
-      window.addEventListener("hashchange", shiftWindow);
+    var shiftWindow = function() {
+        scrollBy(0, -70);
+        console.log("window shifted")
+    };
+    window.addEventListener("hashchange", shiftWindow);
 
 
-      function loadShift() {
-          if (window.location.hash) {
-              console.log("window has hash");
+    function loadShift() {
+        if (window.location.hash) {
+            console.log("window has hash");
             shiftWindow();
             shiftWindow();
-          }
-      }
-
-      $(window).load(function() {
-          loadShift();
-          console.log("late loadshift");
-      });
-
-      $(function(){
-
-          // sidebar accordian-ing
-          // don't apply on last object (it should be the FAQ)
-
-          var elements = $('.toctree-l2');
-          for (var i = 0; i < elements.length; i += 1) { var current = $(elements[i]); console.log(current); current.children('ul').hide();}
-
-
-          // set initial collapsed state
-          var elements = $('.toctree-l1');
-          for (var i = 0; i < elements.length; i += 1) {
-              var current = $(elements[i]);
-              if (current.hasClass('current')) {
-                  // do nothing
-              } else {
-                  // collapse children
-                  current.children('ul').hide();
-              }
-          }
-
-          // attached handler on click
-          $('.sidebar > ul > li > a').not(':last').click(function(){
-              if ($(this).parent().hasClass('current')) {
-                  $(this).parent().children('ul').slideUp(200, function() {
-                      $(this).parent().removeClass('current'); // toggle after effect
-                  });
-              } else {
-                  //$('.sidebar > ul > li > ul').slideUp(100);
-                  var current = $(this);
-                  setTimeout(function() {
-                      $('.sidebar > ul > li').removeClass('current');
-                      current.parent().addClass('current'); // toggle before effect
-                      current.parent().children('ul').hide();
-                      current.parent().children('ul').slideDown(200);
-                  }, 100);
-              }
-              return false;
-          });
-
-      });
-  </script>
+        }
+    }
+
+    $(window).load(function() {
+        loadShift();
+        console.log("late loadshift");
+    });
+
+    $(function(){
+
+        // sidebar accordian-ing
+        // don't apply on last object (it should be the FAQ)
+
+        // define an array to which all opened items should be added
+        var openmenus = [];
+
+        var elements = $('.toctree-l2');
+        for (var i = 0; i < elements.length; i += 1) { var current = $(elements[i]); current.children('ul').hide();}
+
+
+        // set initial collapsed state
+        var elements = $('.toctree-l1');
+        for (var i = 0; i < elements.length; i += 1) {
+            var current = $(elements[i]);
+            if (current.hasClass('current')) {
+
+                currentlink = current.children('a')[0].href;
+                openmenus.push(currentlink);
+
+                // do nothing
+            } else {
+                // collapse children
+                current.children('ul').hide();
+            }
+        }
+
+        // attached handler on click
+        $('.sidebar > ul > li > a').not(':last').click(function(){
+
+            var index = $.inArray(this.href, openmenus)
+
+            if (index > -1) {
+                console.log(index);
+                openmenus.splice(index, 1);
+
+
+                $(this).parent().children('ul').slideUp(200, function() {
+                    // $(this).parent().removeClass('current'); // toggle after effect
+                });
+            }
+            else {
+                openmenus.push(this.href);
+                console.log(this);
+
+                var current = $(this);
+
+                setTimeout(function() {
+                    $('.sidebar > ul > li').removeClass('current');
+                    current.parent().addClass('current'); // toggle before effect
+                    current.parent().children('ul').hide();
+                    current.parent().children('ul').slideDown(200);
+                }, 100);
+            }
+            return false;
+        });
+
+    });
+</script>
 
 
     <!-- Google analytics -->
     <!-- Google analytics -->
     <script type="text/javascript">
     <script type="text/javascript">

BIN
docs/theme/docker/static/img/hiring_graphic.png


+ 4 - 2
docs/website/gettingstarted/index.html

@@ -36,7 +36,7 @@
                 <ul class="nav">
                 <ul class="nav">
                     <li><a href="../">Introduction</a></li>
                     <li><a href="../">Introduction</a></li>
                     <li class="active"><a href="">Getting started</a></li>
                     <li class="active"><a href="">Getting started</a></li>
-                    <li class=""><a href="http://docs.docker.io/en/latest/concepts/introduction/">Documentation</a></li>
+                    <li class=""><a href="http://docs.docker.io/en/latest/">Documentation</a></li>
                 </ul>
                 </ul>
 
 
                 <div class="social links" style="float: right; margin-top: 14px; margin-left: 12px">
                 <div class="social links" style="float: right; margin-top: 14px; margin-left: 12px">
@@ -76,6 +76,7 @@
                     <ul>
                     <ul>
                         <li>Ubuntu 12.04 (LTS) (64-bit)</li>
                         <li>Ubuntu 12.04 (LTS) (64-bit)</li>
                         <li> or Ubuntu 12.10 (quantal) (64-bit)</li>
                         <li> or Ubuntu 12.10 (quantal) (64-bit)</li>
+                        <li>The 3.8 Linux Kernel</li>
                     </ul>
                     </ul>
                 <ol>
                 <ol>
                     <li>
                     <li>
@@ -105,7 +106,8 @@
                             <pre>docker run -i -t ubuntu /bin/bash</pre>
                             <pre>docker run -i -t ubuntu /bin/bash</pre>
                         </div>
                         </div>
                     </li>
                     </li>
-                    Continue with the <a href="http://docs.docker.io/en/latest/examples/hello_world/">Hello world</a> example.
+                    Continue with the <a href="http://docs.docker.io/en/latest/examples/hello_world/">Hello world</a> example.<br>
+                    Or check <a href="http://docs.docker.io/en/latest/installation/ubuntulinux/">more detailed installation instructions</a>
                 </ol>
                 </ol>
             </section>
             </section>
 
 

+ 18 - 2
docs/website/index.html

@@ -58,9 +58,9 @@
 
 
             <div class="pull-right" >
             <div class="pull-right" >
                 <ul class="nav">
                 <ul class="nav">
-                    <li class="active"><a href="../sources">Introduction</a></li>
+                    <li class="active"><a href="/">Introduction</a></li>
                     <li ><a href="gettingstarted">Getting started</a></li>
                     <li ><a href="gettingstarted">Getting started</a></li>
-                    <li class=""><a href="http://docs.docker.io/en/latest/concepts/introduction/">Documentation</a></li>
+                    <li class=""><a href="http://docs.docker.io/en/latest/">Documentation</a></li>
                 </ul>
                 </ul>
 
 
                 <div class="social links" style="float: right; margin-top: 14px; margin-left: 12px">
                 <div class="social links" style="float: right; margin-top: 14px; margin-left: 12px">
@@ -127,6 +127,22 @@
                 <h4>Repeatability</h4>
                 <h4>Repeatability</h4>
                 <p>Because each container is isolated in its own filesystem, they behave the same regardless of where, when, and alongside what they run.</p>
                 <p>Because each container is isolated in its own filesystem, they behave the same regardless of where, when, and alongside what they run.</p>
             </section>
             </section>
+            <section class="contentblock">
+                <div class="container">
+                <div class="span2" style="margin-left: 0" >
+                    <a href="http://dotcloud.theresumator.com/apply/mWjkD4/Software-Engineer.html" title="Job description"><img src="static/img/hiring_graphic.png" width="140px" style="margin-top: 25px"></a>
+                </div>
+                <div class="span4" style="margin-left: 0">
+                    <h4>Do you think it is cool to hack on docker? Join us!</h4>
+                    <ul>
+                        <li>Work on open source</li>
+                        <li>Program in Go</li>
+                    </ul>
+                    <a href="http://dotcloud.theresumator.com/apply/mWjkD4/Software-Engineer.html" title="Job description">read more</a>
+                </div>
+                </div>
+
+            </section>
         </div>
         </div>
         <div class="span6">
         <div class="span6">
             <section class="contentblock">
             <section class="contentblock">

+ 2 - 1
getKernelVersion_darwin.go

@@ -2,8 +2,9 @@ package docker
 
 
 import (
 import (
 	"fmt"
 	"fmt"
+	"github.com/dotcloud/docker/utils"
 )
 )
 
 
-func getKernelVersion() (*KernelVersionInfo, error) {
+func getKernelVersion() (*utils.KernelVersionInfo, error) {
 	return nil, fmt.Errorf("Kernel version detection is not available on darwin")
 	return nil, fmt.Errorf("Kernel version detection is not available on darwin")
 }
 }

+ 4 - 2
getKernelVersion_linux.go

@@ -2,12 +2,14 @@ package docker
 
 
 import (
 import (
 	"bytes"
 	"bytes"
+	"github.com/dotcloud/docker/utils"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
 	"syscall"
 	"syscall"
 )
 )
 
 
-func getKernelVersion() (*KernelVersionInfo, error) {
+// FIXME: Move this to utils package
+func getKernelVersion() (*utils.KernelVersionInfo, error) {
 	var (
 	var (
 		uts                  syscall.Utsname
 		uts                  syscall.Utsname
 		flavor               string
 		flavor               string
@@ -60,7 +62,7 @@ func getKernelVersion() (*KernelVersionInfo, error) {
 		flavor = ""
 		flavor = ""
 	}
 	}
 
 
-	return &KernelVersionInfo{
+	return &utils.KernelVersionInfo{
 		Kernel: kernel,
 		Kernel: kernel,
 		Major:  major,
 		Major:  major,
 		Minor:  minor,
 		Minor:  minor,

+ 20 - 6
graph.go

@@ -3,9 +3,10 @@ package docker
 import (
 import (
 	"encoding/json"
 	"encoding/json"
 	"fmt"
 	"fmt"
+	"github.com/dotcloud/docker/registry"
+	"github.com/dotcloud/docker/utils"
 	"io"
 	"io"
 	"io/ioutil"
 	"io/ioutil"
-	"net/http"
 	"os"
 	"os"
 	"path"
 	"path"
 	"path/filepath"
 	"path/filepath"
@@ -17,8 +18,7 @@ import (
 // A Graph is a store for versioned filesystem images and the relationship between them.
 // A Graph is a store for versioned filesystem images and the relationship between them.
 type Graph struct {
 type Graph struct {
 	Root         string
 	Root         string
-	idIndex      *TruncIndex
-	httpClient   *http.Client
+	idIndex      *utils.TruncIndex
 	checksumLock map[string]*sync.Mutex
 	checksumLock map[string]*sync.Mutex
 	lockSumFile  *sync.Mutex
 	lockSumFile  *sync.Mutex
 	lockSumMap   *sync.Mutex
 	lockSumMap   *sync.Mutex
@@ -37,7 +37,7 @@ func NewGraph(root string) (*Graph, error) {
 	}
 	}
 	graph := &Graph{
 	graph := &Graph{
 		Root:         abspath,
 		Root:         abspath,
-		idIndex:      NewTruncIndex(),
+		idIndex:      utils.NewTruncIndex(),
 		checksumLock: make(map[string]*sync.Mutex),
 		checksumLock: make(map[string]*sync.Mutex),
 		lockSumFile:  &sync.Mutex{},
 		lockSumFile:  &sync.Mutex{},
 		lockSumMap:   &sync.Mutex{},
 		lockSumMap:   &sync.Mutex{},
@@ -122,7 +122,7 @@ func (graph *Graph) Create(layerData Archive, container *Container, comment, aut
 		img.Container = container.Id
 		img.Container = container.Id
 		img.ContainerConfig = *container.Config
 		img.ContainerConfig = *container.Config
 	}
 	}
-	if err := graph.Register(layerData, true, img); err != nil {
+	if err := graph.Register(layerData, layerData != nil, img); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 	go img.Checksum()
 	go img.Checksum()
@@ -174,7 +174,7 @@ func (graph *Graph) TempLayerArchive(id string, compression Compression, output
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return NewTempArchive(ProgressReader(ioutil.NopCloser(archive), 0, output, "Buffering to disk %v/%v (%v)"), tmp.Root)
+	return NewTempArchive(utils.ProgressReader(ioutil.NopCloser(archive), 0, output, "Buffering to disk %v/%v (%v)"), tmp.Root)
 }
 }
 
 
 // Mktemp creates a temporary sub-directory inside the graph's filesystem.
 // Mktemp creates a temporary sub-directory inside the graph's filesystem.
@@ -333,3 +333,17 @@ func (graph *Graph) storeChecksums(checksums map[string]string) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
+
+func (graph *Graph) UpdateChecksums(newChecksums map[string]*registry.ImgData) error {
+	graph.lockSumFile.Lock()
+	defer graph.lockSumFile.Unlock()
+
+	localChecksums, err := graph.getStoredChecksums()
+	if err != nil {
+		return err
+	}
+	for id, elem := range newChecksums {
+		localChecksums[id] = elem.Checksum
+	}
+	return graph.storeChecksums(localChecksums)
+}

+ 2 - 1
graph_test.go

@@ -4,6 +4,7 @@ import (
 	"archive/tar"
 	"archive/tar"
 	"bytes"
 	"bytes"
 	"errors"
 	"errors"
+	"github.com/dotcloud/docker/utils"
 	"io"
 	"io"
 	"io/ioutil"
 	"io/ioutil"
 	"os"
 	"os"
@@ -155,7 +156,7 @@ func TestDeletePrefix(t *testing.T) {
 	graph := tempGraph(t)
 	graph := tempGraph(t)
 	defer os.RemoveAll(graph.Root)
 	defer os.RemoveAll(graph.Root)
 	img := createTestImage(graph, t)
 	img := createTestImage(graph, t)
-	if err := graph.Delete(TruncateId(img.Id)); err != nil {
+	if err := graph.Delete(utils.TruncateId(img.Id)); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	assertNImages(graph, t, 0)
 	assertNImages(graph, t, 0)

+ 1 - 1
hack/dockerbuilder/Dockerfile

@@ -6,7 +6,7 @@ run	apt-get update
 run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q s3cmd
 run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q s3cmd
 run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
 run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
 # Packages required to checkout and build docker
 # Packages required to checkout and build docker
-run	curl -s -o /go.tar.gz https://go.googlecode.com/files/go1.0.3.linux-amd64.tar.gz
+run	curl -s -o /go.tar.gz https://go.googlecode.com/files/go1.1.linux-amd64.tar.gz
 run	tar -C /usr/local -xzf /go.tar.gz
 run	tar -C /usr/local -xzf /go.tar.gz
 run	echo "export PATH=$PATH:/usr/local/go/bin" > /.bashrc
 run	echo "export PATH=$PATH:/usr/local/go/bin" > /.bashrc
 run	echo "export PATH=$PATH:/usr/local/go/bin" > /.bash_profile
 run	echo "export PATH=$PATH:/usr/local/go/bin" > /.bash_profile

+ 14 - 1
image.go

@@ -6,6 +6,7 @@ import (
 	"encoding/hex"
 	"encoding/hex"
 	"encoding/json"
 	"encoding/json"
 	"fmt"
 	"fmt"
+	"github.com/dotcloud/docker/utils"
 	"io"
 	"io"
 	"io/ioutil"
 	"io/ioutil"
 	"log"
 	"log"
@@ -195,7 +196,7 @@ func (image *Image) Changes(rw string) ([]Change, error) {
 }
 }
 
 
 func (image *Image) ShortId() string {
 func (image *Image) ShortId() string {
-	return TruncateId(image.Id)
+	return utils.TruncateId(image.Id)
 }
 }
 
 
 func ValidateId(id string) error {
 func ValidateId(id string) error {
@@ -383,3 +384,15 @@ func (img *Image) getVirtualSize(size int64) int64 {
 	size += parentImage.Size
 	size += parentImage.Size
 	return parentImage.getVirtualSize(size)
 	return parentImage.getVirtualSize(size)
 }
 }
+
+// Build an Image object from raw json data
+func NewImgJson(src []byte) (*Image, error) {
+	ret := &Image{}
+
+	utils.Debugf("Json string: {%s}\n", src)
+	// FIXME: Is there a cleaner way to "purify" the input json?
+	if err := json.Unmarshal(src, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}

+ 3 - 0
lxc_template.go

@@ -96,6 +96,9 @@ lxc.cgroup.memory.soft_limit_in_bytes = {{.Config.Memory}}
 lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}}
 lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}}
 {{end}}
 {{end}}
 {{end}}
 {{end}}
+{{if .Config.CpuShares}}
+lxc.cgroup.cpu.shares = {{.Config.CpuShares}}
+{{end}}
 `
 `
 
 
 var LxcTemplateCompiled *template.Template
 var LxcTemplateCompiled *template.Template

+ 11 - 10
network.go

@@ -4,6 +4,7 @@ import (
 	"encoding/binary"
 	"encoding/binary"
 	"errors"
 	"errors"
 	"fmt"
 	"fmt"
+	"github.com/dotcloud/docker/utils"
 	"io"
 	"io"
 	"log"
 	"log"
 	"net"
 	"net"
@@ -97,7 +98,7 @@ func checkRouteOverlaps(dockerNetwork *net.IPNet) error {
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	Debugf("Routes:\n\n%s", output)
+	utils.Debugf("Routes:\n\n%s", output)
 	for _, line := range strings.Split(output, "\n") {
 	for _, line := range strings.Split(output, "\n") {
 		if strings.Trim(line, "\r\n\t ") == "" || strings.Contains(line, "default") {
 		if strings.Trim(line, "\r\n\t ") == "" || strings.Contains(line, "default") {
 			continue
 			continue
@@ -126,13 +127,13 @@ func CreateBridgeIface(ifaceName string) error {
 			ifaceAddr = addr
 			ifaceAddr = addr
 			break
 			break
 		} else {
 		} else {
-			Debugf("%s: %s", addr, err)
+			utils.Debugf("%s: %s", addr, err)
 		}
 		}
 	}
 	}
 	if ifaceAddr == "" {
 	if ifaceAddr == "" {
 		return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually and run 'docker -b %s'", ifaceName, ifaceName)
 		return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually and run 'docker -b %s'", ifaceName, ifaceName)
 	} else {
 	} else {
-		Debugf("Creating bridge %s with network %s", ifaceName, ifaceAddr)
+		utils.Debugf("Creating bridge %s with network %s", ifaceName, ifaceAddr)
 	}
 	}
 
 
 	if output, err := ip("link", "add", ifaceName, "type", "bridge"); err != nil {
 	if output, err := ip("link", "add", ifaceName, "type", "bridge"); err != nil {
@@ -239,22 +240,22 @@ func (mapper *PortMapper) Map(port int, dest net.TCPAddr) error {
 // proxy listens for socket connections on `listener`, and forwards them unmodified
 // proxy listens for socket connections on `listener`, and forwards them unmodified
 // to `proto:address`
 // to `proto:address`
 func proxy(listener net.Listener, proto, address string) error {
 func proxy(listener net.Listener, proto, address string) error {
-	Debugf("proxying to %s:%s", proto, address)
-	defer Debugf("Done proxying to %s:%s", proto, address)
+	utils.Debugf("proxying to %s:%s", proto, address)
+	defer utils.Debugf("Done proxying to %s:%s", proto, address)
 	for {
 	for {
-		Debugf("Listening on %s", listener)
+		utils.Debugf("Listening on %s", listener)
 		src, err := listener.Accept()
 		src, err := listener.Accept()
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
-		Debugf("Connecting to %s:%s", proto, address)
+		utils.Debugf("Connecting to %s:%s", proto, address)
 		dst, err := net.Dial(proto, address)
 		dst, err := net.Dial(proto, address)
 		if err != nil {
 		if err != nil {
 			log.Printf("Error connecting to %s:%s: %s", proto, address, err)
 			log.Printf("Error connecting to %s:%s: %s", proto, address, err)
 			src.Close()
 			src.Close()
 			continue
 			continue
 		}
 		}
-		Debugf("Connected to backend, splicing")
+		utils.Debugf("Connected to backend, splicing")
 		splice(src, dst)
 		splice(src, dst)
 	}
 	}
 	return nil
 	return nil
@@ -317,7 +318,7 @@ func (alloc *PortAllocator) runFountain() {
 
 
 // FIXME: Release can no longer fail, change its prototype to reflect that.
 // FIXME: Release can no longer fail, change its prototype to reflect that.
 func (alloc *PortAllocator) Release(port int) error {
 func (alloc *PortAllocator) Release(port int) error {
-	Debugf("Releasing %d", port)
+	utils.Debugf("Releasing %d", port)
 	alloc.lock.Lock()
 	alloc.lock.Lock()
 	delete(alloc.inUse, port)
 	delete(alloc.inUse, port)
 	alloc.lock.Unlock()
 	alloc.lock.Unlock()
@@ -325,7 +326,7 @@ func (alloc *PortAllocator) Release(port int) error {
 }
 }
 
 
 func (alloc *PortAllocator) Acquire(port int) (int, error) {
 func (alloc *PortAllocator) Acquire(port int) (int, error) {
-	Debugf("Acquiring %d", port)
+	utils.Debugf("Acquiring %d", port)
 	if port == 0 {
 	if port == 0 {
 		// Allocate a port from the fountain
 		// Allocate a port from the fountain
 		for port := range alloc.fountain {
 		for port := range alloc.fountain {

+ 43 - 19
packaging/debian/Makefile

@@ -1,35 +1,59 @@
+# Debian package Makefile
+#
+# Dependencies:  git debhelper build-essential autotools-dev devscripts golang
+# Notes:
+# Use 'make debian' to create the debian package
+# To create a specific version, use 'VERSION_TAG=v0.2.0 make debian'
+# GPG_KEY environment variable needs to contain a GPG private key for package
+# to be signed and uploaded to debian.
+# If GPG_KEY is not defined, make debian will create docker package and exit
+# with status code 2
+
 PKG_NAME=lxc-docker
 PKG_NAME=lxc-docker
-DOCKER_VERSION=$(shell head -1 changelog | awk 'match($$0, /\(.+\)/) {print substr($$0, RSTART+1, RLENGTH-4)}')
+ROOT_PATH=$(shell git rev-parse --show-toplevel)
 GITHUB_PATH=github.com/dotcloud/docker
 GITHUB_PATH=github.com/dotcloud/docker
-SOURCE_PKG=$(PKG_NAME)_$(DOCKER_VERSION).orig.tar.gz
-BUILD_SRC=${CURDIR}/../../build_src
+BUILD_SRC=build_src
+VERSION_TAG?=v$(shell sed -E 's/.+\((.+)-.+\).+/\1/;q' changelog)
+VERSION=$(shell echo ${VERSION_TAG} | cut -c2-)
+DOCKER_VERSION=${PKG_NAME}_${VERSION}
 
 
 all:
 all:
-	# Compile docker. Used by debian dpkg-buildpackage. 
+	# Compile docker. Used by debian dpkg-buildpackage.
 	cd src/${GITHUB_PATH}/docker; GOPATH=${CURDIR} go build
 	cd src/${GITHUB_PATH}/docker; GOPATH=${CURDIR} go build
 
 
 install:
 install:
-	# Used by debian dpkg-buildpackage 
+	# Used by debian dpkg-buildpackage
 	mkdir -p $(DESTDIR)/usr/bin
 	mkdir -p $(DESTDIR)/usr/bin
-	mkdir -p $(DESTDIR)/etc/init.d
-	install -m 0755 src/${GITHUB_PATH}/docker/docker $(DESTDIR)/usr/bin
-	install -o root -m 0755 debian/docker.initd $(DESTDIR)/etc/init.d/docker
+	mkdir -p $(DESTDIR)/usr/share/man/man1
+	mkdir -p $(DESTDIR)/usr/share/doc/lxc-docker
+	install -m 0755 src/${GITHUB_PATH}/docker/docker $(DESTDIR)/usr/bin/lxc-docker
+	cp debian/lxc-docker.1 $(DESTDIR)/usr/share/man/man1
+	cp debian/CHANGELOG.md $(DESTDIR)/usr/share/doc/lxc-docker/changelog
 
 
 debian:
 debian:
-	# This Makefile will compile the github master branch of dotcloud/docker
-	# Retrieve docker project and its go structure from internet
-	rm -rf ${BUILD_SRC}
-	GOPATH=${BUILD_SRC} go get ${GITHUB_PATH}
+	# Prepare docker source from revision ${VERSION_TAG}
+	rm -rf ${BUILD_SRC} ${PKG_NAME}_[0-9]*
+	git clone file://$(ROOT_PATH) ${BUILD_SRC}/src/${GITHUB_PATH} --branch ${VERSION_TAG} --depth 1
+	GOPATH=${CURDIR}/${BUILD_SRC} go get -d ${GITHUB_PATH}
 	# Add debianization
 	# Add debianization
 	mkdir ${BUILD_SRC}/debian
 	mkdir ${BUILD_SRC}/debian
 	cp Makefile ${BUILD_SRC}
 	cp Makefile ${BUILD_SRC}
-	cp -r * ${BUILD_SRC}/debian
-	cp ../../README.md ${BUILD_SRC}
+	cp -r `ls | grep -v ${BUILD_SRC}` ${BUILD_SRC}/debian
+	cp ${ROOT_PATH}/README.md ${BUILD_SRC}
+	cp ${ROOT_PATH}/CHANGELOG.md ${BUILD_SRC}/debian
 	# Cleanup
 	# Cleanup
-	for d in `find ${BUILD_SRC} -name '.git*'`; do rm -rf $$d; done
-	rm -rf ${BUILD_SRC}/../${SOURCE_PKG}
-	rm -rf ${BUILD_SRC}/pkg
+	rm -rf `find . -name '.git*'`
+	rm -f ${DOCKER_VERSION}*
 	# Create docker debian files
 	# Create docker debian files
-	cd ${BUILD_SRC}; tar czf ../${SOURCE_PKG} .
-	cd ${BUILD_SRC}; dpkg-buildpackage
+	cd ${BUILD_SRC}; tar czf ../${DOCKER_VERSION}.orig.tar.gz .
+	cd ${BUILD_SRC}; dpkg-buildpackage -us -uc
 	rm -rf ${BUILD_SRC}
 	rm -rf ${BUILD_SRC}
+	# Sign package and upload it to PPA if GPG_KEY environment variable
+	# holds a private GPG KEY
+	if /usr/bin/test "$${GPG_KEY}" == ""; then exit 2; fi
+	mkdir ${BUILD_SRC}
+	# Import gpg signing key
+	echo "$${GPG_KEY}" | gpg --allow-secret-key-import --import
+	# Sign the package
+	cd ${BUILD_SRC}; dpkg-source -x ${CURDIR}/${DOCKER_VERSION}-1.dsc
+	cd ${BUILD_SRC}/${PKG_NAME}-${VERSION}; debuild -S -sa

+ 9 - 12
packaging/debian/README.debian → packaging/debian/README.Debian

@@ -3,29 +3,26 @@ Docker on Debian
 
 
 Docker has been built and tested on Wheezy. All docker functionality works
 Docker has been built and tested on Wheezy. All docker functionality works
 out of the box, except for memory limitation as the stock debian kernel
 out of the box, except for memory limitation as the stock debian kernel
-does not support it yet.
+disables it by default. To enable docker memory limitation, the kernel needs
+to be loaded with boot parameters: cgroup_enable=memory swapaccount=1
 
 
 
 
 Building docker package
 Building docker package
 ~~~~~~~~~~~~~~~~~~~~~~~
 ~~~~~~~~~~~~~~~~~~~~~~~
 
 
-Building Dependencies: debhelper, autotools-dev and golang
-
-
 Assuming you have a wheezy system up and running
 Assuming you have a wheezy system up and running
 
 
-# Download a fresh copy of the docker project
-git clone https://github.com/dotcloud/docker.git
-cd docker
-
 # Get building dependencies
 # Get building dependencies
-sudo apt-get update ; sudo apt-get install -y debhelper autotools-dev golang
+sudo apt-get update
+sudo apt-get install -y debhelper build-essential autotools-dev golang
 
 
-# Make the debian package, with no memory limitation support
-(cd packaging/debian; make debian NO_MEMORY_LIMIT=1)
+# Make the debian package
+git clone https://github.com/dotcloud/docker.git
+cd docker/packaging/debian
+make debian
 
 
 
 
 Install docker package
 Install docker package
 ~~~~~~~~~~~~~~~~~~~~~~
 ~~~~~~~~~~~~~~~~~~~~~~
 
 
-sudo dpkg -i lxc-docker_0.1.4-1_amd64.deb; sudo apt-get install -f -y
+sudo dpkg -i lxc-docker_*-1_amd64.deb; sudo apt-get install -f -y

+ 13 - 17
packaging/debian/Vagrantfile

@@ -1,22 +1,18 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
+VM_IP = "192.168.33.31"
+PKG_DEP = "git debhelper build-essential autotools-dev devscripts golang"
 
 
-$BUILDBOT_IP = '192.168.33.31'
+Vagrant::Config.run do |config|
+  config.vm.box = 'debian-7.0.rc1.64'
+  config.vm.box_url = 'http://puppet-vagrant-boxes.puppetlabs.com/debian-70rc1-x64-vbox4210-nocm.box'
+  config.vm.share_folder 'v-data', '/data/docker', "#{File.dirname(__FILE__)}/../.."
+  config.vm.network :hostonly,VM_IP
 
 
-def v10(config)
-  config.vm.box = 'debian'
-  config.vm.share_folder 'v-data', '/data/docker', File.dirname(__FILE__) + '/../..'
-  config.vm.network :hostonly, $BUILDBOT_IP
+  # Add kernel cgroup memory limitation boot parameters
+  grub_cmd="sed -i 's#DEFAULT=\"quiet\"#DEFAULT=\"cgroup_enable=memory swapaccount=1 quiet\"#' /etc/default/grub"
+  config.vm.provision :shell, :inline => "#{grub_cmd};update-grub"
 
 
   # Install debian packaging dependencies and create debian packages
   # Install debian packaging dependencies and create debian packages
-  config.vm.provision :shell, :inline => 'apt-get -qq update; apt-get install -y debhelper autotools-dev golang'
-  config.vm.provision :shell, :inline => 'cd /data/docker/packaging/debian; make debian'
-end
-
-Vagrant::VERSION < '1.1.0' and Vagrant::Config.run do |config|
-  v10(config)
-end
-
-Vagrant::VERSION >= '1.1.0' and Vagrant.configure('1') do |config|
-  v10(config)
+  pkg_cmd = "apt-get -qq update; DEBIAN_FRONTEND=noninteractive apt-get install -qq -y #{PKG_DEP}; " \
+      "export GPG_KEY='#{ENV['GPG_KEY']}'; cd /data/docker/packaging/debian; make debian"
+  config.vm.provision :shell, :inline => pkg_cmd
 end
 end

+ 11 - 9
packaging/debian/changelog

@@ -1,14 +1,16 @@
-lxc-docker (0.1.4-1) unstable; urgency=low
+lxc-docker (0.3.2-1) UNRELEASED; urgency=low
+  - Runtime: Store the actual archive on commit
+  - Registry: Improve the checksum process
+  - Registry: Use the size to have a good progress bar while pushing
+  - Registry: Use the actual archive if it exists in order to speed up the push
+  - Registry: Fix error 400 on push
 
 
-  Improvements [+], Updates [*], Bug fixes [-]:
-  * Changed default bridge interface do 'docker0'
-  - Fix a race condition when running the port allocator
+ -- Daniel Mizyrycki <daniel@dotcloud.com>  Sun, 12 May 2013 00:00:00 -0700
 
 
- -- Daniel Mizyrycki <daniel@dotcloud.com>  Wed, 10 Apr 2013 18:06:21 -0700
 
 
+lxc-docker (0.2.0-1) UNRELEASED; urgency=low
+ 
+  - Pre-release (Closes: #706060)
 
 
-lxc-docker (0.1.0-1) unstable; urgency=low
+ -- Daniel Mizyrycki <daniel@dotcloud.com>  Fri, 26 Apr 2013 23:41:29 -0700
 
 
-  * Initial release
-
- -- Daniel Mizyrycki <daniel@dotcloud.com>  Mon, 29 Mar 2013 18:09:55 -0700

+ 6 - 5
packaging/debian/control

@@ -2,15 +2,16 @@ Source: lxc-docker
 Section: admin
 Section: admin
 Priority: optional
 Priority: optional
 Maintainer: Daniel Mizyrycki <daniel@dotcloud.com>
 Maintainer: Daniel Mizyrycki <daniel@dotcloud.com>
-Build-Depends: debhelper (>= 9),autotools-dev,golang
-Standards-Version: 3.9.3
+Build-Depends: debhelper (>=9), autotools-dev, golang
+Standards-Version: 3.9.4
+Vcs-Git: git://git.debian.org/git/collab-maint/lxc-docker.git
+Vcs-Browser: http://anonscm.debian.org/gitweb/?p=collab-maint/lxc-docker.git;a=summary
 Homepage: http://github.com/dotcloud/docker
 Homepage: http://github.com/dotcloud/docker
 
 
 Package: lxc-docker
 Package: lxc-docker
 Architecture: linux-any
 Architecture: linux-any
-Depends: ${misc:Depends},${shlibs:Depends},lxc,bsdtar
-Conflicts: docker
-Description: lxc-docker is a Linux container runtime
+Depends: ${shlibs:Depends}, ${misc:Depends}, lxc, bsdtar
+Description: Linux container runtime
  Docker complements LXC with a high-level API which operates at the process
  Docker complements LXC with a high-level API which operates at the process
  level. It runs unix processes with strong guarantees of isolation and
  level. It runs unix processes with strong guarantees of isolation and
  repeatability across servers.
  repeatability across servers.

+ 7 - 222
packaging/debian/copyright

@@ -1,237 +1,22 @@
 Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
 Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
 Upstream-Name: docker
 Upstream-Name: docker
-Upstream-Contact: DotCloud Inc <opensource@dotcloud.com>
+Upstream-Contact: dotCloud Inc <opensource@dotcloud.com>
 Source: http://github.com/dotcloud/docker
 Source: http://github.com/dotcloud/docker
 
 
 Files: *
 Files: *
-Copyright: 2012, DotCloud Inc <opensource@dotcloud.com>
+Copyright: 2012, dotCloud Inc <opensource@dotcloud.com>
 License: Apache-2.0
 License: Apache-2.0
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
- 
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
- 
- 1. Definitions.
- 
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
- 
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
- 
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
- 
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
- 
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
- 
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
- 
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
- 
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
- 
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
- 
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
- 
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
- 
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
- 
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
- 
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
- 
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
- 
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
- 
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
- 
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
- 
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
- 
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
- 
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
- 
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
- 
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
- 
- END OF TERMS AND CONDITIONS
- 
- APPENDIX: How to apply the Apache License to your work.
- 
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
- 
- Copyright 2012 DotCloud Inc
- 
  Licensed under the Apache License, Version 2.0 (the "License");
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at
  You may obtain a copy of the License at
- 
+ .
  http://www.apache.org/licenses/LICENSE-2.0
  http://www.apache.org/licenses/LICENSE-2.0
- 
+ .
  Unless required by applicable law or agreed to in writing, software
  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  See the License for the specific language governing permissions and
  limitations under the License.
  limitations under the License.
- 
- 
-Files: src/github.com/kr/pty/*
-Copyright: Copyright (c) 2011 Keith Rarick
-License: Expat
- Copyright (c) 2011 Keith Rarick
- 
- Permission is hereby granted, free of charge, to any person
- obtaining a copy of this software and associated
- documentation files (the "Software"), to deal in the
- Software without restriction, including without limitation
- the rights to use, copy, modify, merge, publish, distribute,
- sublicense, and/or sell copies of the Software, and to
- permit persons to whom the Software is furnished to do so,
- subject to the following conditions:
- 
- The above copyright notice and this permission notice shall
- be included in all copies or substantial portions of the
- Software.
- 
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
- KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
- WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
- PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
- OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ .
+ On Debian systems, the complete text of the Apache version 2.0 license
+ can be found in "/usr/share/common-licenses/Apache-2.0".

+ 0 - 49
packaging/debian/docker.initd

@@ -1,49 +0,0 @@
-#!/bin/sh
-
-### BEGIN INIT INFO
-# Provides:          docker
-# Required-Start:    $local_fs
-# Required-Stop:     $local_fs
-# Default-Start:     2 3 4 5
-# Default-Stop:      0 1 6
-# Short-Description: docker
-# Description:       docker daemon
-### END INIT INFO
-
-DOCKER=/usr/bin/docker
-PIDFILE=/var/run/docker.pid
-
-# Check docker is present
-[ -x $DOCKER ] || log_success_msg "Docker not present"
-
-# Get lsb functions
-. /lib/lsb/init-functions
-
-
-case "$1" in
-  start)
-    log_begin_msg "Starting docker..."
-    start-stop-daemon --start --background --exec "$DOCKER" -- -d
-    log_end_msg $?
-    ;;
-  stop)
-    log_begin_msg "Stopping docker..."
-    docker_pid=`pgrep -f "$DOCKER -d"`
-    [ -n "$docker_pid" ] && kill $docker_pid
-    log_end_msg $?
-    ;;
-  status)
-    docker_pid=`pgrep -f "$DOCKER -d"`
-    if [ -z "$docker_pid" ] ; then
-      echo "docker not running"
-    else
-      echo "docker running (pid $docker_pid)"
-    fi
-    ;;
-  *)
-    echo "Usage: /etc/init.d/docker {start|stop|status}"
-    exit 1
-    ;;
-esac
-
-exit 0

+ 1149 - 0
packaging/debian/lxc-docker.1

@@ -0,0 +1,1149 @@
+.TH "DOCKER" "1" "May 07, 2013" "0.1" "Docker"
+.SH NAME
+docker \- Docker Documentation
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" Man page generated from reStructeredText.
+.
+.sp
+This documentation has the following resources:
+.SH CONCEPTS
+.sp
+Contents:
+.SS Standard Containers
+.SS What is a Standard Container?
+.sp
+Docker defines a unit of software delivery called a Standard Container. The goal of a Standard Container is to encapsulate a software component and all its dependencies in
+a format that is self\-describing and portable, so that any compliant runtime can run it without extra dependency, regardless of the underlying machine and the contents of the container.
+.sp
+The spec for Standard Containers is currently work in progress, but it is very straightforward. It mostly defines 1) an image format, 2) a set of standard operations, and 3) an execution environment.
+.sp
+A great analogy for this is the shipping container. Just like Standard Containers are a fundamental unit of software delivery, shipping containers (\fI\%http://bricks.argz.com/ins/7823-1/12\fP) are a fundamental unit of physical delivery.
+.SS Standard operations
+.sp
+Just like shipping containers, Standard Containers define a set of STANDARD OPERATIONS. Shipping containers can be lifted, stacked, locked, loaded, unloaded and labelled. Similarly, standard containers can be started, stopped, copied, snapshotted, downloaded, uploaded and tagged.
+.SS Content\-agnostic
+.sp
+Just like shipping containers, Standard Containers are CONTENT\-AGNOSTIC: all standard operations have the same effect regardless of the contents. A shipping container will be stacked in exactly the same way whether it contains Vietnamese powder coffee or spare Maserati parts. Similarly, Standard Containers are started or uploaded in the same way whether they contain a postgres database, a php application with its dependencies and application server, or Java build artifacts.
+.SS Infrastructure\-agnostic
+.sp
+Both types of containers are INFRASTRUCTURE\-AGNOSTIC: they can be transported to thousands of facilities around the world, and manipulated by a wide variety of equipment. A shipping container can be packed in a factory in Ukraine, transported by truck to the nearest routing center, stacked onto a train, loaded into a German boat by an Australian\-built crane, stored in a warehouse at a US facility, etc. Similarly, a standard container can be bundled on my laptop, uploaded to S3, downloaded, run and snapshotted by a build server at Equinix in Virginia, uploaded to 10 staging servers in a home\-made Openstack cluster, then sent to 30 production instances across 3 EC2 regions.
+.SS Designed for automation
+.sp
+Because they offer the same standard operations regardless of content and infrastructure, Standard Containers, just like their physical counterpart, are extremely well\-suited for automation. In fact, you could say automation is their secret weapon.
+.sp
+Many things that once required time\-consuming and error\-prone human effort can now be programmed. Before shipping containers, a bag of powder coffee was hauled, dragged, dropped, rolled and stacked by 10 different people in 10 different locations by the time it reached its destination. 1 out of 50 disappeared. 1 out of 20 was damaged. The process was slow, inefficient and cost a fortune \- and was entirely different depending on the facility and the type of goods.
+.sp
+Similarly, before Standard Containers, by the time a software component ran in production, it had been individually built, configured, bundled, documented, patched, vendored, templated, tweaked and instrumented by 10 different people on 10 different computers. Builds failed, libraries conflicted, mirrors crashed, post\-it notes were lost, logs were misplaced, cluster updates were half\-broken. The process was slow, inefficient and cost a fortune \- and was entirely different depending on the language and infrastructure provider.
+.SS Industrial\-grade delivery
+.sp
+There are 17 million shipping containers in existence, packed with every physical good imaginable. Every single one of them can be loaded on the same boats, by the same cranes, in the same facilities, and sent anywhere in the World with incredible efficiency. It is embarrassing to think that a 30 ton shipment of coffee can safely travel half\-way across the World in \fIless time\fP than it takes a software team to deliver its code from one datacenter to another sitting 10 miles away.
+.sp
+With Standard Containers we can put an end to that embarrassment, by making INDUSTRIAL\-GRADE DELIVERY of software a reality.
+.SS Standard Container Specification
+.sp
+(TODO)
+.SS Image format
+.SS Standard operations
+.INDENT 0.0
+.IP \(bu 2
+Copy
+.IP \(bu 2
+Run
+.IP \(bu 2
+Stop
+.IP \(bu 2
+Wait
+.IP \(bu 2
+Commit
+.IP \(bu 2
+Attach standard streams
+.IP \(bu 2
+List filesystem changes
+.IP \(bu 2
+.UNINDENT
+.SS Execution environment
+.SS Root filesystem
+.SS Environment variables
+.SS Process arguments
+.SS Networking
+.SS Process namespacing
+.SS Resource limits
+.SS Process monitoring
+.SS Logging
+.SS Signals
+.SS Pseudo\-terminal allocation
+.SS Security
+.SH INSTALLATION
+.sp
+Contents:
+.SS Ubuntu Linux
+.INDENT 0.0
+.INDENT 3.5
+\fBPlease note this project is currently under heavy development. It should not be used in production.\fP
+.UNINDENT
+.UNINDENT
+.sp
+Installing on Ubuntu 12.04 and 12.10
+.sp
+Right now, the officially supported distributions are:
+.sp
+Ubuntu 12.04 (precise LTS)
+Ubuntu 12.10 (quantal)
+Docker probably works on other distributions featuring a recent kernel, the AUFS patch, and up\-to\-date lxc. However this has not been tested.
+.SS Install dependencies:
+.sp
+.nf
+.ft C
+sudo apt\-get install lxc wget bsdtar curl
+sudo apt\-get install linux\-image\-extra\-\(gauname \-r\(ga
+.ft P
+.fi
+.sp
+The linux\-image\-extra package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
+.sp
+Install the latest docker binary:
+.sp
+.nf
+.ft C
+wget http://get.docker.io/builds/$(uname \-s)/$(uname \-m)/docker\-master.tgz
+tar \-xf docker\-master.tgz
+.ft P
+.fi
+.sp
+Run your first container!
+.sp
+.nf
+.ft C
+cd docker\-master
+.ft P
+.fi
+.sp
+.nf
+.ft C
+sudo ./docker run \-i \-t base /bin/bash
+.ft P
+.fi
+.sp
+Consider adding docker to your PATH for simplicity.
+.sp
+Continue with the \fIhello_world\fP example.
+.SS Mac OS X and other linux
+.INDENT 0.0
+.INDENT 3.5
+Please note this is a community contributed installation path. The only \(aqofficial\(aq installation is using the \fIubuntu_linux\fP installation path. This version
+may be out of date because it depends on some binaries to be updated and published
+.UNINDENT
+.UNINDENT
+.SS Requirements
+.sp
+We currently rely on some Ubuntu\-linux specific packages, this will change in the future, but for now we provide a
+streamlined path to install Virtualbox with a Ubuntu 12.10 image using Vagrant.
+.INDENT 0.0
+.IP 1. 3
+Install virtualbox from \fI\%https://www.virtualbox.org/\fP (or use your package manager)
+.IP 2. 3
+Install vagrant from \fI\%http://www.vagrantup.com/\fP (or use your package manager)
+.IP 3. 3
+Install git if you had not installed it before, check if it is installed by running
+\fBgit\fP in a terminal window
+.UNINDENT
+.sp
+We recommend having at least about 2Gb of free disk space and 2Gb RAM (or more).
+.SS Installation
+.INDENT 0.0
+.IP 1. 3
+Fetch the docker sources
+.UNINDENT
+.sp
+.nf
+.ft C
+git clone https://github.com/dotcloud/docker.git
+.ft P
+.fi
+.INDENT 0.0
+.IP 2. 3
+Run vagrant from the sources directory
+.UNINDENT
+.sp
+.nf
+.ft C
+vagrant up
+.ft P
+.fi
+.sp
+Vagrant will:
+.INDENT 0.0
+.IP \(bu 2
+Download the Quantal64 base ubuntu virtual machine image from get.docker.io/
+.IP \(bu 2
+Boot this image in virtualbox
+.UNINDENT
+.sp
+Then it will use Puppet to perform an initial setup in this machine:
+.INDENT 0.0
+.IP \(bu 2
+Download & untar the most recent docker binary tarball to vagrant homedir.
+.IP \(bu 2
+Debootstrap to /var/lib/docker/images/ubuntu.
+.IP \(bu 2
+Install & run dockerd as service.
+.IP \(bu 2
+Put docker in /usr/local/bin.
+.IP \(bu 2
+Put latest Go toolchain in /usr/local/go.
+.UNINDENT
+.sp
+You now have a Ubuntu Virtual Machine running with docker pre\-installed.
+.sp
+To access the VM and use Docker, Run \fBvagrant ssh\fP from the same directory as where you ran
+\fBvagrant up\fP. Vagrant will make sure to connect you to the correct VM.
+.sp
+.nf
+.ft C
+vagrant ssh
+.ft P
+.fi
+.sp
+Now you are in the VM, run docker
+.sp
+.nf
+.ft C
+docker
+.ft P
+.fi
+.sp
+Continue with the \fIhello_world\fP example.
+.SS Windows
+.INDENT 0.0
+.INDENT 3.5
+Please note this is a community contributed installation path. The only \(aqofficial\(aq installation is using the \fIubuntu_linux\fP installation path. This version
+may be out of date because it depends on some binaries to be updated and published
+.UNINDENT
+.UNINDENT
+.SS Requirements
+.INDENT 0.0
+.IP 1. 3
+Install virtualbox from \fI\%https://www.virtualbox.org\fP \- or follow this \fI\%tutorial\fP
+.UNINDENT
+.INDENT 0.0
+.IP 2. 3
+Install vagrant from \fI\%http://www.vagrantup.com\fP \- or follow this \fI\%tutorial\fP
+.UNINDENT
+.INDENT 0.0
+.IP 3. 3
+Install git with ssh from \fI\%http://git-scm.com/downloads\fP \- or follow this \fI\%tutorial\fP
+.UNINDENT
+.sp
+We recommend having at least 2Gb of free disk space and 2Gb of RAM (or more).
+.SS Opening a command prompt
+.sp
+First open a cmd prompt. Press Windows key and then press “R” key. This will open the RUN dialog box for you. Type “cmd” and press Enter. Or you can click on Start, type “cmd” in the “Search programs and files” field, and click on cmd.exe.
+[image: Git install]
+[image]
+.sp
+This should open a cmd prompt window.
+[image: run docker]
+[image]
+.sp
+Alternatively, you can also use a Cygwin terminal, or Git Bash (or any other command line program you are usually using). The next steps would be the same.
+.SS Launch an Ubuntu virtual server
+.sp
+Let’s download and run an Ubuntu image with docker binaries already installed.
+.sp
+.nf
+.ft C
+git clone https://github.com/dotcloud/docker.git
+cd docker
+vagrant up
+.ft P
+.fi
+[image: run docker]
+[image]
+.sp
+Congratulations! You are running an Ubuntu server with docker installed on it. You do not see it though, because it is running in the background.
+.SS Log onto your Ubuntu server
+.sp
+Let’s log into your Ubuntu server now. To do so you have two choices:
+.INDENT 0.0
+.IP \(bu 2
+Use Vagrant on Windows command prompt OR
+.IP \(bu 2
+Use SSH
+.UNINDENT
+.SS Using Vagrant on Windows Command Prompt
+.sp
+Run the following command
+.sp
+.nf
+.ft C
+vagrant ssh
+.ft P
+.fi
+.sp
+You may see an error message starting with “\fIssh\fP executable not found”. In this case it means that you do not have SSH in your PATH. If you do not have SSH in your PATH you can set it up with the “set” command. For instance, if your ssh.exe is in the folder named “C:Program Files (x86)Gitbin”, then you can run the following command:
+.sp
+.nf
+.ft C
+set PATH=%PATH%;C:\eProgram Files (x86)\eGit\ebin
+.ft P
+.fi
+[image: run docker]
+[image]
+.SS Using SSH
+.sp
+First step is to get the IP and port of your Ubuntu server. Simply run:
+.sp
+.nf
+.ft C
+vagrant ssh\-config
+.ft P
+.fi
+.sp
+You should see an output with HostName and Port information. In this example, HostName is 127.0.0.1 and port is 2222. And the User is “vagrant”. The password is not shown, but it is also “vagrant”.
+[image: run docker]
+[image]
+.sp
+You can now use this information for connecting via SSH to your server. To do so you can:
+.INDENT 0.0
+.IP \(bu 2
+Use putty.exe OR
+.IP \(bu 2
+Use SSH from a terminal
+.UNINDENT
+.SS Use putty.exe
+.sp
+You can download putty.exe from this page \fI\%http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html\fP
+Launch putty.exe and simply enter the information you got from last step.
+[image: run docker]
+[image]
+.sp
+Open, and enter user = vagrant and password = vagrant.
+[image: run docker]
+[image]
+.SS SSH from a terminal
+.sp
+You can also run this command on your favorite terminal (windows prompt, cygwin, git\-bash, …). Make sure to adapt the IP and port from what you got from the vagrant ssh\-config command.
+.sp
+.nf
+.ft C
+ssh vagrant@127.0.0.1 –p 2222
+.ft P
+.fi
+.sp
+Enter user = vagrant and password = vagrant.
+[image: run docker]
+[image]
+.sp
+Congratulations, you are now logged onto your Ubuntu Server, running on top of your Windows machine !
+.SS Running Docker
+.sp
+First you have to be root in order to run docker. Simply run the following command:
+.sp
+.nf
+.ft C
+sudo su
+.ft P
+.fi
+.sp
+You are now ready for the docker’s “hello world” example. Run
+.sp
+.nf
+.ft C
+docker run \-a busybox echo hello world
+.ft P
+.fi
+[image: run docker]
+[image]
+.sp
+All done!
+.sp
+Now you can continue with the \fIhello_world\fP example.
+.SS Amazon EC2
+.INDENT 0.0
+.INDENT 3.5
+Please note this is a community contributed installation path. The only \(aqofficial\(aq installation is using the \fIubuntu_linux\fP installation path. This version
+may be out of date because it depends on some binaries to be updated and published
+.UNINDENT
+.UNINDENT
+.SS Installation
+.sp
+Docker can now be installed on Amazon EC2 with a single vagrant command. Vagrant 1.1 or higher is required.
+.INDENT 0.0
+.IP 1. 3
+Install vagrant from \fI\%http://www.vagrantup.com/\fP (or use your package manager)
+.IP 2. 3
+Install the vagrant aws plugin
+.sp
+.nf
+.ft C
+vagrant plugin install vagrant\-aws
+.ft P
+.fi
+.IP 3. 3
+Get the docker sources, this will give you the latest Vagrantfile and puppet manifests.
+.sp
+.nf
+.ft C
+git clone https://github.com/dotcloud/docker.git
+.ft P
+.fi
+.IP 4. 3
+Check your AWS environment.
+.sp
+Create a keypair specifically for EC2, give it a name and save it to your disk. \fII usually store these in my ~/.ssh/ folder\fP.
+.sp
+Check that your default security group has an inbound rule to accept SSH (port 22) connections.
+.IP 5. 3
+Inform Vagrant of your settings
+.sp
+Vagrant will read your access credentials from your environment, so we need to set them there first. Make sure
+you have everything on amazon aws setup so you can (manually) deploy a new image to EC2.
+.sp
+.nf
+.ft C
+export AWS_ACCESS_KEY_ID=xxx
+export AWS_SECRET_ACCESS_KEY=xxx
+export AWS_KEYPAIR_NAME=xxx
+export AWS_SSH_PRIVKEY=xxx
+.ft P
+.fi
+.sp
+The environment variables are:
+.INDENT 3.0
+.IP \(bu 2
+\fBAWS_ACCESS_KEY_ID\fP \- The API key used to make requests to AWS
+.IP \(bu 2
+\fBAWS_SECRET_ACCESS_KEY\fP \- The secret key to make AWS API requests
+.IP \(bu 2
+\fBAWS_KEYPAIR_NAME\fP \- The name of the keypair used for this EC2 instance
+.IP \(bu 2
+\fBAWS_SSH_PRIVKEY\fP \- The path to the private key for the named keypair, for example \fB~/.ssh/docker.pem\fP
+.UNINDENT
+.sp
+You can check if they are set correctly by doing something like
+.sp
+.nf
+.ft C
+echo $AWS_ACCESS_KEY_ID
+.ft P
+.fi
+.IP 6. 3
+Do the magic!
+.sp
+.nf
+.ft C
+vagrant up \-\-provider=aws
+.ft P
+.fi
+.sp
+If it stalls indefinitely on \fB[default] Waiting for SSH to become available...\fP, Double check your default security
+zone on AWS includes rights to SSH (port 22) to your container.
+.sp
+If you have an advanced AWS setup, you might want to have a look at the \fI\%https://github.com/mitchellh/vagrant-aws\fP
+.IP 7. 3
+Connect to your machine
+.sp
+.nf
+.ft C
+vagrant ssh
+.ft P
+.fi
+.IP 8. 3
+Your first command
+.sp
+Now you are in the VM, run docker
+.sp
+.nf
+.ft C
+docker
+.ft P
+.fi
+.UNINDENT
+.sp
+Continue with the \fIhello_world\fP example.
+.SH EXAMPLES
+.sp
+Contents:
+.SS Hello World
+.sp
+This is the most basic example available for using docker
+.sp
+This example assumes you have Docker installed.
+.sp
+Download the base container
+.sp
+.nf
+.ft C
+# Download a base image
+docker pull base
+.ft P
+.fi
+.sp
+The \fIbase\fP image is a minimal \fIubuntu\fP based container, alternatively you can select \fIbusybox\fP, a bare
+minimal linux system. The images are retrieved from the docker repository.
+.sp
+.nf
+.ft C
+#run a simple echo command, that will echo hello world back to the console over standard out.
+docker run base /bin/echo hello world
+.ft P
+.fi
+.sp
+\fBExplanation:\fP
+.INDENT 0.0
+.IP \(bu 2
+\fB"docker run"\fP run a command in a new container
+.IP \(bu 2
+\fB"base"\fP is the image we want to run the command inside of.
+.IP \(bu 2
+\fB"/bin/echo"\fP is the command we want to run in the container
+.IP \(bu 2
+\fB"hello world"\fP is the input for the echo command
+.UNINDENT
+.sp
+\fBVideo:\fP
+.sp
+See the example in action
+.sp
+Continue to the \fIhello_world_daemon\fP example.
+.SS Hello World Daemon
+.sp
+The most boring daemon ever written.
+.sp
+This example assumes you have Docker installed and with the base image already imported \fBdocker pull base\fP.
+We will use the base image to run a simple hello world daemon that will just print hello world to standard
+out every second. It will continue to do this until we stop it.
+.sp
+\fBSteps:\fP
+.sp
+.nf
+.ft C
+$ CONTAINER_ID=$(docker run \-d base /bin/sh \-c "while true; do echo hello world; sleep 1; done")
+.ft P
+.fi
+.sp
+We are going to run a simple hello world daemon in a new container made from the base image.
+.INDENT 0.0
+.IP \(bu 2
+\fB"docker run \-d "\fP run a command in a new container. We pass "\-d" so it runs as a daemon.
+.IP \(bu 2
+\fB"base"\fP is the image we want to run the command inside of.
+.IP \(bu 2
+\fB"/bin/sh \-c"\fP is the command we want to run in the container
+.IP \(bu 2
+\fB"while true; do echo hello world; sleep 1; done"\fP is the mini script we want to run, that will just print hello world once a second until we stop it.
+.IP \(bu 2
+\fB$CONTAINER_ID\fP the output of the run command will return a container id, we can use in future commands to see what is going on with this process.
+.UNINDENT
+.sp
+.nf
+.ft C
+$ docker logs $CONTAINER_ID
+.ft P
+.fi
+.sp
+Check the logs make sure it is working correctly.
+.INDENT 0.0
+.IP \(bu 2
+\fB"docker logs\fP" This will return the logs for a container
+.IP \(bu 2
+\fB$CONTAINER_ID\fP The Id of the container we want the logs for.
+.UNINDENT
+.sp
+.nf
+.ft C
+docker attach $CONTAINER_ID
+.ft P
+.fi
+.sp
+Attach to the container to see the results in realtime.
+.INDENT 0.0
+.IP \(bu 2
+\fB"docker attach\fP" This will allow us to attach to a background process to see what is going on.
+.IP \(bu 2
+\fB$CONTAINER_ID\fP The Id of the container we want to attach to.
+.UNINDENT
+.sp
+.nf
+.ft C
+docker ps
+.ft P
+.fi
+.sp
+Check the process list to make sure it is running.
+.INDENT 0.0
+.IP \(bu 2
+\fB"docker ps"\fP this shows all running process managed by docker
+.UNINDENT
+.sp
+.nf
+.ft C
+$ docker stop $CONTAINER_ID
+.ft P
+.fi
+.sp
+Stop the container, since we don\(aqt need it anymore.
+.INDENT 0.0
+.IP \(bu 2
+\fB"docker stop"\fP This stops a container
+.IP \(bu 2
+\fB$CONTAINER_ID\fP The Id of the container we want to stop.
+.UNINDENT
+.sp
+.nf
+.ft C
+docker ps
+.ft P
+.fi
+.sp
+Make sure it is really stopped.
+.sp
+\fBVideo:\fP
+.sp
+See the example in action
+.sp
+Continue to the \fIpython_web_app\fP example.
+.SS Notes:
+.INDENT 0.0
+.IP \(bu 2
+\fBDocker daemon\fP The docker daemon is started by \fBsudo docker \-d\fP, Vagrant may have started
+the Docker daemon for you, but you will need to restart it this way if it was terminated. Otherwise
+it may give you \fBCouldn\(aqt create Tag store: open /var/lib/docker/repositories: permission denied\fP
+.UNINDENT
+.SS Building a python web app
+.sp
+The goal of this example is to show you how you can author your own docker images using a parent image, making changes to it, and then saving the results as a new image. We will do that by making a simple hello flask web application image.
+.sp
+\fBSteps:\fP
+.sp
+.nf
+.ft C
+$ docker import shykes/pybuilder
+.ft P
+.fi
+.sp
+We are importing the "shykes/pybuilder" docker image
+.sp
+.nf
+.ft C
+$ URL=http://github.com/shykes/helloflask/archive/master.tar.gz
+.ft P
+.fi
+.sp
+We set a URL variable that points to a tarball of a simple helloflask web app
+.sp
+.nf
+.ft C
+$ BUILD_JOB=$(docker run \-t shykes/pybuilder:1d9aab3737242c65 /usr/local/bin/buildapp $URL)
+.ft P
+.fi
+.sp
+Inside of the "shykes/pybuilder" image there is a command called buildapp, we are running that command and passing the $URL variable from step 2 to it, and running the whole thing inside of a new container. BUILD_JOB will be set with the new container_id. "1d9aab3737242c65" came from the output of step 1 when importing image. also available from \(aqdocker images\(aq.
+.sp
+.nf
+.ft C
+$ docker attach $BUILD_JOB
+[...]
+.ft P
+.fi
+.sp
+We attach to the new container to see what is going on. Ctrl\-C to disconnect
+.sp
+.nf
+.ft C
+$ BUILD_IMG=$(docker commit $BUILD_JOB _/builds/github.com/hykes/helloflask/master)
+.ft P
+.fi
+.sp
+Save the changes we just made in the container to a new image called "_/builds/github.com/hykes/helloflask/master" and save the image id in the BUILD_IMG variable name.
+.sp
+.nf
+.ft C
+$ WEB_WORKER=$(docker run \-p 5000 $BUILD_IMG /usr/local/bin/runapp)
+.ft P
+.fi
+.sp
+Use the new image we just created and create a new container with network port 5000, and return the container id and store in the WEB_WORKER variable.
+.sp
+.nf
+.ft C
+$ docker logs $WEB_WORKER
+ * Running on http://0.0.0.0:5000/
+.ft P
+.fi
+.sp
+view the logs for the new container using the WEB_WORKER variable, and if everything worked as planned you should see the line "Running on \fI\%http://0.0.0.0:5000/\fP" in the log output.
+.sp
+\fBVideo:\fP
+.sp
+See the example in action
+.sp
+Continue to the \fI\%base commands\fP
+.SH CONTRIBUTING
+.SS Contributing to Docker
+.sp
+Want to hack on Docker? Awesome! There are instructions to get you
+started on the website: \fI\%http://docker.io/gettingstarted.html\fP
+.sp
+They are probably not perfect, please let us know if anything feels
+wrong or incomplete.
+.SS Contribution guidelines
+.SS Pull requests are always welcome
+.sp
+We are always thrilled to receive pull requests, and do our best to
+process them as fast as possible. Not sure if that typo is worth a pull
+request? Do it! We will appreciate it.
+.sp
+If your pull request is not accepted on the first try, don\(aqt be
+discouraged! If there\(aqs a problem with the implementation, hopefully you
+received feedback on what to improve.
+.sp
+We\(aqre trying very hard to keep Docker lean and focused. We don\(aqt want it
+to do everything for everybody. This means that we might decide against
+incorporating a new feature. However, there might be a way to implement
+that feature \fIon top of\fP docker.
+.SS Discuss your design on the mailing list
+.sp
+We recommend discussing your plans \fI\%on the mailing
+list\fP
+before starting to code \- especially for more ambitious contributions.
+This gives other contributors a chance to point you in the right
+direction, give feedback on your design, and maybe point out if someone
+else is working on the same thing.
+.SS Create issues...
+.sp
+Any significant improvement should be documented as \fI\%a github
+issue\fP before anybody
+starts working on it.
+.SS ...but check for existing issues first!
+.sp
+Please take a moment to check that an issue doesn\(aqt already exist
+documenting your bug report or improvement proposal. If it does, it
+never hurts to add a quick "+1" or "I have this problem too". This will
+help prioritize the most common problems and requests.
+.SS Write tests
+.sp
+Golang has a great testing suite built in: use it! Take a look at
+existing tests for inspiration.
+.SS Setting up a dev environment
+.sp
+Instructions that have been verified to work on Ubuntu 12.10,
+.sp
+Then run the docker daemon,
+.sp
+Run the \fBgo install\fP command (above) to recompile docker.
+.SH COMMANDS
+.sp
+Contents:
+.SS Base commands
+.SS Running an interactive shell
+.sp
+.nf
+.ft C
+# Download a base image
+docker import base
+
+# Run an interactive shell in the base image,
+# allocate a tty, attach stdin and stdout
+docker run \-a \-i \-t base /bin/bash
+.ft P
+.fi
+.SS Starting a long\-running worker process
+.sp
+.nf
+.ft C
+# Run docker in daemon mode
+(docker \-d || echo "Docker daemon already running") &
+
+# Start a very useful long\-running process
+JOB=$(docker run base /bin/sh \-c "while true; do echo Hello world!; sleep 1; done")
+
+# Collect the output of the job so far
+docker logs $JOB
+
+# Kill the job
+docker kill $JOB
+.ft P
+.fi
+.SS Listing all running containers
+.sp
+.nf
+.ft C
+docker ps
+.ft P
+.fi
+.SS Expose a service on a TCP port
+.sp
+.nf
+.ft C
+# Expose port 4444 of this container, and tell netcat to listen on it
+JOB=$(docker run \-p 4444 base /bin/nc \-l \-p 4444)
+
+# Which public port is NATed to my container?
+PORT=$(docker port $JOB 4444)
+
+# Connect to the public port via the host\(aqs public address
+echo hello world | nc $(hostname) $PORT
+
+# Verify that the network connection worked
+echo "Daemon received: $(docker logs $JOB)"
+.ft P
+.fi
+.sp
+Continue to the complete \fI\%Command Line Interface\fP
+.SS Command Line Interface
+.SS Docker Usage
+.sp
+.nf
+.ft C
+$ docker
+  Usage: docker COMMAND [arg...]
+
+  A self\-sufficient runtime for linux containers.
+
+  Commands:
+      attach    Attach to a running container
+      commit    Create a new image from a container\(aqs changes
+      diff      Inspect changes on a container\(aqs filesystem
+      export    Stream the contents of a container as a tar archive
+      history   Show the history of an image
+      images    List images
+      import    Create a new filesystem image from the contents of a tarball
+      info      Display system\-wide information
+      inspect   Return low\-level information on a container
+      kill      Kill a running container
+      login     Register or Login to the docker registry server
+      logs      Fetch the logs of a container
+      port      Lookup the public\-facing port which is NAT\-ed to PRIVATE_PORT
+      ps        List containers
+      pull      Pull an image or a repository to the docker registry server
+      push      Push an image or a repository to the docker registry server
+      restart   Restart a running container
+      rm        Remove a container
+      rmi       Remove an image
+      run       Run a command in a new container
+      start     Start a stopped container
+      stop      Stop a running container
+      tag       Tag an image into a repository
+      version   Show the docker version information
+      wait      Block until a container stops, then print its exit code
+.ft P
+.fi
+.SS attach
+.sp
+.nf
+.ft C
+Usage: docker attach [OPTIONS]
+
+Attach to a running container
+
+  \-e=true: Attach to stderr
+  \-i=false: Attach to stdin
+  \-o=true: Attach to stdout
+.ft P
+.fi
+.SS commit
+.sp
+.nf
+.ft C
+Usage: docker commit [OPTIONS] CONTAINER [DEST]
+
+Create a new image from a container\(aqs changes
+
+\-m="": Commit message
+.ft P
+.fi
+.SS diff
+.sp
+.nf
+.ft C
+Usage: docker diff CONTAINER [OPTIONS]
+
+Inspect changes on a container\(aqs filesystem
+.ft P
+.fi
+.SS export
+.sp
+.nf
+.ft C
+Usage: docker export CONTAINER
+
+Export the contents of a filesystem as a tar archive
+.ft P
+.fi
+.SS history
+.sp
+.nf
+.ft C
+Usage: docker history [OPTIONS] IMAGE
+
+Show the history of an image
+.ft P
+.fi
+.SS images
+.sp
+.nf
+.ft C
+Usage: docker images [OPTIONS] [NAME]
+
+List images
+
+  \-a=false: show all images
+  \-q=false: only show numeric IDs
+.ft P
+.fi
+.SS import
+.sp
+Usage: docker import [OPTIONS] URL|\- [REPOSITORY [TAG]]
+.sp
+Create a new filesystem image from the contents of a tarball
+.SS info
+.sp
+.nf
+.ft C
+Usage: docker info
+
+Display system\-wide information.
+.ft P
+.fi
+.SS inspect
+.sp
+.nf
+.ft C
+Usage: docker inspect [OPTIONS] CONTAINER
+
+Return low\-level information on a container
+.ft P
+.fi
+.SS kill
+.sp
+.nf
+.ft C
+Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...]
+
+Kill a running container
+.ft P
+.fi
+.SS login
+.sp
+.nf
+.ft C
+Usage: docker login
+
+Register or Login to the docker registry server
+.ft P
+.fi
+.SS logs
+.sp
+.nf
+.ft C
+Usage: docker logs [OPTIONS] CONTAINER
+
+Fetch the logs of a container
+.ft P
+.fi
+.SS port
+.sp
+.nf
+.ft C
+Usage: docker port [OPTIONS] CONTAINER PRIVATE_PORT
+
+Lookup the public\-facing port which is NAT\-ed to PRIVATE_PORT
+.ft P
+.fi
+.SS ps
+.sp
+.nf
+.ft C
+Usage: docker ps [OPTIONS]
+
+List containers
+
+  \-a=false: Show all containers. Only running containers are shown by default.
+  \-notrunc=false: Don\(aqt truncate output
+  \-q=false: Only display numeric IDs
+.ft P
+.fi
+.SS pull
+.sp
+.nf
+.ft C
+Usage: docker pull NAME
+
+Pull an image or a repository from the registry
+.ft P
+.fi
+.SS push
+.sp
+.nf
+.ft C
+Usage: docker push NAME
+
+Push an image or a repository to the registry
+.ft P
+.fi
+.SS restart
+.sp
+.nf
+.ft C
+Usage: docker restart [OPTIONS] NAME
+
+Restart a running container
+.ft P
+.fi
+.SS rm
+.sp
+.nf
+.ft C
+Usage: docker rm [OPTIONS] CONTAINER
+
+Remove a container
+.ft P
+.fi
+.SS rmi
+.sp
+.nf
+.ft C
+Usage: docker rmi [OPTIONS] IMAGE
+
+Remove an image
+
+  \-a=false: Use IMAGE as a path and remove ALL images in this path
+  \-r=false: Use IMAGE as a regular expression instead of an exact name
+.ft P
+.fi
+.SS run
+.sp
+.nf
+.ft C
+Usage: docker run [OPTIONS] IMAGE COMMAND [ARG...]
+
+Run a command in a new container
+
+  \-a=false: Attach stdin and stdout
+  \-c="": Comment
+  \-i=false: Keep stdin open even if not attached
+  \-m=0: Memory limit (in bytes)
+  \-p=[]: Map a network port to the container
+  \-t=false: Allocate a pseudo\-tty
+  \-u="": Username or UID
+.ft P
+.fi
+.SS start
+.sp
+.nf
+.ft C
+Usage: docker start [OPTIONS] NAME
+
+Start a stopped container
+.ft P
+.fi
+.SS stop
+.sp
+.nf
+.ft C
+Usage: docker stop [OPTIONS] NAME
+
+Stop a running container
+.ft P
+.fi
+.SS tag
+.sp
+.nf
+.ft C
+Usage: docker tag [OPTIONS] IMAGE REPOSITORY [TAG]
+
+Tag an image into a repository
+
+  \-f=false: Force
+.ft P
+.fi
+.SS version
+.sp
+.nf
+.ft C
+Usage: docker version
+
+Show the docker version information
+.ft P
+.fi
+.SS wait
+.sp
+.nf
+.ft C
+Usage: docker wait [OPTIONS] NAME
+
+Block until a container stops, then print its exit code.
+.ft P
+.fi
+.SH FAQ
+.SS Most frequently asked questions.
+.sp
+\fB1. How much does Docker cost?\fP
+.sp
+Docker is 100% free, it is open source, so you can use it without paying.
+.sp
+\fB2. What open source license are you using?\fP
+.sp
+We are using the Apache License Version 2.0, see it here: \fI\%https://github.com/dotcloud/docker/blob/master/LICENSE\fP
+.sp
+\fB3. Does Docker run on Mac OS X or Windows?\fP
+.sp
+Not at this time, Docker currently only runs on Linux, but you can use VirtualBox to run Docker in a virtual machine on your box, and get the best of both worlds. Check out the \fI\%MacOSX\fP and \fI\%Windows\fP installation guides.
+.sp
+\fB4. How do containers compare to virtual machines?\fP
+.sp
+They are complementary. VMs are best used to allocate chunks of hardware resources. Containers operate at the process level, which makes them very lightweight and perfect as a unit of software delivery.
+.sp
+\fB5. Can I help by adding some questions and answers?\fP
+.sp
+Definitely! You can fork \fI\%the repo\fP and edit the documentation sources.
+.sp
+\fB42. Where can I find more answers?\fP
+.sp
+You can find more answers on:
+.INDENT 0.0
+.IP \(bu 2
+\fI\%IRC: docker on freenode\fP
+.IP \(bu 2
+\fI\%Github\fP
+.IP \(bu 2
+\fI\%Ask questions on Stackoverflow\fP
+.IP \(bu 2
+\fI\%Join the conversation on Twitter\fP
+.UNINDENT
+.sp
+Looking for something else to read? Checkout the \fIhello_world\fP example.
+.SH AUTHOR
+Team Docker
+.SH COPYRIGHT
+2013, Team Docker
+.\" Generated by docutils manpage writer.
+.\" 
+.

+ 74 - 0
packaging/debian/lxc-docker.init

@@ -0,0 +1,74 @@
+#!/bin/sh
+
+### BEGIN INIT INFO
+# Provides:             lxc-docker
+# Required-Start:       $syslog $remote_fs
+# Required-Stop:        $syslog $remote_fs
+# Default-Start:        2 3 4 5
+# Default-Stop:         0 1 6
+# Short-Description:    Linux container runtime
+# Description:          Linux container runtime
+### END INIT INFO
+
+DOCKER=/usr/bin/lxc-docker
+
+# Check lxc-docker is present
+[ -x $DOCKER ] || (log_failure_msg "lxc-docker not present"; exit 1)
+
+PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
+
+# Get lsb functions
+. /lib/lsb/init-functions
+
+check_root_id ()
+{
+  if [ "$(id -u)" != "0" ]; then
+    log_failure_msg "LXC Docker must be run as root"; exit 1
+  fi
+}
+
+case "$1" in
+  start)
+    check_root_id || exit 1
+    log_begin_msg "Starting LXC Docker"
+    mount | grep cgroup >/dev/null || mount -t cgroup none /sys/fs/cgroup
+    start-stop-daemon --start --background --exec "$DOCKER" -- -d
+    log_end_msg $?
+    ;;
+
+  stop)
+    check_root_id || exit 1
+    log_begin_msg "Stopping LXC Docker"
+    docker_pid=`pgrep -f "$DOCKER -d"`
+    [ -n "$docker_pid" ] && kill $docker_pid
+    log_end_msg $?
+    ;;
+
+  restart)
+    check_root_id || exit 1
+    docker_pid=`pgrep -f "$DOCKER -d"`
+    [ -n "$docker_pid" ] && /etc/init.d/lxc-docker stop
+    /etc/init.d/lxc-docker start
+    ;;
+
+  force-reload)
+    check_root_id || exit 1
+    /etc/init.d/lxc-docker restart
+    ;;
+
+  status)
+    docker_pid=`pgrep -f "$DOCKER -d"`
+    if [ -z "$docker_pid" ] ; then
+      echo "lxc-docker not running"
+    else
+      echo "lxc-docker running (pid $docker_pid)"
+    fi
+    ;;
+
+  *)
+    echo "Usage: /etc/init.d/lxc-docker {start|stop|restart|status}"
+    exit 1
+    ;;
+esac
+
+exit 0

+ 0 - 13
packaging/debian/lxc-docker.postinst

@@ -1,13 +0,0 @@
-#!/bin/sh
-
-# Ensure cgroup is mounted
-if [ -z "`/bin/egrep -e '^cgroup' /etc/fstab`" ]; then
-    /bin/echo 'cgroup  /sys/fs/cgroup  cgroup  defaults  0   0' >>/etc/fstab
-fi
-if [ -z "`/bin/mount | /bin/egrep -e '^cgroup'`" ]; then
-    /bin/mount /sys/fs/cgroup
-fi
-
-# Start docker
-/usr/sbin/update-rc.d docker defaults
-/etc/init.d/docker start

+ 0 - 6
packaging/debian/rules

@@ -1,12 +1,6 @@
 #!/usr/bin/make -f
 #!/usr/bin/make -f
 # -*- makefile -*-
 # -*- makefile -*-
-# Sample debian/rules that uses debhelper.
-# This file was originally written by Joey Hess and Craig Small.
-# As a special exception, when this file is copied by dh-make into a
-# dh-make output file, you may use that output file without restriction.
-# This special exception was added by Craig Small in version 0.37 of dh-make.
 
 
-# Uncomment this to turn on verbose mode.
 #export DH_VERBOSE=1
 #export DH_VERBOSE=1
 
 
 %:
 %:

+ 0 - 748
registry.go

@@ -1,748 +0,0 @@
-package docker
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"github.com/dotcloud/docker/auth"
-	"github.com/shin-/cookiejar"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"os"
-	"path"
-	"strings"
-)
-
-//FIXME: Set the endpoint in a conf file or via commandline
-const INDEX_ENDPOINT = auth.INDEX_SERVER + "/v1"
-
-// Build an Image object from raw json data
-func NewImgJson(src []byte) (*Image, error) {
-	ret := &Image{}
-
-	Debugf("Json string: {%s}\n", src)
-	// FIXME: Is there a cleaner way to "purify" the input json?
-	if err := json.Unmarshal(src, ret); err != nil {
-		return nil, err
-	}
-	return ret, nil
-}
-
-func doWithCookies(c *http.Client, req *http.Request) (*http.Response, error) {
-	for _, cookie := range c.Jar.Cookies(req.URL) {
-		req.AddCookie(cookie)
-	}
-	return c.Do(req)
-}
-
-// Retrieve the history of a given image from the Registry.
-// Return a list of the parent's json (requested image included)
-func (graph *Graph) getRemoteHistory(imgId, registry string, token []string) ([]string, error) {
-	client := graph.getHttpClient()
-
-	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/ancestry", nil)
-	if err != nil {
-		return nil, err
-	}
-	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
-	res, err := client.Do(req)
-	if err != nil || res.StatusCode != 200 {
-		if res != nil {
-			return nil, fmt.Errorf("Internal server error: %d trying to fetch remote history for %s", res.StatusCode, imgId)
-		}
-		return nil, err
-	}
-	defer res.Body.Close()
-
-	jsonString, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return nil, fmt.Errorf("Error while reading the http response: %s\n", err)
-	}
-
-	Debugf("Ancestry: %s", jsonString)
-	history := new([]string)
-	if err := json.Unmarshal(jsonString, history); err != nil {
-		return nil, err
-	}
-	return *history, nil
-}
-
-func (graph *Graph) getHttpClient() *http.Client {
-	if graph.httpClient == nil {
-		graph.httpClient = &http.Client{}
-		graph.httpClient.Jar = cookiejar.NewCookieJar()
-	}
-	return graph.httpClient
-}
-
-// Check if an image exists in the Registry
-func (graph *Graph) LookupRemoteImage(imgId, registry string, authConfig *auth.AuthConfig) bool {
-	rt := &http.Transport{Proxy: http.ProxyFromEnvironment}
-
-	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/json", nil)
-	if err != nil {
-		return false
-	}
-	req.SetBasicAuth(authConfig.Username, authConfig.Password)
-	res, err := rt.RoundTrip(req)
-	return err == nil && res.StatusCode == 307
-}
-
-func (graph *Graph) getImagesInRepository(repository string, authConfig *auth.AuthConfig) ([]map[string]string, error) {
-	u := INDEX_ENDPOINT + "/repositories/" + repository + "/images"
-	req, err := http.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, err
-	}
-	if authConfig != nil && len(authConfig.Username) > 0 {
-		req.SetBasicAuth(authConfig.Username, authConfig.Password)
-	}
-	res, err := graph.getHttpClient().Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer res.Body.Close()
-
-	// Repository doesn't exist yet
-	if res.StatusCode == 404 {
-		return nil, nil
-	}
-
-	jsonData, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return nil, err
-	}
-
-	imageList := []map[string]string{}
-
-	err = json.Unmarshal(jsonData, &imageList)
-	if err != nil {
-		Debugf("Body: %s (%s)\n", res.Body, u)
-		return nil, err
-	}
-
-	return imageList, nil
-}
-
-// Retrieve an image from the Registry.
-// Returns the Image object as well as the layer as an Archive (io.Reader)
-func (graph *Graph) getRemoteImage(stdout io.Writer, imgId, registry string, token []string) (*Image, Archive, error) {
-	client := graph.getHttpClient()
-
-	fmt.Fprintf(stdout, "Pulling %s metadata\r\n", imgId)
-	// Get the Json
-	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/json", nil)
-	if err != nil {
-		return nil, nil, fmt.Errorf("Failed to download json: %s", err)
-	}
-	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
-	res, err := client.Do(req)
-	if err != nil {
-		return nil, nil, fmt.Errorf("Failed to download json: %s", err)
-	}
-	if res.StatusCode != 200 {
-		return nil, nil, fmt.Errorf("HTTP code %d", res.StatusCode)
-	}
-	defer res.Body.Close()
-
-	jsonString, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return nil, nil, fmt.Errorf("Failed to download json: %s", err)
-	}
-
-	img, err := NewImgJson(jsonString)
-	if err != nil {
-		return nil, nil, fmt.Errorf("Failed to parse json: %s", err)
-	}
-	img.Id = imgId
-
-	// Get the layer
-	fmt.Fprintf(stdout, "Pulling %s fs layer\r\n", imgId)
-	req, err = http.NewRequest("GET", registry+"/images/"+imgId+"/layer", nil)
-	if err != nil {
-		return nil, nil, fmt.Errorf("Error while getting from the server: %s\n", err)
-	}
-	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
-	res, err = client.Do(req)
-	if err != nil {
-		return nil, nil, err
-	}
-	return img, ProgressReader(res.Body, int(res.ContentLength), stdout, "Downloading %v/%v (%v)"), nil
-}
-
-func (graph *Graph) getRemoteTags(stdout io.Writer, registries []string, repository string, token []string) (map[string]string, error) {
-	client := graph.getHttpClient()
-	if strings.Count(repository, "/") == 0 {
-		// This will be removed once the Registry supports auto-resolution on
-		// the "library" namespace
-		repository = "library/" + repository
-	}
-	for _, host := range registries {
-		endpoint := fmt.Sprintf("https://%s/v1/repositories/%s/tags", host, repository)
-		req, err := http.NewRequest("GET", endpoint, nil)
-		if err != nil {
-			return nil, err
-		}
-		req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
-		res, err := client.Do(req)
-		defer res.Body.Close()
-		Debugf("Got status code %d from %s", res.StatusCode, endpoint)
-		if err != nil || (res.StatusCode != 200 && res.StatusCode != 404) {
-			continue
-		} else if res.StatusCode == 404 {
-			return nil, fmt.Errorf("Repository not found")
-		}
-
-		result := make(map[string]string)
-
-		rawJson, err := ioutil.ReadAll(res.Body)
-		if err != nil {
-			return nil, err
-		}
-		if err = json.Unmarshal(rawJson, &result); err != nil {
-			return nil, err
-		}
-		return result, nil
-	}
-	return nil, fmt.Errorf("Could not reach any registry endpoint")
-}
-
-func (graph *Graph) getImageForTag(stdout io.Writer, tag, remote, registry string, token []string) (string, error) {
-	client := graph.getHttpClient()
-
-	if !strings.Contains(remote, "/") {
-		remote = "library/" + remote
-	}
-
-	registryEndpoint := "https://" + registry + "/v1"
-	repositoryTarget := registryEndpoint + "/repositories/" + remote + "/tags/" + tag
-
-	req, err := http.NewRequest("GET", repositoryTarget, nil)
-	if err != nil {
-		return "", err
-	}
-	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
-	res, err := client.Do(req)
-	if err != nil {
-		return "", fmt.Errorf("Error while retrieving repository info: %v", err)
-	}
-	defer res.Body.Close()
-	if res.StatusCode == 403 {
-		return "", fmt.Errorf("You aren't authorized to access this resource")
-	} else if res.StatusCode != 200 {
-		return "", fmt.Errorf("HTTP code: %d", res.StatusCode)
-	}
-
-	var imgId string
-	rawJson, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return "", err
-	}
-	if err = json.Unmarshal(rawJson, &imgId); err != nil {
-		return "", err
-	}
-	return imgId, nil
-}
-
-func (graph *Graph) PullImage(stdout io.Writer, imgId, registry string, token []string) error {
-	history, err := graph.getRemoteHistory(imgId, registry, token)
-	if err != nil {
-		return err
-	}
-	// FIXME: Try to stream the images?
-	// FIXME: Launch the getRemoteImage() in goroutines
-	for _, id := range history {
-		if !graph.Exists(id) {
-			img, layer, err := graph.getRemoteImage(stdout, id, registry, token)
-			if err != nil {
-				// FIXME: Keep goging in case of error?
-				return err
-			}
-			if err = graph.Register(layer, false, img); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (graph *Graph) PullRepository(stdout io.Writer, remote, askedTag string, repositories *TagStore, authConfig *auth.AuthConfig) error {
-	client := graph.getHttpClient()
-
-	fmt.Fprintf(stdout, "Pulling repository %s from %s\r\n", remote, INDEX_ENDPOINT)
-	repositoryTarget := INDEX_ENDPOINT + "/repositories/" + remote + "/images"
-
-	req, err := http.NewRequest("GET", repositoryTarget, nil)
-	if err != nil {
-		return err
-	}
-	if authConfig != nil && len(authConfig.Username) > 0 {
-		req.SetBasicAuth(authConfig.Username, authConfig.Password)
-	}
-	req.Header.Set("X-Docker-Token", "true")
-
-	res, err := client.Do(req)
-	if err != nil {
-		return err
-	}
-	defer res.Body.Close()
-	if res.StatusCode == 401 {
-		return fmt.Errorf("Please login first (HTTP code %d)", res.StatusCode)
-	}
-	// TODO: Right now we're ignoring checksums in the response body.
-	// In the future, we need to use them to check image validity.
-	if res.StatusCode != 200 {
-		return fmt.Errorf("HTTP code: %d", res.StatusCode)
-	}
-
-	var token, endpoints []string
-	if res.Header.Get("X-Docker-Token") != "" {
-		token = res.Header["X-Docker-Token"]
-	}
-	if res.Header.Get("X-Docker-Endpoints") != "" {
-		endpoints = res.Header["X-Docker-Endpoints"]
-	} else {
-		return fmt.Errorf("Index response didn't contain any endpoints")
-	}
-
-	checksumsJson, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return err
-	}
-
-	// Reload the json file to make sure not to overwrite faster sums
-	err = func() error {
-		localChecksums := make(map[string]string)
-		remoteChecksums := []ImgListJson{}
-		checksumDictPth := path.Join(graph.Root, "checksums")
-
-		if err := json.Unmarshal(checksumsJson, &remoteChecksums); err != nil {
-			return err
-		}
-
-		graph.lockSumFile.Lock()
-		defer graph.lockSumFile.Unlock()
-
-		if checksumDict, err := ioutil.ReadFile(checksumDictPth); err == nil {
-			if err := json.Unmarshal(checksumDict, &localChecksums); err != nil {
-				return err
-			}
-		}
-
-		for _, elem := range remoteChecksums {
-			localChecksums[elem.Id] = elem.Checksum
-		}
-
-		checksumsJson, err = json.Marshal(localChecksums)
-		if err != nil {
-			return err
-		}
-		if err := ioutil.WriteFile(checksumDictPth, checksumsJson, 0600); err != nil {
-			return err
-		}
-		return nil
-	}()
-	if err != nil {
-		return err
-	}
-
-	var tagsList map[string]string
-	if askedTag == "" {
-		tagsList, err = graph.getRemoteTags(stdout, endpoints, remote, token)
-		if err != nil {
-			return err
-		}
-	} else {
-		tagsList = map[string]string{askedTag: ""}
-	}
-
-	for askedTag, imgId := range tagsList {
-		fmt.Fprintf(stdout, "Resolving tag \"%s:%s\" from %s\n", remote, askedTag, endpoints)
-		success := false
-		for _, registry := range endpoints {
-			if imgId == "" {
-				imgId, err = graph.getImageForTag(stdout, askedTag, remote, registry, token)
-				if err != nil {
-					fmt.Fprintf(stdout, "Error while retrieving image for tag: %v (%v) ; "+
-						"checking next endpoint", askedTag, err)
-					continue
-				}
-			}
-
-			if err := graph.PullImage(stdout, imgId, "https://"+registry+"/v1", token); err != nil {
-				return err
-			}
-
-			if err = repositories.Set(remote, askedTag, imgId, true); err != nil {
-				return err
-			}
-			success = true
-		}
-
-		if !success {
-			return fmt.Errorf("Could not find repository on any of the indexed registries.")
-		}
-	}
-
-	if err = repositories.Save(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Push a local image to the registry
-func (graph *Graph) PushImage(stdout io.Writer, img *Image, registry string, token []string) error {
-	registry = "https://" + registry + "/v1"
-
-	client := graph.getHttpClient()
-	jsonRaw, err := ioutil.ReadFile(path.Join(graph.Root, img.Id, "json"))
-	if err != nil {
-		return fmt.Errorf("Error while retreiving the path for {%s}: %s", img.Id, err)
-	}
-
-	fmt.Fprintf(stdout, "Pushing %s metadata\r\n", img.Id)
-
-	// FIXME: try json with UTF8
-	jsonData := strings.NewReader(string(jsonRaw))
-	req, err := http.NewRequest("PUT", registry+"/images/"+img.Id+"/json", jsonData)
-	if err != nil {
-		return err
-	}
-	req.Header.Add("Content-type", "application/json")
-	req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
-
-	checksum, err := img.Checksum()
-	if err != nil {
-		return fmt.Errorf("Error while retrieving checksum for %s: %v", img.Id, err)
-	}
-	req.Header.Set("X-Docker-Checksum", checksum)
-	Debugf("Setting checksum for %s: %s", img.ShortId(), checksum)
-	res, err := doWithCookies(client, req)
-	if err != nil {
-		return fmt.Errorf("Failed to upload metadata: %s", err)
-	}
-	defer res.Body.Close()
-	if len(res.Cookies()) > 0 {
-		client.Jar.SetCookies(req.URL, res.Cookies())
-	}
-	if res.StatusCode != 200 {
-		errBody, err := ioutil.ReadAll(res.Body)
-		if err != nil {
-			return fmt.Errorf("HTTP code %d while uploading metadata and error when"+
-				" trying to parse response body: %v", res.StatusCode, err)
-		}
-		var jsonBody map[string]string
-		if err := json.Unmarshal(errBody, &jsonBody); err != nil {
-			errBody = []byte(err.Error())
-		} else if jsonBody["error"] == "Image already exists" {
-			fmt.Fprintf(stdout, "Image %v already uploaded ; skipping\n", img.Id)
-			return nil
-		}
-		return fmt.Errorf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody)
-	}
-
-	fmt.Fprintf(stdout, "Pushing %s fs layer\r\n", img.Id)
-	root, err := img.root()
-	if err != nil {
-		return err
-	}
-
-	var layerData *TempArchive
-	// If the archive exists, use it
-	file, err := os.Open(layerArchivePath(root))
-	if err != nil {
-		if os.IsNotExist(err) {
-			// If the archive does not exist, create one from the layer
-			layerData, err = graph.TempLayerArchive(img.Id, Xz, stdout)
-			if err != nil {
-				return fmt.Errorf("Failed to generate layer archive: %s", err)
-			}
-		} else {
-			return err
-		}
-	} else {
-		defer file.Close()
-		st, err := file.Stat()
-		if err != nil {
-			return err
-		}
-		layerData = &TempArchive{file, st.Size()}
-	}
-
-	req3, err := http.NewRequest("PUT", registry+"/images/"+img.Id+"/layer",
-		ProgressReader(layerData, int(layerData.Size), stdout, ""))
-	if err != nil {
-		return err
-	}
-
-	req3.ContentLength = -1
-	req3.TransferEncoding = []string{"chunked"}
-	req3.Header.Set("Authorization", "Token "+strings.Join(token, ","))
-	res3, err := doWithCookies(client, req3)
-	if err != nil {
-		return fmt.Errorf("Failed to upload layer: %s", err)
-	}
-	defer res3.Body.Close()
-
-	if res3.StatusCode != 200 {
-		errBody, err := ioutil.ReadAll(res3.Body)
-		if err != nil {
-			return fmt.Errorf("HTTP code %d while uploading metadata and error when"+
-				" trying to parse response body: %v", res.StatusCode, err)
-		}
-		return fmt.Errorf("Received HTTP code %d while uploading layer: %s", res3.StatusCode, errBody)
-	}
-	return nil
-}
-
-// push a tag on the registry.
-// Remote has the format '<user>/<repo>
-func (graph *Graph) pushTag(remote, revision, tag, registry string, token []string) error {
-	// "jsonify" the string
-	revision = "\"" + revision + "\""
-	registry = "https://" + registry + "/v1"
-
-	Debugf("Pushing tags for rev [%s] on {%s}\n", revision, registry+"/users/"+remote+"/"+tag)
-
-	client := graph.getHttpClient()
-	req, err := http.NewRequest("PUT", registry+"/repositories/"+remote+"/tags/"+tag, strings.NewReader(revision))
-	if err != nil {
-		return err
-	}
-	req.Header.Add("Content-type", "application/json")
-	req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
-	req.ContentLength = int64(len(revision))
-	res, err := doWithCookies(client, req)
-	if err != nil {
-		return err
-	}
-	res.Body.Close()
-	if res.StatusCode != 200 && res.StatusCode != 201 {
-		return fmt.Errorf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote)
-	}
-	return nil
-}
-
-// FIXME: this should really be PushTag
-func (graph *Graph) pushPrimitive(stdout io.Writer, remote, tag, imgId, registry string, token []string) error {
-	// Check if the local impage exists
-	img, err := graph.Get(imgId)
-	if err != nil {
-		fmt.Fprintf(stdout, "Skipping tag %s:%s: %s does not exist\r\n", remote, tag, imgId)
-		return nil
-	}
-	fmt.Fprintf(stdout, "Pushing image %s:%s\r\n", remote, tag)
-	// Push the image
-	if err = graph.PushImage(stdout, img, registry, token); err != nil {
-		return err
-	}
-	fmt.Fprintf(stdout, "Registering tag %s:%s\r\n", remote, tag)
-	// And then the tag
-	if err = graph.pushTag(remote, imgId, tag, registry, token); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Retrieve the checksum of an image
-// Priority:
-// - Check on the stored checksums
-// - Check if the archive exists, if it does not, ask the registry
-// - If the archive does exists, process the checksum from it
-// - If the archive does not exists and not found on registry, process checksum from layer
-func (graph *Graph) getChecksum(imageId string) (string, error) {
-	// FIXME: Use in-memory map instead of reading the file each time
-	if sums, err := graph.getStoredChecksums(); err != nil {
-		return "", err
-	} else if checksum, exists := sums[imageId]; exists {
-		return checksum, nil
-	}
-
-	img, err := graph.Get(imageId)
-	if err != nil {
-		return "", err
-	}
-
-	if _, err := os.Stat(layerArchivePath(graph.imageRoot(imageId))); err != nil {
-		if os.IsNotExist(err) {
-			// TODO: Ask the registry for the checksum
-			//       As the archive is not there, it is supposed to come from a pull.
-		} else {
-			return "", err
-		}
-	}
-
-	checksum, err := img.Checksum()
-	if err != nil {
-		return "", err
-	}
-	return checksum, nil
-}
-
-type ImgListJson struct {
-	Id       string `json:"id"`
-	Checksum string `json:"checksum,omitempty"`
-	tag      string
-}
-
-// Push a repository to the registry.
-// Remote has the format '<user>/<repo>
-func (graph *Graph) PushRepository(stdout io.Writer, remote string, localRepo Repository, authConfig *auth.AuthConfig) error {
-	client := graph.getHttpClient()
-	// FIXME: Do not reset the cookie each time? (need to reset it in case updating latest of a repo and repushing)
-	client.Jar = cookiejar.NewCookieJar()
-	var imgList []*ImgListJson
-
-	fmt.Fprintf(stdout, "Processing checksums\n")
-	imageSet := make(map[string]struct{})
-
-	for tag, id := range localRepo {
-		img, err := graph.Get(id)
-		if err != nil {
-			return err
-		}
-		img.WalkHistory(func(img *Image) error {
-			if _, exists := imageSet[img.Id]; exists {
-				return nil
-			}
-			imageSet[img.Id] = struct{}{}
-			checksum, err := graph.getChecksum(img.Id)
-			if err != nil {
-				return err
-			}
-			imgList = append([]*ImgListJson{{
-				Id:       img.Id,
-				Checksum: checksum,
-				tag:      tag,
-			}}, imgList...)
-			return nil
-		})
-	}
-
-	imgListJson, err := json.Marshal(imgList)
-	if err != nil {
-		return err
-	}
-
-	Debugf("json sent: %s\n", imgListJson)
-
-	fmt.Fprintf(stdout, "Sending image list\n")
-	req, err := http.NewRequest("PUT", INDEX_ENDPOINT+"/repositories/"+remote+"/", bytes.NewReader(imgListJson))
-	if err != nil {
-		return err
-	}
-	req.SetBasicAuth(authConfig.Username, authConfig.Password)
-	req.ContentLength = int64(len(imgListJson))
-	req.Header.Set("X-Docker-Token", "true")
-
-	res, err := client.Do(req)
-	if err != nil {
-		return err
-	}
-	defer res.Body.Close()
-
-	for res.StatusCode >= 300 && res.StatusCode < 400 {
-		Debugf("Redirected to %s\n", res.Header.Get("Location"))
-		req, err = http.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJson))
-		if err != nil {
-			return err
-		}
-		req.SetBasicAuth(authConfig.Username, authConfig.Password)
-		req.ContentLength = int64(len(imgListJson))
-		req.Header.Set("X-Docker-Token", "true")
-
-		res, err = client.Do(req)
-		if err != nil {
-			return err
-		}
-		defer res.Body.Close()
-	}
-
-	if res.StatusCode != 200 && res.StatusCode != 201 {
-		errBody, err := ioutil.ReadAll(res.Body)
-		if err != nil {
-			return err
-		}
-		return fmt.Errorf("Error: Status %d trying to push repository %s: %s", res.StatusCode, remote, errBody)
-	}
-
-	var token, endpoints []string
-	if res.Header.Get("X-Docker-Token") != "" {
-		token = res.Header["X-Docker-Token"]
-		Debugf("Auth token: %v", token)
-	} else {
-		return fmt.Errorf("Index response didn't contain an access token")
-	}
-	if res.Header.Get("X-Docker-Endpoints") != "" {
-		endpoints = res.Header["X-Docker-Endpoints"]
-	} else {
-		return fmt.Errorf("Index response didn't contain any endpoints")
-	}
-
-	// FIXME: Send only needed images
-	for _, registry := range endpoints {
-		fmt.Fprintf(stdout, "Pushing repository %s to %s (%d tags)\r\n", remote, registry, len(localRepo))
-		// For each image within the repo, push them
-		for _, elem := range imgList {
-			if err := graph.pushPrimitive(stdout, remote, elem.tag, elem.Id, registry, token); err != nil {
-				// FIXME: Continue on error?
-				return err
-			}
-		}
-	}
-
-	req2, err := http.NewRequest("PUT", INDEX_ENDPOINT+"/repositories/"+remote+"/images", bytes.NewReader(imgListJson))
-	if err != nil {
-		return err
-	}
-	req2.SetBasicAuth(authConfig.Username, authConfig.Password)
-	req2.Header["X-Docker-Endpoints"] = endpoints
-	req2.ContentLength = int64(len(imgListJson))
-	res2, err := client.Do(req2)
-	if err != nil {
-		return err
-	}
-	defer res2.Body.Close()
-	if res2.StatusCode != 204 {
-		if errBody, err := ioutil.ReadAll(res2.Body); err != nil {
-			return err
-		} else {
-			return fmt.Errorf("Error: Status %d trying to push checksums %s: %s", res2.StatusCode, remote, errBody)
-		}
-	}
-
-	return nil
-}
-
-type SearchResults struct {
-	Query      string              `json:"query"`
-	NumResults int                 `json:"num_results"`
-	Results    []map[string]string `json:"results"`
-}
-
-func (graph *Graph) SearchRepositories(stdout io.Writer, term string) (*SearchResults, error) {
-	client := graph.getHttpClient()
-	u := INDEX_ENDPOINT + "/search?q=" + url.QueryEscape(term)
-	req, err := http.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, err
-	}
-	res, err := client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer res.Body.Close()
-	if res.StatusCode != 200 {
-		return nil, fmt.Errorf("Unexepected status code %d", res.StatusCode)
-	}
-	rawData, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return nil, err
-	}
-	result := new(SearchResults)
-	err = json.Unmarshal(rawData, result)
-	return result, err
-}

+ 471 - 0
registry/registry.go

@@ -0,0 +1,471 @@
+package registry
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"github.com/dotcloud/docker/auth"
+	"github.com/dotcloud/docker/utils"
+	"github.com/shin-/cookiejar"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+var ErrAlreadyExists error = errors.New("Image already exists")
+
+func doWithCookies(c *http.Client, req *http.Request) (*http.Response, error) {
+	for _, cookie := range c.Jar.Cookies(req.URL) {
+		req.AddCookie(cookie)
+	}
+	return c.Do(req)
+}
+
+// Retrieve the history of a given image from the Registry.
+// Return a list of the parent's json (requested image included)
+func (r *Registry) GetRemoteHistory(imgId, registry string, token []string) ([]string, error) {
+	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/ancestry", nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
+	res, err := r.client.Do(req)
+	if err != nil || res.StatusCode != 200 {
+		if res != nil {
+			return nil, fmt.Errorf("Internal server error: %d trying to fetch remote history for %s", res.StatusCode, imgId)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	jsonString, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, fmt.Errorf("Error while reading the http response: %s", err)
+	}
+
+	utils.Debugf("Ancestry: %s", jsonString)
+	history := new([]string)
+	if err := json.Unmarshal(jsonString, history); err != nil {
+		return nil, err
+	}
+	return *history, nil
+}
+
+// Check if an image exists in the Registry
+func (r *Registry) LookupRemoteImage(imgId, registry string, authConfig *auth.AuthConfig) bool {
+	rt := &http.Transport{Proxy: http.ProxyFromEnvironment}
+
+	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/json", nil)
+	if err != nil {
+		return false
+	}
+	req.SetBasicAuth(authConfig.Username, authConfig.Password)
+	res, err := rt.RoundTrip(req)
+	return err == nil && res.StatusCode == 307
+}
+
+func (r *Registry) getImagesInRepository(repository string, authConfig *auth.AuthConfig) ([]map[string]string, error) {
+	u := auth.IndexServerAddress() + "/repositories/" + repository + "/images"
+	req, err := http.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, err
+	}
+	if authConfig != nil && len(authConfig.Username) > 0 {
+		req.SetBasicAuth(authConfig.Username, authConfig.Password)
+	}
+	res, err := r.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	// Repository doesn't exist yet
+	if res.StatusCode == 404 {
+		return nil, nil
+	}
+
+	jsonData, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	imageList := []map[string]string{}
+	if err := json.Unmarshal(jsonData, &imageList); err != nil {
+		utils.Debugf("Body: %s (%s)\n", res.Body, u)
+		return nil, err
+	}
+
+	return imageList, nil
+}
+
+// Retrieve an image's json metadata from the Registry.
+// Returns the raw json bytes for the requested image id.
+func (r *Registry) GetRemoteImageJson(imgId, registry string, token []string) ([]byte, error) {
+	// Get the Json
+	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/json", nil)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to download json: %s", err)
+	}
+	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
+	res, err := r.client.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to download json: %s", err)
+	}
+	defer res.Body.Close()
+	if res.StatusCode != 200 {
+		return nil, fmt.Errorf("HTTP code %d", res.StatusCode)
+	}
+	jsonString, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString)
+	}
+	return jsonString, nil
+}
+
+func (r *Registry) GetRemoteImageLayer(imgId, registry string, token []string) (io.ReadCloser, int, error) {
+	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/layer", nil)
+	if err != nil {
+		return nil, -1, fmt.Errorf("Error while getting from the server: %s\n", err)
+	}
+	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
+	res, err := r.client.Do(req)
+	if err != nil {
+		return nil, -1, err
+	}
+	return res.Body, int(res.ContentLength), nil
+}
+
+func (r *Registry) GetRemoteTags(registries []string, repository string, token []string) (map[string]string, error) {
+	if strings.Count(repository, "/") == 0 {
+		// This will be removed once the Registry supports auto-resolution on
+		// the "library" namespace
+		repository = "library/" + repository
+	}
+	for _, host := range registries {
+		endpoint := fmt.Sprintf("https://%s/v1/repositories/%s/tags", host, repository)
+		req, err := http.NewRequest("GET", endpoint, nil)
+		if err != nil {
+			return nil, err
+		}
+		req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
+		res, err := r.client.Do(req)
+		defer res.Body.Close()
+		utils.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
+		if err != nil || (res.StatusCode != 200 && res.StatusCode != 404) {
+			continue
+		} else if res.StatusCode == 404 {
+			return nil, fmt.Errorf("Repository not found")
+		}
+
+		result := make(map[string]string)
+
+		rawJson, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			return nil, err
+		}
+		if err := json.Unmarshal(rawJson, &result); err != nil {
+			return nil, err
+		}
+		return result, nil
+	}
+	return nil, fmt.Errorf("Could not reach any registry endpoint")
+}
+
+// GetRepositoryData fetches the image list of a repository from the index
+// server. remote has the form "<user>/<repo>". On success it returns the
+// repository's images keyed by image id, together with the auth tokens and
+// registry endpoints advertised in the X-Docker-Token / X-Docker-Endpoints
+// response headers.
+func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) {
+	repositoryTarget := auth.IndexServerAddress() + "/repositories/" + remote + "/images"
+
+	req, err := http.NewRequest("GET", repositoryTarget, nil)
+	if err != nil {
+		return nil, err
+	}
+	// Only attach credentials when a username is configured; anonymous
+	// access is otherwise allowed.
+	if r.authConfig != nil && len(r.authConfig.Username) > 0 {
+		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
+	}
+	// Ask the index to issue a registry access token in the response headers.
+	req.Header.Set("X-Docker-Token", "true")
+
+	res, err := r.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	if res.StatusCode == 401 {
+		return nil, fmt.Errorf("Please login first (HTTP code %d)", res.StatusCode)
+	}
+	// TODO: Right now we're ignoring checksums in the response body.
+	// In the future, we need to use them to check image validity.
+	if res.StatusCode != 200 {
+		return nil, fmt.Errorf("HTTP code: %d", res.StatusCode)
+	}
+
+	// Tokens are optional (anonymous pulls may not get one)...
+	var tokens []string
+	if res.Header.Get("X-Docker-Token") != "" {
+		tokens = res.Header["X-Docker-Token"]
+	}
+
+	// ...but at least one registry endpoint is mandatory.
+	var endpoints []string
+	if res.Header.Get("X-Docker-Endpoints") != "" {
+		endpoints = res.Header["X-Docker-Endpoints"]
+	} else {
+		return nil, fmt.Errorf("Index response didn't contain any endpoints")
+	}
+
+	// The body is a JSON array of ImgData entries (id + checksum).
+	checksumsJson, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, err
+	}
+	remoteChecksums := []*ImgData{}
+	if err := json.Unmarshal(checksumsJson, &remoteChecksums); err != nil {
+		return nil, err
+	}
+
+	// Forge a better object from the retrieved data: index the list by
+	// image id for fast lookup.
+	imgsData := make(map[string]*ImgData)
+	for _, elem := range remoteChecksums {
+		imgsData[elem.Id] = elem
+	}
+
+	return &RepositoryData{
+		ImgList:   imgsData,
+		Endpoints: endpoints,
+		Tokens:    tokens,
+	}, nil
+}
+
+// PushImageJsonRegistry uploads a local image's JSON metadata (jsonRaw) to
+// the given registry host, authenticating with the supplied token list.
+// It returns ErrAlreadyExists when the registry reports the image is
+// already present, so callers can skip re-uploading the layer.
+func (r *Registry) PushImageJsonRegistry(imgData *ImgData, jsonRaw []byte, registry string, token []string) error {
+	registry = "https://" + registry + "/v1"
+	// FIXME: try json with UTF8
+	req, err := http.NewRequest("PUT", registry+"/images/"+imgData.Id+"/json", strings.NewReader(string(jsonRaw)))
+	if err != nil {
+		return err
+	}
+	req.Header.Add("Content-type", "application/json")
+	req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
+	// The checksum lets the registry verify the layer that follows.
+	req.Header.Set("X-Docker-Checksum", imgData.Checksum)
+
+	utils.Debugf("Setting checksum for %s: %s", imgData.Id, imgData.Checksum)
+	res, err := doWithCookies(r.client, req)
+	if err != nil {
+		return fmt.Errorf("Failed to upload metadata: %s", err)
+	}
+	defer res.Body.Close()
+	// Registry sessions are cookie-based: keep any cookie it set so the
+	// subsequent layer/tag requests land on the same session.
+	if len(res.Cookies()) > 0 {
+		r.client.Jar.SetCookies(req.URL, res.Cookies())
+	}
+	if res.StatusCode != 200 {
+		errBody, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err)
+		}
+		// Try to decode the error payload; "Image already exists" is a
+		// non-fatal condition surfaced as ErrAlreadyExists.
+		var jsonBody map[string]string
+		if err := json.Unmarshal(errBody, &jsonBody); err != nil {
+			errBody = []byte(err.Error())
+		} else if jsonBody["error"] == "Image already exists" {
+			return ErrAlreadyExists
+		}
+		return fmt.Errorf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody)
+	}
+	return nil
+}
+
+// PushImageLayerRegistry streams an image's filesystem layer to the given
+// registry host. The layer size is unknown up front, so the upload uses
+// chunked transfer encoding (ContentLength = -1).
+func (r *Registry) PushImageLayerRegistry(imgId string, layer io.Reader, registry string, token []string) error {
+	registry = "https://" + registry + "/v1"
+	req, err := http.NewRequest("PUT", registry+"/images/"+imgId+"/layer", layer)
+	if err != nil {
+		return err
+	}
+	req.ContentLength = -1
+	req.TransferEncoding = []string{"chunked"}
+	req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
+	res, err := doWithCookies(r.client, req)
+	if err != nil {
+		return fmt.Errorf("Failed to upload layer: %s", err)
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode != 200 {
+		errBody, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			// NOTE(review): this message says "metadata" but we are uploading
+			// a layer here — looks like a copy/paste artifact.
+			return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err)
+		}
+		return fmt.Errorf("Received HTTP code %d while uploading layer: %s", res.StatusCode, errBody)
+	}
+	return nil
+}
+
+// PushRegistryTag pushes a tag on the registry, pointing it at revision.
+// Remote has the format '<user>/<repo>'.
+func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token []string) error {
+	// "jsonify" the string: the registry expects the revision as a bare
+	// JSON string literal in the request body.
+	revision = "\"" + revision + "\""
+	registry = "https://" + registry + "/v1"
+
+	req, err := http.NewRequest("PUT", registry+"/repositories/"+remote+"/tags/"+tag, strings.NewReader(revision))
+	if err != nil {
+		return err
+	}
+	req.Header.Add("Content-type", "application/json")
+	req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
+	req.ContentLength = int64(len(revision))
+	res, err := doWithCookies(r.client, req)
+	if err != nil {
+		return err
+	}
+	// No payload expected; close immediately rather than deferring.
+	res.Body.Close()
+	if res.StatusCode != 200 && res.StatusCode != 201 {
+		return fmt.Errorf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote)
+	}
+	return nil
+}
+
+// PushImageJsonIndex publishes the image list for remote on the index
+// server. With validate=false it registers the repository
+// (PUT /repositories/<remote>/) and returns the tokens and endpoints the
+// index issues; with validate=true it confirms the completed push
+// (PUT /repositories/<remote>/images) and expects a 204 with no payload.
+func (r *Registry) PushImageJsonIndex(remote string, imgList []*ImgData, validate bool) (*RepositoryData, error) {
+	imgListJson, err := json.Marshal(imgList)
+	if err != nil {
+		return nil, err
+	}
+	var suffix string
+	if validate {
+		suffix = "images"
+	}
+	req, err := http.NewRequest("PUT", auth.IndexServerAddress()+"/repositories/"+remote+"/"+suffix, bytes.NewReader(imgListJson))
+	if err != nil {
+		return nil, err
+	}
+	req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
+	req.ContentLength = int64(len(imgListJson))
+	req.Header.Set("X-Docker-Token", "true")
+
+	res, err := r.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	// Redirect if necessary: PUTs are not auto-followed, so re-issue the
+	// request (with auth and headers) at each Location until a non-3xx.
+	// NOTE(review): the deferred Close inside the loop keeps every
+	// intermediate body open until the function returns — harmless for the
+	// handful of redirects expected here, but worth confirming.
+	for res.StatusCode >= 300 && res.StatusCode < 400 {
+		utils.Debugf("Redirected to %s\n", res.Header.Get("Location"))
+		req, err = http.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJson))
+		if err != nil {
+			return nil, err
+		}
+		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
+		req.ContentLength = int64(len(imgListJson))
+		req.Header.Set("X-Docker-Token", "true")
+
+		res, err = r.client.Do(req)
+		if err != nil {
+			return nil, err
+		}
+		defer res.Body.Close()
+	}
+
+	var tokens, endpoints []string
+	if !validate {
+		// Registration: the index must hand back both a token and the
+		// endpoints to push the actual images to.
+		if res.StatusCode != 200 && res.StatusCode != 201 {
+			errBody, err := ioutil.ReadAll(res.Body)
+			if err != nil {
+				return nil, err
+			}
+			return nil, fmt.Errorf("Error: Status %d trying to push repository %s: %s", res.StatusCode, remote, errBody)
+		}
+		if res.Header.Get("X-Docker-Token") != "" {
+			tokens = res.Header["X-Docker-Token"]
+			utils.Debugf("Auth token: %v", tokens)
+		} else {
+			return nil, fmt.Errorf("Index response didn't contain an access token")
+		}
+
+		if res.Header.Get("X-Docker-Endpoints") != "" {
+			endpoints = res.Header["X-Docker-Endpoints"]
+		} else {
+			return nil, fmt.Errorf("Index response didn't contain any endpoints")
+		}
+	}
+	if validate {
+		// Validation: only 204 (no content) counts as success.
+		if res.StatusCode != 204 {
+			if errBody, err := ioutil.ReadAll(res.Body); err != nil {
+				return nil, err
+			} else {
+				return nil, fmt.Errorf("Error: Status %d trying to push checksums %s: %s", res.StatusCode, remote, errBody)
+			}
+		}
+	}
+
+	return &RepositoryData{
+		Tokens:    tokens,
+		Endpoints: endpoints,
+	}, nil
+}
+
+// SearchRepositories queries the index's /search endpoint for repositories
+// matching term (URL-escaped) and decodes the JSON result.
+func (r *Registry) SearchRepositories(term string) (*SearchResults, error) {
+	u := auth.IndexServerAddress() + "/search?q=" + url.QueryEscape(term)
+	req, err := http.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, err
+	}
+	res, err := r.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	if res.StatusCode != 200 {
+		// NOTE(review): "Unexepected" is a typo in this user-visible error
+		// string ("Unexpected").
+		return nil, fmt.Errorf("Unexepected status code %d", res.StatusCode)
+	}
+	rawData, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, err
+	}
+	result := new(SearchResults)
+	err = json.Unmarshal(rawData, result)
+	return result, err
+}
+
+// ResetClient swaps in new credentials and replaces the cookie jar,
+// discarding any registry session cookies tied to the previous login.
+func (r *Registry) ResetClient(authConfig *auth.AuthConfig) {
+	r.authConfig = authConfig
+	r.client.Jar = cookiejar.NewCookieJar()
+}
+
+// GetAuthConfig returns a copy of the current credentials containing only
+// the username and email — the password is not included in the copy.
+func (r *Registry) GetAuthConfig() *auth.AuthConfig {
+	return &auth.AuthConfig{
+		Username: r.authConfig.Username,
+		Email:    r.authConfig.Email,
+	}
+}
+
+// SearchResults mirrors the JSON payload returned by the index's /search
+// endpoint.
+type SearchResults struct {
+	Query      string              `json:"query"`
+	NumResults int                 `json:"num_results"`
+	Results    []map[string]string `json:"results"`
+}
+
+// RepositoryData bundles what is needed to pull/push a repository: the
+// known images keyed by id, the registry endpoints to contact, and the
+// auth tokens issued by the index.
+type RepositoryData struct {
+	ImgList   map[string]*ImgData
+	Endpoints []string
+	Tokens    []string
+}
+
+// ImgData describes a single image as exchanged with the index/registry.
+type ImgData struct {
+	Id       string `json:"id"`
+	Checksum string `json:"checksum,omitempty"`
+	Tag      string `json:",omitempty"`
+}
+
+// Registry is an HTTP client for the Docker index and registry servers,
+// carrying optional credentials loaded from disk.
+type Registry struct {
+	client     *http.Client
+	authConfig *auth.AuthConfig
+}
+
+// NewRegistry builds a Registry client rooted at root, loading credentials
+// from the auth config there when present (a missing config is tolerated)
+// and attaching a fresh cookie jar for registry session cookies.
+func NewRegistry(root string) *Registry {
+	// If the auth file does not exist, keep going
+	authConfig, _ := auth.LoadConfig(root)
+
+	r := &Registry{
+		authConfig: authConfig,
+		client:     &http.Client{},
+	}
+	r.client.Jar = cookiejar.NewCookieJar()
+	return r
+}

+ 168 - 0
registry/registry_test.go

@@ -0,0 +1,168 @@
+package registry
+
+// import (
+// 	"crypto/rand"
+// 	"encoding/hex"
+// 	"github.com/dotcloud/docker"
+// 	"github.com/dotcloud/docker/auth"
+// 	"io/ioutil"
+// 	"os"
+// 	"path"
+// 	"testing"
+// )
+
+// func newTestRuntime() (*Runtime, error) {
+// 	root, err := ioutil.TempDir("", "docker-test")
+// 	if err != nil {
+// 		return nil, err
+// 	}
+// 	if err := os.Remove(root); err != nil {
+// 		return nil, err
+// 	}
+
+// 	if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) {
+// 		return nil, err
+// 	}
+
+// 	return runtime, nil
+// }
+
+// func TestPull(t *testing.T) {
+// 	os.Setenv("DOCKER_INDEX_URL", "")
+// 	runtime, err := newTestRuntime()
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+// 	defer nuke(runtime)
+
+// 	err = runtime.graph.PullRepository(ioutil.Discard, "busybox", "", runtime.repositories, nil)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+// 	img, err := runtime.repositories.LookupImage("busybox")
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+
+// 	// Try to run something on this image to make sure the layer's been downloaded properly.
+// 	config, _, err := docker.ParseRun([]string{img.Id, "echo", "Hello World"}, runtime.capabilities)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+
+// 	b := NewBuilder(runtime)
+// 	container, err := b.Create(config)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+// 	if err := container.Start(); err != nil {
+// 		t.Fatal(err)
+// 	}
+
+// 	if status := container.Wait(); status != 0 {
+// 		t.Fatalf("Expected status code 0, found %d instead", status)
+// 	}
+// }
+
+// func TestPullTag(t *testing.T) {
+// 	os.Setenv("DOCKER_INDEX_URL", "")
+// 	runtime, err := newTestRuntime()
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+// 	defer nuke(runtime)
+
+// 	err = runtime.graph.PullRepository(ioutil.Discard, "ubuntu", "12.04", runtime.repositories, nil)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+// 	_, err = runtime.repositories.LookupImage("ubuntu:12.04")
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+
+// 	img2, err := runtime.repositories.LookupImage("ubuntu:12.10")
+// 	if img2 != nil {
+// 		t.Fatalf("Expected nil image but found %v instead", img2.Id)
+// 	}
+// }
+
+// func login(runtime *Runtime) error {
+// 	authConfig := auth.NewAuthConfig("unittester", "surlautrerivejetattendrai", "noise+unittester@dotcloud.com", runtime.root)
+// 	runtime.authConfig = authConfig
+// 	_, err := auth.Login(authConfig)
+// 	return err
+// }
+
+// func TestPush(t *testing.T) {
+// 	os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
+// 	defer os.Setenv("DOCKER_INDEX_URL", "")
+// 	runtime, err := newTestRuntime()
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+// 	defer nuke(runtime)
+
+// 	err = login(runtime)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+
+// 	err = runtime.graph.PullRepository(ioutil.Discard, "joffrey/busybox", "", runtime.repositories, nil)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+// 	tokenBuffer := make([]byte, 16)
+// 	_, err = rand.Read(tokenBuffer)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+// 	token := hex.EncodeToString(tokenBuffer)[:29]
+// 	config, _, err := ParseRun([]string{"joffrey/busybox", "touch", "/" + token}, runtime.capabilities)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+
+// 	b := NewBuilder(runtime)
+// 	container, err := b.Create(config)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+// 	if err := container.Start(); err != nil {
+// 		t.Fatal(err)
+// 	}
+
+// 	if status := container.Wait(); status != 0 {
+// 		t.Fatalf("Expected status code 0, found %d instead", status)
+// 	}
+
+// 	img, err := b.Commit(container, "unittester/"+token, "", "", "", nil)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+
+// 	repo := runtime.repositories.Repositories["unittester/"+token]
+// 	err = runtime.graph.PushRepository(ioutil.Discard, "unittester/"+token, repo, runtime.authConfig)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+
+// 	// Remove image so we can pull it again
+// 	if err := runtime.graph.Delete(img.Id); err != nil {
+// 		t.Fatal(err)
+// 	}
+
+// 	err = runtime.graph.PullRepository(ioutil.Discard, "unittester/"+token, "", runtime.repositories, runtime.authConfig)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+
+// 	layerPath, err := img.layer()
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+
+// 	if _, err := os.Stat(path.Join(layerPath, token)); err != nil {
+// 		t.Fatalf("Error while trying to retrieve token file: %v", err)
+// 	}
+// }

+ 18 - 24
runtime.go

@@ -3,7 +3,7 @@ package docker
 import (
 	"container/list"
 	"fmt"
-	"github.com/dotcloud/docker/auth"
+	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
 	"log"
@@ -26,18 +26,18 @@ type Runtime struct {
 	networkManager *NetworkManager
 	graph          *Graph
 	repositories   *TagStore
-	authConfig     *auth.AuthConfig
-	idIndex        *TruncIndex
+	idIndex        *utils.TruncIndex
 	capabilities   *Capabilities
-	kernelVersion  *KernelVersionInfo
+	kernelVersion  *utils.KernelVersionInfo
 	autoRestart    bool
 	volumes        *Graph
+	srv            *Server
 }
 
 
 var sysInitPath string
 var sysInitPath string
 
 
 func init() {
 func init() {
-	sysInitPath = SelfPath()
+	sysInitPath = utils.SelfPath()
 }
 }
 
 
 func (runtime *Runtime) List() []*Container {
 func (runtime *Runtime) List() []*Container {
@@ -113,13 +113,13 @@ func (runtime *Runtime) Register(container *Container) error {
 	container.runtime = runtime
 	container.runtime = runtime
 
 
 	// Attach to stdout and stderr
 	// Attach to stdout and stderr
-	container.stderr = newWriteBroadcaster()
-	container.stdout = newWriteBroadcaster()
+	container.stderr = utils.NewWriteBroadcaster()
+	container.stdout = utils.NewWriteBroadcaster()
 	// Attach to stdin
 	// Attach to stdin
 	if container.Config.OpenStdin {
 	if container.Config.OpenStdin {
 		container.stdin, container.stdinPipe = io.Pipe()
 		container.stdin, container.stdinPipe = io.Pipe()
 	} else {
 	} else {
-		container.stdinPipe = NopWriteCloser(ioutil.Discard) // Silently drop stdin
+		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
 	}
 	}
 	// done
 	// done
 	runtime.containers.PushBack(container)
 	runtime.containers.PushBack(container)
@@ -137,9 +137,9 @@ func (runtime *Runtime) Register(container *Container) error {
 			return err
 			return err
 		} else {
 		} else {
 			if !strings.Contains(string(output), "RUNNING") {
 			if !strings.Contains(string(output), "RUNNING") {
-				Debugf("Container %s was supposed to be running be is not.", container.Id)
+				utils.Debugf("Container %s was supposed to be running be is not.", container.Id)
 				if runtime.autoRestart {
 				if runtime.autoRestart {
-					Debugf("Restarting")
+					utils.Debugf("Restarting")
 					container.State.Ghost = false
 					container.State.Ghost = false
 					container.State.setStopped(0)
 					container.State.setStopped(0)
 					if err := container.Start(); err != nil {
 					if err := container.Start(); err != nil {
@@ -147,7 +147,7 @@ func (runtime *Runtime) Register(container *Container) error {
 					}
 					}
 					nomonitor = true
 					nomonitor = true
 				} else {
 				} else {
-					Debugf("Marking as stopped")
+					utils.Debugf("Marking as stopped")
 					container.State.setStopped(-127)
 					container.State.setStopped(-127)
 					if err := container.ToDisk(); err != nil {
 					if err := container.ToDisk(); err != nil {
 						return err
 						return err
@@ -168,7 +168,7 @@ func (runtime *Runtime) Register(container *Container) error {
 	return nil
 	return nil
 }
 }
 
 
-func (runtime *Runtime) LogToDisk(src *writeBroadcaster, dst string) error {
+func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst string) error {
 	log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
 	log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
@@ -215,16 +215,16 @@ func (runtime *Runtime) restore() error {
 		id := v.Name()
 		id := v.Name()
 		container, err := runtime.Load(id)
 		container, err := runtime.Load(id)
 		if err != nil {
 		if err != nil {
-			Debugf("Failed to load container %v: %v", id, err)
+			utils.Debugf("Failed to load container %v: %v", id, err)
 			continue
 			continue
 		}
 		}
-		Debugf("Loaded container %v", container.Id)
+		utils.Debugf("Loaded container %v", container.Id)
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
 func (runtime *Runtime) UpdateCapabilities(quiet bool) {
 func (runtime *Runtime) UpdateCapabilities(quiet bool) {
-	if cgroupMemoryMountpoint, err := FindCgroupMountpoint("memory"); err != nil {
+	if cgroupMemoryMountpoint, err := utils.FindCgroupMountpoint("memory"); err != nil {
 		if !quiet {
 		if !quiet {
 			log.Printf("WARNING: %s\n", err)
 			log.Printf("WARNING: %s\n", err)
 		}
 		}
@@ -251,11 +251,11 @@ func NewRuntime(autoRestart bool) (*Runtime, error) {
 		return nil, err
 		return nil, err
 	}
 	}
 
 
-	if k, err := GetKernelVersion(); err != nil {
+	if k, err := utils.GetKernelVersion(); err != nil {
 		log.Printf("WARNING: %s\n", err)
 		log.Printf("WARNING: %s\n", err)
 	} else {
 	} else {
 		runtime.kernelVersion = k
 		runtime.kernelVersion = k
-		if CompareKernelVersion(k, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
+		if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
 			log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
 			log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
 		}
 		}
 	}
 	}
@@ -289,11 +289,6 @@ func NewRuntimeFromDirectory(root string, autoRestart bool) (*Runtime, error) {
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	authConfig, err := auth.LoadConfig(root)
-	if err != nil && authConfig == nil {
-		// If the auth file does not exist, keep going
-		return nil, err
-	}
 	runtime := &Runtime{
 	runtime := &Runtime{
 		root:           root,
 		root:           root,
 		repository:     runtimeRepo,
 		repository:     runtimeRepo,
@@ -301,8 +296,7 @@ func NewRuntimeFromDirectory(root string, autoRestart bool) (*Runtime, error) {
 		networkManager: netManager,
 		networkManager: netManager,
 		graph:          g,
 		graph:          g,
 		repositories:   repositories,
 		repositories:   repositories,
-		authConfig:     authConfig,
-		idIndex:        NewTruncIndex(),
+		idIndex:        utils.NewTruncIndex(),
 		capabilities:   &Capabilities{},
 		capabilities:   &Capabilities{},
 		autoRestart:    autoRestart,
 		autoRestart:    autoRestart,
 		volumes:        volumes,
 		volumes:        volumes,

+ 5 - 2
runtime_test.go

@@ -2,6 +2,8 @@ package docker
 
 import (
 	"fmt"
+	"github.com/dotcloud/docker/registry"
+	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
 	"net"
@@ -48,7 +50,7 @@ func layerArchive(tarfile string) (io.Reader, error) {
 
 
 func init() {
 func init() {
 	// Hack to run sys init during unit testing
 	// Hack to run sys init during unit testing
-	if SelfPath() == "/sbin/init" {
+	if utils.SelfPath() == "/sbin/init" {
 		SysInit()
 		SysInit()
 		return
 		return
 	}
 	}
@@ -69,7 +71,8 @@ func init() {
 
 	// Create the "Server"
 	srv := &Server{
-		runtime: runtime,
+		runtime:  runtime,
+		registry: registry.NewRegistry(runtime.root),
 	}
 	// Retrieve the Image
 	if err := srv.ImagePull(unitTestImageName, "", "", os.Stdout); err != nil {

+ 359 - 76
server.go

@@ -2,11 +2,16 @@ package docker
 
 
 import (
 import (
 	"fmt"
 	"fmt"
+	"github.com/dotcloud/docker/auth"
+	"github.com/dotcloud/docker/registry"
+	"github.com/dotcloud/docker/utils"
 	"io"
 	"io"
+	"io/ioutil"
 	"log"
 	"log"
 	"net/http"
 	"net/http"
 	"net/url"
 	"net/url"
 	"os"
 	"os"
+	"path"
 	"runtime"
 	"runtime"
 	"strings"
 	"strings"
 )
 )
@@ -44,7 +49,7 @@ func (srv *Server) ContainerExport(name string, out io.Writer) error {
 }
 }
 
 
 func (srv *Server) ImagesSearch(term string) ([]ApiSearch, error) {
 func (srv *Server) ImagesSearch(term string) ([]ApiSearch, error) {
-	results, err := srv.runtime.graph.SearchRepositories(nil, term)
+	results, err := srv.registry.SearchRepositories(term)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
@@ -54,7 +59,7 @@ func (srv *Server) ImagesSearch(term string) ([]ApiSearch, error) {
 		var out ApiSearch
 		var out ApiSearch
 		out.Description = repo["description"]
 		out.Description = repo["description"]
 		if len(out.Description) > 45 {
 		if len(out.Description) > 45 {
-			out.Description = Trunc(out.Description, 42) + "..."
+			out.Description = utils.Trunc(out.Description, 42) + "..."
 		}
 		}
 		out.Name = repo["name"]
 		out.Name = repo["name"]
 		outs = append(outs, out)
 		outs = append(outs, out)
@@ -63,12 +68,13 @@ func (srv *Server) ImagesSearch(term string) ([]ApiSearch, error) {
 }
 }
 
 
 func (srv *Server) ImageInsert(name, url, path string, out io.Writer) error {
 func (srv *Server) ImageInsert(name, url, path string, out io.Writer) error {
+	out = utils.NewWriteFlusher(out)
 	img, err := srv.runtime.repositories.LookupImage(name)
 	img, err := srv.runtime.repositories.LookupImage(name)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
 
 
-	file, err := Download(url, out)
+	file, err := utils.Download(url, out)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -85,7 +91,7 @@ func (srv *Server) ImageInsert(name, url, path string, out io.Writer) error {
 		return err
 		return err
 	}
 	}
 
 
-	if err := c.Inject(ProgressReader(file.Body, int(file.ContentLength), out, "Downloading %v/%v (%v)"), path); err != nil {
+	if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, "Downloading %v/%v (%v)"), path); err != nil {
 		return err
 		return err
 	}
 	}
 	// FIXME: Handle custom repo, tag comment, author
 	// FIXME: Handle custom repo, tag comment, author
@@ -124,7 +130,7 @@ func (srv *Server) ImagesViz(out io.Writer) error {
 
 
 	for name, repository := range srv.runtime.repositories.Repositories {
 	for name, repository := range srv.runtime.repositories.Repositories {
 		for tag, id := range repository {
 		for tag, id := range repository {
-			reporefs[TruncateId(id)] = append(reporefs[TruncateId(id)], fmt.Sprintf("%s:%s", name, tag))
+			reporefs[utils.TruncateId(id)] = append(reporefs[utils.TruncateId(id)], fmt.Sprintf("%s:%s", name, tag))
 		}
 		}
 	}
 	}
 
 
@@ -135,9 +141,11 @@ func (srv *Server) ImagesViz(out io.Writer) error {
 	return nil
 	return nil
 }
 }
 
 
-func (srv *Server) Images(all, only_ids bool, filter string) ([]ApiImages, error) {
-	var allImages map[string]*Image
-	var err error
+func (srv *Server) Images(all bool, filter string) ([]ApiImages, error) {
+	var (
+		allImages map[string]*Image
+		err       error
+	)
 	if all {
 	if all {
 		allImages, err = srv.runtime.graph.Map()
 		allImages, err = srv.runtime.graph.Map()
 	} else {
 	} else {
@@ -146,7 +154,7 @@ func (srv *Server) Images(all, only_ids bool, filter string) ([]ApiImages, error
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	var outs []ApiImages = []ApiImages{} //produce [] when empty instead of 'null'
+	outs := []ApiImages{} //produce [] when empty instead of 'null'
 	for name, repository := range srv.runtime.repositories.Repositories {
 	for name, repository := range srv.runtime.repositories.Repositories {
 		if filter != "" && name != filter {
 		if filter != "" && name != filter {
 			continue
 			continue
@@ -159,33 +167,23 @@ func (srv *Server) Images(all, only_ids bool, filter string) ([]ApiImages, error
 				continue
 				continue
 			}
 			}
 			delete(allImages, id)
 			delete(allImages, id)
-			if !only_ids {
-				out.Repository = name
-				out.Tag = tag
-				out.Id = TruncateId(id)
-				out.Created = image.Created.Unix()
-				out.Size = image.Size
-				out.ParentSize = image.getVirtualSize(0)
-			} else {
-				out.Id = image.ShortId()
-			}
+			out.Repository = name
+			out.Tag = tag
+			out.Id = image.Id
+			out.Created = image.Created.Unix()
+			out.Size = image.Size
+			out.ParentSize = image.getVirtualSize(0)
 			outs = append(outs, out)
 			outs = append(outs, out)
 		}
 		}
 	}
 	}
 	// Display images which aren't part of a
 	// Display images which aren't part of a
 	if filter == "" {
 	if filter == "" {
-		for id, image := range allImages {
+		for _, image := range allImages {
 			var out ApiImages
 			var out ApiImages
-			if !only_ids {
-				out.Repository = "<none>"
-				out.Tag = "<none>"
-				out.Id = TruncateId(id)
-				out.Created = image.Created.Unix()
-				out.Size = image.Size
-				out.ParentSize = image.getVirtualSize(0)
-			} else {
-				out.Id = image.ShortId()
-			}
+			out.Id = image.Id
+			out.Created = image.Created.Unix()
+			out.Size = image.Size
+			out.ParentSize = image.getVirtualSize(0)
 			outs = append(outs, out)
 			outs = append(outs, out)
 		}
 		}
 	}
 	}
@@ -207,7 +205,7 @@ func (srv *Server) DockerInfo() ApiInfo {
 	out.GoVersion = runtime.Version()
 	out.GoVersion = runtime.Version()
 	if os.Getenv("DEBUG") != "" {
 	if os.Getenv("DEBUG") != "" {
 		out.Debug = true
 		out.Debug = true
-		out.NFd = getTotalUsedFds()
+		out.NFd = utils.GetTotalUsedFds()
 		out.NGoroutines = runtime.NumGoroutine()
 		out.NGoroutines = runtime.NumGoroutine()
 	}
 	}
 	return out
 	return out
@@ -239,7 +237,7 @@ func (srv *Server) ContainerChanges(name string) ([]Change, error) {
 	return nil, fmt.Errorf("No such container: %s", name)
 	return nil, fmt.Errorf("No such container: %s", name)
 }
 }
 
 
-func (srv *Server) Containers(all, trunc_cmd, only_ids bool, n int, since, before string) []ApiContainers {
+func (srv *Server) Containers(all bool, n int, since, before string) []ApiContainers {
 	var foundBefore bool
 	var foundBefore bool
 	var displayed int
 	var displayed int
 	retContainers := []ApiContainers{}
 	retContainers := []ApiContainers{}
@@ -268,24 +266,13 @@ func (srv *Server) Containers(all, trunc_cmd, only_ids bool, n int, since, befor
 		c := ApiContainers{
 		c := ApiContainers{
 			Id: container.Id,
 			Id: container.Id,
 		}
 		}
-		if trunc_cmd {
-			c = ApiContainers{
-				Id: container.ShortId(),
-			}
-		}
+		c.Image = srv.runtime.repositories.ImageName(container.Image)
+		c.Command = fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " "))
+		c.Created = container.Created.Unix()
+		c.Status = container.State.String()
+		c.Ports = container.NetworkSettings.PortMappingHuman()
+		c.SizeRw, c.SizeRootFs = container.GetSize()
 
 
-		if !only_ids {
-			command := fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " "))
-			if trunc_cmd {
-				command = Trunc(command, 20)
-			}
-			c.Image = srv.runtime.repositories.ImageName(container.Image)
-			c.Command = command
-			c.Created = container.Created.Unix()
-			c.Status = container.State.String()
-			c.Ports = container.NetworkSettings.PortMappingHuman()
-			c.SizeRw, c.SizeRootFs = container.GetSize()
-		}
 		retContainers = append(retContainers, c)
 		retContainers = append(retContainers, c)
 	}
 	}
 	return retContainers
 	return retContainers
@@ -310,26 +297,299 @@ func (srv *Server) ContainerTag(name, repo, tag string, force bool) error {
 	return nil
 	return nil
 }
 }
 
 
+func (srv *Server) pullImage(out io.Writer, imgId, registry string, token []string) error {
+	out = utils.NewWriteFlusher(out)
+	history, err := srv.registry.GetRemoteHistory(imgId, registry, token)
+	if err != nil {
+		return err
+	}
+
+	// FIXME: Try to stream the images?
+	// FIXME: Launch the getRemoteImage() in goroutines
+	for _, id := range history {
+		if !srv.runtime.graph.Exists(id) {
+			fmt.Fprintf(out, "Pulling %s metadata\r\n", id)
+			imgJson, err := srv.registry.GetRemoteImageJson(id, registry, token)
+			if err != nil {
+				// FIXME: Keep goging in case of error?
+				return err
+			}
+			img, err := NewImgJson(imgJson)
+			if err != nil {
+				return fmt.Errorf("Failed to parse json: %s", err)
+			}
+
+			// Get the layer
+			fmt.Fprintf(out, "Pulling %s fs layer\r\n", img.Id)
+			layer, contentLength, err := srv.registry.GetRemoteImageLayer(img.Id, registry, token)
+			if err != nil {
+				return err
+			}
+			if err := srv.runtime.graph.Register(utils.ProgressReader(layer, contentLength, out, "Downloading %v/%v (%v)"), false, img); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (srv *Server) pullRepository(out io.Writer, remote, askedTag string) error {
+	out = utils.NewWriteFlusher(out)
+	fmt.Fprintf(out, "Pulling repository %s from %s\r\n", remote, auth.IndexServerAddress())
+	repoData, err := srv.registry.GetRepositoryData(remote)
+	if err != nil {
+		return err
+	}
+
+	utils.Debugf("Updating checksums")
+	// Reload the json file to make sure not to overwrite faster sums
+	if err := srv.runtime.graph.UpdateChecksums(repoData.ImgList); err != nil {
+		return err
+	}
+
+	utils.Debugf("Retrieving the tag list")
+	tagsList, err := srv.registry.GetRemoteTags(repoData.Endpoints, remote, repoData.Tokens)
+	if err != nil {
+		return err
+	}
+	utils.Debugf("Registering tags")
+	// If not specific tag have been asked, take all
+	if askedTag == "" {
+		for tag, id := range tagsList {
+			repoData.ImgList[id].Tag = tag
+		}
+	} else {
+		// Otherwise, check that the tag exists and use only that one
+		if id, exists := tagsList[askedTag]; !exists {
+			return fmt.Errorf("Tag %s not found in repositoy %s", askedTag, remote)
+		} else {
+			repoData.ImgList[id].Tag = askedTag
+		}
+	}
+
+	for _, img := range repoData.ImgList {
+		if askedTag != "" && img.Tag != askedTag {
+			utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.Id)
+			continue
+		}
+		fmt.Fprintf(out, "Pulling image %s (%s) from %s\n", img.Id, img.Tag, remote)
+		success := false
+		for _, ep := range repoData.Endpoints {
+			if err := srv.pullImage(out, img.Id, "https://"+ep+"/v1", repoData.Tokens); err != nil {
+				fmt.Fprintf(out, "Error while retrieving image for tag: %s (%s); checking next endpoint\n", askedTag, err)
+				continue
+			}
+			success = true
+			break
+		}
+		if !success {
+			return fmt.Errorf("Could not find repository on any of the indexed registries.")
+		}
+	}
+	for tag, id := range tagsList {
+		if askedTag != "" && tag != askedTag {
+			continue
+		}
+		if err := srv.runtime.repositories.Set(remote, tag, id, true); err != nil {
+			return err
+		}
+	}
+	if err := srv.runtime.repositories.Save(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 func (srv *Server) ImagePull(name, tag, registry string, out io.Writer) error {
 func (srv *Server) ImagePull(name, tag, registry string, out io.Writer) error {
 	if registry != "" {
 	if registry != "" {
-		if err := srv.runtime.graph.PullImage(out, name, registry, nil); err != nil {
+		if err := srv.pullImage(out, name, registry, nil); err != nil {
 			return err
 			return err
 		}
 		}
 		return nil
 		return nil
 	}
 	}
-	if err := srv.runtime.graph.PullRepository(out, name, tag, srv.runtime.repositories, srv.runtime.authConfig); err != nil {
+
+	if err := srv.pullRepository(out, name, tag); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Retrieve the checksum of an image
+// Priority:
+// - Check on the stored checksums
+// - Check if the archive exists, if it does not, ask the registry
+// - If the archive does exists, process the checksum from it
+// - If the archive does not exists and not found on registry, process checksum from layer
+func (srv *Server) getChecksum(imageId string) (string, error) {
+	// FIXME: Use in-memory map instead of reading the file each time
+	if sums, err := srv.runtime.graph.getStoredChecksums(); err != nil {
+		return "", err
+	} else if checksum, exists := sums[imageId]; exists {
+		return checksum, nil
+	}
+
+	img, err := srv.runtime.graph.Get(imageId)
+	if err != nil {
+		return "", err
+	}
+
+	if _, err := os.Stat(layerArchivePath(srv.runtime.graph.imageRoot(imageId))); err != nil {
+		if os.IsNotExist(err) {
+			// TODO: Ask the registry for the checksum
+			//       As the archive is not there, it is supposed to come from a pull.
+		} else {
+			return "", err
+		}
+	}
+
+	checksum, err := img.Checksum()
+	if err != nil {
+		return "", err
+	}
+	return checksum, nil
+}
+
+// Retrieve the all the images to be uploaded in the correct order
+// Note: we can't use a map as it is not ordered
+func (srv *Server) getImageList(localRepo map[string]string) ([]*registry.ImgData, error) {
+	var imgList []*registry.ImgData
+
+	imageSet := make(map[string]struct{})
+	for tag, id := range localRepo {
+		img, err := srv.runtime.graph.Get(id)
+		if err != nil {
+			return nil, err
+		}
+		img.WalkHistory(func(img *Image) error {
+			if _, exists := imageSet[img.Id]; exists {
+				return nil
+			}
+			imageSet[img.Id] = struct{}{}
+			checksum, err := srv.getChecksum(img.Id)
+			if err != nil {
+				return err
+			}
+			imgList = append([]*registry.ImgData{{
+				Id:       img.Id,
+				Checksum: checksum,
+				Tag:      tag,
+			}}, imgList...)
+			return nil
+		})
+	}
+	return imgList, nil
+}
+
+func (srv *Server) pushRepository(out io.Writer, name string, localRepo map[string]string) error {
+	out = utils.NewWriteFlusher(out)
+	fmt.Fprintf(out, "Processing checksums\n")
+	imgList, err := srv.getImageList(localRepo)
+	if err != nil {
+		return err
+	}
+	fmt.Fprintf(out, "Sending image list\n")
+
+	repoData, err := srv.registry.PushImageJsonIndex(name, imgList, false)
+	if err != nil {
+		return err
+	}
+
+	// FIXME: Send only needed images
+	for _, ep := range repoData.Endpoints {
+		fmt.Fprintf(out, "Pushing repository %s to %s (%d tags)\r\n", name, ep, len(localRepo))
+		// For each image within the repo, push them
+		for _, elem := range imgList {
+			if _, exists := repoData.ImgList[elem.Id]; exists {
+				fmt.Fprintf(out, "Image %s already on registry, skipping\n", name)
+				continue
+			}
+			if err := srv.pushImage(out, name, elem.Id, ep, repoData.Tokens); err != nil {
+				// FIXME: Continue on error?
+				return err
+			}
+			fmt.Fprintf(out, "Pushing tags for rev [%s] on {%s}\n", elem.Id, ep+"/users/"+name+"/"+elem.Tag)
+			if err := srv.registry.PushRegistryTag(name, elem.Id, elem.Tag, ep, repoData.Tokens); err != nil {
+				return err
+			}
+		}
+	}
+
+	if _, err := srv.registry.PushImageJsonIndex(name, imgList, true); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (srv *Server) pushImage(out io.Writer, remote, imgId, ep string, token []string) error {
+	out = utils.NewWriteFlusher(out)
+	jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgId, "json"))
+	if err != nil {
+		return fmt.Errorf("Error while retreiving the path for {%s}: %s", imgId, err)
+	}
+	fmt.Fprintf(out, "Pushing %s\r\n", imgId)
+
+	// Make sure we have the image's checksum
+	checksum, err := srv.getChecksum(imgId)
+	if err != nil {
+		return err
+	}
+	imgData := &registry.ImgData{
+		Id:       imgId,
+		Checksum: checksum,
+	}
+
+	// Send the json
+	if err := srv.registry.PushImageJsonRegistry(imgData, jsonRaw, ep, token); err != nil {
+		if err == registry.ErrAlreadyExists {
+			fmt.Fprintf(out, "Image %s already uploaded ; skipping\n", imgData.Id)
+			return nil
+		}
+		return err
+	}
+
+	// Retrieve the tarball to be sent
+	var layerData *TempArchive
+	// If the archive exists, use it
+	file, err := os.Open(layerArchivePath(srv.runtime.graph.imageRoot(imgId)))
+	if err != nil {
+		if os.IsNotExist(err) {
+			// If the archive does not exist, create one from the layer
+			layerData, err = srv.runtime.graph.TempLayerArchive(imgId, Xz, out)
+			if err != nil {
+				return fmt.Errorf("Failed to generate layer archive: %s", err)
+			}
+		} else {
+			return err
+		}
+	} else {
+		defer file.Close()
+		st, err := file.Stat()
+		if err != nil {
+			return err
+		}
+		layerData = &TempArchive{
+			File: file,
+			Size: st.Size(),
+		}
+	}
+
+	// Send the layer
+	if err := srv.registry.PushImageLayerRegistry(imgData.Id, utils.ProgressReader(layerData, int(layerData.Size), out, ""), ep, token); err != nil {
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
 func (srv *Server) ImagePush(name, registry string, out io.Writer) error {
 func (srv *Server) ImagePush(name, registry string, out io.Writer) error {
+	out = utils.NewWriteFlusher(out)
 	img, err := srv.runtime.graph.Get(name)
 	img, err := srv.runtime.graph.Get(name)
 	if err != nil {
 	if err != nil {
-		Debugf("The push refers to a repository [%s] (len: %d)\n", name, len(srv.runtime.repositories.Repositories[name]))
+		fmt.Fprintf(out, "The push refers to a repository [%s] (len: %d)\n", name, len(srv.runtime.repositories.Repositories[name]))
 		// If it fails, try to get the repository
 		// If it fails, try to get the repository
 		if localRepo, exists := srv.runtime.repositories.Repositories[name]; exists {
 		if localRepo, exists := srv.runtime.repositories.Repositories[name]; exists {
-			if err := srv.runtime.graph.PushRepository(out, name, localRepo, srv.runtime.authConfig); err != nil {
+			if err := srv.pushRepository(out, name, localRepo); err != nil {
 				return err
 				return err
 			}
 			}
 			return nil
 			return nil
@@ -337,8 +597,8 @@ func (srv *Server) ImagePush(name, registry string, out io.Writer) error {
 
 
 		return err
 		return err
 	}
 	}
-	err = srv.runtime.graph.PushImage(out, img, registry, nil)
-	if err != nil {
+	fmt.Fprintf(out, "The push refers to an image: [%s]\n", name)
+	if err := srv.pushImage(out, name, img.Id, registry, nil); err != nil {
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
@@ -360,14 +620,14 @@ func (srv *Server) ImageImport(src, repo, tag string, in io.Reader, out io.Write
 			u.Host = src
 			u.Host = src
 			u.Path = ""
 			u.Path = ""
 		}
 		}
-		fmt.Fprintln(out, "Downloading from", u)
+		fmt.Fprintf(out, "Downloading from %s\n", u)
 		// Download with curl (pretty progress bar)
 		// Download with curl (pretty progress bar)
 		// If curl is not available, fallback to http.Get()
 		// If curl is not available, fallback to http.Get()
-		resp, err = Download(u.String(), out)
+		resp, err = utils.Download(u.String(), out)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
-		archive = ProgressReader(resp.Body, int(resp.ContentLength), out, "Importing %v/%v (%v)")
+		archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), out, "Importing %v/%v (%v)")
 	}
 	}
 	img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil)
 	img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil)
 	if err != nil {
 	if err != nil {
@@ -379,7 +639,7 @@ func (srv *Server) ImageImport(src, repo, tag string, in io.Reader, out io.Write
 			return err
 			return err
 		}
 		}
 	}
 	}
-	fmt.Fprintln(out, img.ShortId())
+	fmt.Fprintf(out, "%s\n", img.ShortId())
 	return nil
 	return nil
 }
 }
 
 
@@ -403,15 +663,6 @@ func (srv *Server) ContainerCreate(config *Config) (string, error) {
 	return container.ShortId(), nil
 	return container.ShortId(), nil
 }
 }
 
 
-func (srv *Server) ImageCreateFromFile(dockerfile io.Reader, out io.Writer) error {
-	img, err := NewBuilder(srv.runtime).Build(dockerfile, out)
-	if err != nil {
-		return err
-	}
-	fmt.Fprintf(out, "%s\n", img.ShortId())
-	return nil
-}
-
 func (srv *Server) ContainerRestart(name string, t int) error {
 func (srv *Server) ContainerRestart(name string, t int) error {
 	if container := srv.runtime.Get(name); container != nil {
 	if container := srv.runtime.Get(name); container != nil {
 		if err := container.Restart(t); err != nil {
 		if err := container.Restart(t); err != nil {
@@ -424,7 +675,6 @@ func (srv *Server) ContainerRestart(name string, t int) error {
 }
 }
 
 
 func (srv *Server) ContainerDestroy(name string, removeVolume bool) error {
 func (srv *Server) ContainerDestroy(name string, removeVolume bool) error {
-
 	if container := srv.runtime.Get(name); container != nil {
 	if container := srv.runtime.Get(name); container != nil {
 		volumes := make(map[string]struct{})
 		volumes := make(map[string]struct{})
 		// Store all the deleted containers volumes
 		// Store all the deleted containers volumes
@@ -473,6 +723,36 @@ func (srv *Server) ImageDelete(name string) error {
 	return nil
 	return nil
 }
 }
 
 
+func (srv *Server) ImageGetCached(imgId string, config *Config) (*Image, error) {
+
+	// Retrieve all images
+	images, err := srv.runtime.graph.All()
+	if err != nil {
+		return nil, err
+	}
+
+	// Store the tree in a map of map (map[parentId][childId])
+	imageMap := make(map[string]map[string]struct{})
+	for _, img := range images {
+		if _, exists := imageMap[img.Parent]; !exists {
+			imageMap[img.Parent] = make(map[string]struct{})
+		}
+		imageMap[img.Parent][img.Id] = struct{}{}
+	}
+
+	// Loop on the children of the given image and check the config
+	for elem := range imageMap[imgId] {
+		img, err := srv.runtime.graph.Get(elem)
+		if err != nil {
+			return nil, err
+		}
+		if CompareConfig(&img.ContainerConfig, config) {
+			return img, nil
+		}
+	}
+	return nil, nil
+}
+
 func (srv *Server) ContainerStart(name string) error {
 func (srv *Server) ContainerStart(name string) error {
 	if container := srv.runtime.Get(name); container != nil {
 	if container := srv.runtime.Get(name); container != nil {
 		if err := container.Start(); err != nil {
 		if err := container.Start(); err != nil {
@@ -513,17 +793,17 @@ func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, std
 		if stdout {
 		if stdout {
 			cLog, err := container.ReadLog("stdout")
 			cLog, err := container.ReadLog("stdout")
 			if err != nil {
 			if err != nil {
-				Debugf(err.Error())
+				utils.Debugf(err.Error())
 			} else if _, err := io.Copy(out, cLog); err != nil {
 			} else if _, err := io.Copy(out, cLog); err != nil {
-				Debugf(err.Error())
+				utils.Debugf(err.Error())
 			}
 			}
 		}
 		}
 		if stderr {
 		if stderr {
 			cLog, err := container.ReadLog("stderr")
 			cLog, err := container.ReadLog("stderr")
 			if err != nil {
 			if err != nil {
-				Debugf(err.Error())
+				utils.Debugf(err.Error())
 			} else if _, err := io.Copy(out, cLog); err != nil {
 			} else if _, err := io.Copy(out, cLog); err != nil {
-				Debugf(err.Error())
+				utils.Debugf(err.Error())
 			}
 			}
 		}
 		}
 	}
 	}
@@ -544,7 +824,7 @@ func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, std
 			r, w := io.Pipe()
 			r, w := io.Pipe()
 			go func() {
 			go func() {
 				defer w.Close()
 				defer w.Close()
-				defer Debugf("Closing buffered stdin pipe")
+				defer utils.Debugf("Closing buffered stdin pipe")
 				io.Copy(w, in)
 				io.Copy(w, in)
 			}()
 			}()
 			cStdin = r
 			cStdin = r
@@ -591,11 +871,14 @@ func NewServer(autoRestart bool) (*Server, error) {
 		return nil, err
 		return nil, err
 	}
 	}
 	srv := &Server{
 	srv := &Server{
-		runtime: runtime,
+		runtime:  runtime,
+		registry: registry.NewRegistry(runtime.root),
 	}
 	}
+	runtime.srv = srv
 	return srv, nil
 	return srv, nil
 }
 }
 
 
 type Server struct {
 type Server struct {
-	runtime *Runtime
+	runtime  *Runtime
+	registry *registry.Registry
 }
 }

+ 2 - 1
state.go

@@ -2,6 +2,7 @@ package docker
 
 
 import (
 import (
 	"fmt"
 	"fmt"
+	"github.com/dotcloud/docker/utils"
 	"sync"
 	"sync"
 	"time"
 	"time"
 )
 )
@@ -21,7 +22,7 @@ func (s *State) String() string {
 		if s.Ghost {
 		if s.Ghost {
 			return fmt.Sprintf("Ghost")
 			return fmt.Sprintf("Ghost")
 		}
 		}
-		return fmt.Sprintf("Up %s", HumanDuration(time.Now().Sub(s.StartedAt)))
+		return fmt.Sprintf("Up %s", utils.HumanDuration(time.Now().Sub(s.StartedAt)))
 	}
 	}
 	return fmt.Sprintf("Exit %d", s.ExitCode)
 	return fmt.Sprintf("Exit %d", s.ExitCode)
 }
 }

+ 2 - 1
tags.go

@@ -3,6 +3,7 @@ package docker
 import (
 import (
 	"encoding/json"
 	"encoding/json"
 	"fmt"
 	"fmt"
+	"github.com/dotcloud/docker/utils"
 	"io/ioutil"
 	"io/ioutil"
 	"os"
 	"os"
 	"path/filepath"
 	"path/filepath"
@@ -106,7 +107,7 @@ func (store *TagStore) ImageName(id string) string {
 	if names, exists := store.ById()[id]; exists && len(names) > 0 {
 	if names, exists := store.ById()[id]; exists && len(names) > 0 {
 		return names[0]
 		return names[0]
 	}
 	}
-	return TruncateId(id)
+	return utils.TruncateId(id)
 }
 }
 
 
 func (store *TagStore) Set(repoName, tag, imageName string, force bool) error {
 func (store *TagStore) Set(repoName, tag, imageName string, force bool) error {

+ 21 - 0
term/term.go

@@ -1,6 +1,8 @@
 package term
 package term
 
 
 import (
 import (
+	"os"
+	"os/signal"
 	"syscall"
 	"syscall"
 	"unsafe"
 	"unsafe"
 )
 )
@@ -120,3 +122,22 @@ func Restore(fd int, state *State) error {
 	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(setTermios), uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0)
 	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(setTermios), uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0)
 	return err
 	return err
 }
 }
+
+func SetRawTerminal() (*State, error) {
+	oldState, err := MakeRaw(int(os.Stdin.Fd()))
+	if err != nil {
+		return nil, err
+	}
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, os.Interrupt)
+	go func() {
+		_ = <-c
+		Restore(int(os.Stdin.Fd()), oldState)
+		os.Exit(0)
+	}()
+	return oldState, err
+}
+
+func RestoreTerminal(state *State) {
+	Restore(int(os.Stdin.Fd()), state)
+}

+ 44 - 0
testing/README.rst

@@ -0,0 +1,44 @@
+=======
+testing
+=======
+
+This directory contains testing related files.
+
+
+Buildbot
+========
+
+Buildbot is a continuous integration system designed to automate the
+build/test cycle. By automatically rebuilding and testing the tree each time
+something has changed, build problems are pinpointed quickly, before other
+developers are inconvenienced by the failure.
+
+We are running buildbot in an AWS instance to verify docker passes all tests
+when commits get pushed to the master branch.
+
+You can check docker's buildbot instance at http://docker-ci.dotcloud.com/waterfall
+
+
+Deployment
+~~~~~~~~~~
+
+::
+
+  # Define AWS credential environment variables
+  export AWS_ACCESS_KEY_ID=xxxxxxxxxxxx
+  export AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxx
+  export AWS_KEYPAIR_NAME=xxxxxxxxxxxx
+  export AWS_SSH_PRIVKEY=xxxxxxxxxxxx
+
+  # Checkout docker
+  git clone git://github.com/dotcloud/docker.git
+
+  # Deploy docker on AWS
+  cd docker/testing
+  vagrant up --provider=aws
+
+
+Buildbot AWS dependencies
+-------------------------
+
+vagrant, virtualbox packages and vagrant aws plugin

+ 56 - 0
testing/Vagrantfile

@@ -0,0 +1,56 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+BOX_NAME = "docker-ci"
+BOX_URI = "http://files.vagrantup.com/precise64.box"
+AWS_AMI = "ami-d0f89fb9"
+DOCKER_PATH = "/data/docker"
+CFG_PATH = "#{DOCKER_PATH}/testing/buildbot"
+BUILDBOT_IP = "192.168.33.41"
+on_vbox = File.file?("#{File.dirname(__FILE__)}/.vagrant/machines/default/virtualbox/id") | \
+  Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty? & \
+  (on_vbox=true; ARGV.each do |arg| on_vbox &&= !arg.downcase.start_with?("--provider") end; on_vbox)
+USER = on_vbox ? "vagrant": "ubuntu"
+
+Vagrant::Config.run do |config|
+  # Setup virtual machine box. This VM configuration code is always executed.
+  config.vm.box = BOX_NAME
+  config.vm.box_url = BOX_URI
+  config.vm.share_folder "v-data", DOCKER_PATH, "#{File.dirname(__FILE__)}/.."
+  config.vm.network :hostonly, BUILDBOT_IP
+
+  # Deploy buildbot and its dependencies if it was not done
+  if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty?
+    pkg_cmd = "apt-get update -qq; apt-get install -q -y linux-image-3.8.0-19-generic; "
+    # Deploy buildbot CI
+    pkg_cmd << "apt-get install -q -y python-dev python-pip supervisor; " \
+      "pip install -r #{CFG_PATH}/requirements.txt; " \
+      "chown #{USER}.#{USER} /data; cd /data; " \
+      "#{CFG_PATH}/setup.sh #{USER} #{CFG_PATH}; "
+    # Install docker dependencies
+    pkg_cmd << "apt-get install -q -y python-software-properties; " \
+      "add-apt-repository -y ppa:gophers/go/ubuntu; apt-get update -qq; " \
+      "DEBIAN_FRONTEND=noninteractive apt-get install -q -y lxc bsdtar git golang-stable make; "
+    # Activate new kernel
+    pkg_cmd << "shutdown -r +1; "
+    config.vm.provision :shell, :inline => pkg_cmd
+  end
+end
+
+# Providers were added on Vagrant >= 1.1.0
+Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
+  config.vm.provider :aws do |aws, override|
+    aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
+    aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
+    aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
+    override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
+    override.ssh.username = USER
+    aws.ami = AWS_AMI
+    aws.region = "us-east-1"
+    aws.instance_type = "m1.small"
+    aws.security_groups = "gateway"
+  end
+
+  config.vm.provider :virtualbox do |vb|
+  end
+end

Some files were not shown because too many files changed in this diff