
Merge remote-tracking branch 'upstream/master'

Conflicts:
	container.go
Zilin Du, 11 years ago
parent
commit
b543516556
100 changed files with 3886 additions and 2235 deletions
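This is a merge commit that pulls in 'upstream/master' and resolves a conflict in container.go. A minimal sketch of the workflow that typically produces such a commit; the remote name upstream comes from the commit message, everything else is assumed:

    # fetch the upstream history referenced in the commit message
    git fetch upstream
    # merge it into the current branch; git stops and reports the conflict in container.go
    git merge upstream/master
    # edit container.go to resolve the conflict, then mark it as resolved
    git add container.go
    # complete the merge; git proposes the default "Merge remote-tracking branch ..." message
    git commit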
  1. .gitignore (+1, -0)
  2. .mailmap (+12, -0)
  3. AUTHORS (+69, -1)
  4. CHANGELOG.md (+672, -331)
  5. CONTRIBUTING.md (+19, -2)
  6. Dockerfile (+7, -8)
  7. NOTICE (+2, -3)
  8. README.md (+6, -6)
  9. VERSION (+1, -1)
  10. Vagrantfile (+4, -0)
  11. api.go (+54, -17)
  12. api_params.go (+10, -17)
  13. api_test.go (+70, -28)
  14. auth/auth_test.go (+4, -1)
  15. buildfile.go (+17, -8)
  16. buildfile_test.go (+6, -24)
  17. commands.go (+292, -128)
  18. commands_test.go (+82, -16)
  19. config.go (+18, -0)
  20. container.go (+392, -149)
  21. container_test.go (+101, -44)
  22. contrib/MAINTAINERS (+0, -1)
  23. contrib/brew/.gitignore (+0, -1)
  24. contrib/brew/README.md (+0, -78)
  25. contrib/brew/brew/__init__.py (+0, -1)
  26. contrib/brew/brew/brew.py (+0, -185)
  27. contrib/brew/brew/git.py (+0, -63)
  28. contrib/brew/docker-brew (+0, -35)
  29. contrib/brew/requirements.txt (+0, -2)
  30. contrib/brew/setup.py (+0, -22)
  31. contrib/completion/bash/docker (+1, -1)
  32. contrib/completion/zsh/_docker (+242, -0)
  33. contrib/host-integration/Dockerfile.dev (+27, -0)
  34. contrib/host-integration/Dockerfile.min (+4, -0)
  35. contrib/host-integration/manager.go (+130, -0)
  36. contrib/host-integration/manager.sh (+53, -0)
  37. contrib/host-integration/manager/systemd (+20, -0)
  38. contrib/host-integration/manager/upstart (+15, -0)
  39. contrib/init/openrc/docker.confd (+13, -0)
  40. contrib/init/openrc/docker.initd (+31, -0)
  41. contrib/init/systemd/docker.service (+13, -0)
  42. contrib/init/sysvinit/docker (+85, -0)
  43. contrib/init/upstart/docker.conf (+2, -2)
  44. contrib/install.sh (+0, -61)
  45. contrib/mkimage-arch.sh (+67, -0)
  46. contrib/mkimage-busybox.sh (+1, -1)
  47. contrib/mkimage-debian.sh (+0, -66)
  48. contrib/mkimage-debootstrap.sh (+233, -0)
  49. contrib/mkimage-unittest.sh (+1, -1)
  50. contrib/vim-syntax/LICENSE (+22, -0)
  51. contrib/vim-syntax/README.md (+23, -0)
  52. contrib/vim-syntax/doc/dockerfile.txt (+18, -0)
  53. contrib/vim-syntax/ftdetect/dockerfile.vim (+1, -0)
  54. contrib/vim-syntax/syntax/dockerfile.vim (+24, -0)
  55. docker/docker.go (+54, -26)
  56. dockerinit/dockerinit.go (+16, -0)
  57. docs/Dockerfile (+6, -1)
  58. docs/README.md (+88, -25)
  59. docs/sources/api/docker_remote_api_v1.0.rst (+21, -16)
  60. docs/sources/api/docker_remote_api_v1.1.rst (+31, -26)
  61. docs/sources/api/docker_remote_api_v1.2.rst (+22, -17)
  62. docs/sources/api/docker_remote_api_v1.3.rst (+22, -17)
  63. docs/sources/api/docker_remote_api_v1.4.rst (+10, -5)
  64. docs/sources/api/docker_remote_api_v1.5.rst (+23, -18)
  65. docs/sources/api/docker_remote_api_v1.6.rst (+16, -6)
  66. docs/sources/api/index.rst (+1, -1)
  67. docs/sources/commandline/cli.rst (+709, -38)
  68. docs/sources/commandline/command/attach.rst (+0, -59)
  69. docs/sources/commandline/command/build.rst (+0, -65)
  70. docs/sources/commandline/command/commit.rst (+0, -52)
  71. docs/sources/commandline/command/cp.rst (+0, -14)
  72. docs/sources/commandline/command/diff.rst (+0, -13)
  73. docs/sources/commandline/command/events.rst (+0, -34)
  74. docs/sources/commandline/command/export.rst (+0, -13)
  75. docs/sources/commandline/command/history.rst (+0, -13)
  76. docs/sources/commandline/command/images.rst (+0, -26)
  77. docs/sources/commandline/command/import.rst (+0, -44)
  78. docs/sources/commandline/command/info.rst (+0, -13)
  79. docs/sources/commandline/command/insert.rst (+0, -23)
  80. docs/sources/commandline/command/inspect.rst (+0, -13)
  81. docs/sources/commandline/command/kill.rst (+0, -13)
  82. docs/sources/commandline/command/login.rst (+0, -24)
  83. docs/sources/commandline/command/logs.rst (+0, -13)
  84. docs/sources/commandline/command/port.rst (+0, -13)
  85. docs/sources/commandline/command/ps.rst (+0, -17)
  86. docs/sources/commandline/command/pull.rst (+0, -13)
  87. docs/sources/commandline/command/push.rst (+0, -13)
  88. docs/sources/commandline/command/restart.rst (+0, -13)
  89. docs/sources/commandline/command/rm.rst (+0, -13)
  90. docs/sources/commandline/command/rmi.rst (+0, -13)
  91. docs/sources/commandline/command/run.rst (+0, -85)
  92. docs/sources/commandline/command/search.rst (+0, -14)
  93. docs/sources/commandline/command/start.rst (+0, -13)
  94. docs/sources/commandline/command/stop.rst (+0, -15)
  95. docs/sources/commandline/command/tag.rst (+0, -15)
  96. docs/sources/commandline/command/top.rst (+0, -13)
  97. docs/sources/commandline/command/version.rst (+0, -7)
  98. docs/sources/commandline/command/wait.rst (+0, -13)
  99. docs/sources/commandline/docker_images.gif (+0, -0)
  100. docs/sources/commandline/index.rst (+2, -33)

+ 1 - 0
.gitignore

@@ -17,3 +17,4 @@ docs/_templates
 bundles/
 .hg/
 .git/
+vendor/pkg/

+ 12 - 0
.mailmap

@@ -26,3 +26,15 @@ Roberto Hashioka <roberto_hashioka@hotmail.com>
 Konstantin Pelykh <kpelykh@zettaset.com>
 David Sissitka <me@dsissitka.com>
 Nolan Darilek <nolan@thewordnerd.info>
+<mastahyeti@gmail.com> <mastahyeti@users.noreply.github.com>
+Benoit Chesneau <bchesneau@gmail.com>
+Jordan Arentsen <blissdev@gmail.com>
+Daniel Garcia <daniel@danielgarcia.info>
+Miguel Angel Fernández <elmendalerenda@gmail.com>
+Bhiraj Butala <abhiraj.butala@gmail.com>
+Faiz Khan <faizkhan00@gmail.com>
+Victor Lyuboslavsky <victor@victoreda.com>
+Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
+Matthew Mueller <mattmuelle@gmail.com>
+<mosoni@ebay.com> <mohitsoni1989@gmail.com>
+Shih-Yuan Lee <fourdollars@gmail.com>
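The .mailmap entries above map alternate e-mail addresses to each contributor's canonical identity, so tools that summarize git history report one line per person. A minimal way to check the effect (standard git usage, not part of this commit):

    # list contributors with commit counts and e-mail, with .mailmap normalization applied
    git shortlog -se HEAD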

+ 69 - 1
AUTHORS

@@ -5,64 +5,98 @@
 #
 Al Tobey <al@ooyala.com>
 Alex Gaynor <alex.gaynor@gmail.com>
+Alexander Larsson <alexl@redhat.com>
 Alexey Shamrin <shamrin@gmail.com>
 Andrea Luzzardi <aluzzardi@gmail.com>
+Andreas Savvides <andreas@editd.com>
 Andreas Tiefenthaler <at@an-ti.eu>
+Andrew Macgregor <andrew.macgregor@agworld.com.au>
 Andrew Munsell <andrew@wizardapps.net>
 Andrews Medina <andrewsmedina@gmail.com>
 Andy Rothfusz <github@metaliveblog.com>
 Andy Smith <github@anarkystic.com>
 Anthony Bishopric <git@anthonybishopric.com>
 Antony Messerli <amesserl@rackspace.com>
+Asbjørn Enge <asbjorn@hanafjedle.net>
 Barry Allard <barry.allard@gmail.com>
+Ben Toews <mastahyeti@gmail.com>
+Benoit Chesneau <bchesneau@gmail.com>
+Bhiraj Butala <abhiraj.butala@gmail.com>
+Bouke Haarsma <bouke@webatoom.nl>
 Brandon Liu <bdon@bdon.org>
+Brandon Philips <brandon@ifup.co>
 Brian McCallister <brianm@skife.org>
 Brian Olsen <brian@maven-group.org>
+Brian Shumate <brian@couchbase.com>
+Briehan Lombaard <briehan.lombaard@gmail.com>
 Bruno Bigras <bigras.bruno@gmail.com>
 Caleb Spare <cespare@gmail.com>
 Calen Pennington <cale@edx.org>
 Charles Hooper <charles.hooper@dotcloud.com>
 Christopher Currie <codemonkey+github@gmail.com>
+Colin Dunklau <colin.dunklau@gmail.com>
 Colin Rice <colin@daedrum.net>
+Dan Buch <d.buch@modcloth.com>
+Daniel Garcia <daniel@danielgarcia.info>
 Daniel Gasienica <daniel@gasienica.ch>
 Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
+Daniel Nordberg <dnordberg@gmail.com>
 Daniel Robinson <gottagetmac@gmail.com>
 Daniel Von Fange <daniel@leancoder.com>
 Daniel YC Lin <dlin.tw@gmail.com>
 David Calavera <david.calavera@gmail.com>
 David Sissitka <me@dsissitka.com>
+Deni Bertovic <deni@kset.org>
 Dominik Honnef <dominik@honnef.co>
 Don Spaulding <donspauldingii@gmail.com>
 Dr Nic Williams <drnicwilliams@gmail.com>
+Dražen Lučanin <kermit666@gmail.com>
 Elias Probst <mail@eliasprobst.eu>
 Emily Rose <emily@contactvibe.com>
 Eric Hanchrow <ehanchrow@ine.com>
 Eric Myhre <hash@exultant.us>
 Erno Hopearuoho <erno.hopearuoho@gmail.com>
+Evan Phoenix <evan@fallingsnow.net>
 Evan Wies <evan@neomantra.net>
 ezbercih <cem.ezberci@gmail.com>
 Fabrizio Regini <freegenie@gmail.com>
+Faiz Khan <faizkhan00@gmail.com>
 Fareed Dudhia <fareeddudhia@googlemail.com>
 Flavio Castelli <fcastelli@suse.com>
 Francisco Souza <f@souza.cc>
 Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
 Gabriel Monroy <gabriel@opdemand.com>
 Gareth Rushgrove <gareth@morethanseven.net>
+Greg Thornton <xdissent@me.com>
 Guillaume J. Charmes <guillaume.charmes@dotcloud.com>
+Guruprasad <lgp171188@gmail.com>
 Harley Laue <losinggeneration@gmail.com>
+Hector Castro <hectcastro@gmail.com>
 Hunter Blanks <hunter@twilio.com>
+Isao Jonas <isao.jonas@gmail.com>
+James Carr <james.r.carr@gmail.com>
+Jason McVetta <jason.mcvetta@gmail.com>
+Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
 Jeff Lindsay <progrium@gmail.com>
 Jeremy Grosser <jeremy@synack.me>
+Jim Alateras <jima@comware.com.au>
+Jimmy Cuadra <jimmy@jimmycuadra.com>
+Joe Van Dyk <joe@tanga.com>
 Joffrey F <joffrey@dotcloud.com>
 Johan Euphrosine <proppy@google.com>
 John Costa <john.costa@gmail.com>
 Jon Wedaman <jweede@gmail.com>
 Jonas Pfenniger <jonas@pfenniger.name>
+Jonathan Mueller <j.mueller@apoveda.ch>
 Jonathan Rudenberg <jonathan@titanous.com>
+Joost Cassee <joost@cassee.net>
+Jordan Arentsen <blissdev@gmail.com>
 Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
 Julien Barbier <write0@gmail.com>
 Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
 Karan Lyons <karan@karanlyons.com>
+Karl Grzeszczak <karl@karlgrz.com>
+Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
 Keli Hu <dev@keli.hu>
 Ken Cochrane <kencochrane@gmail.com>
 Kevin Clark <kevin.clark@gmail.com>
@@ -71,28 +105,45 @@ kim0 <email.ahmedkamal@googlemail.com>
 Kimbro Staken <kstaken@kstaken.com>
 Kiran Gangadharan <kiran.daredevil@gmail.com>
 Konstantin Pelykh <kpelykh@zettaset.com>
+Kyle Conroy <kyle.j.conroy@gmail.com>
+Laurie Voss <github@seldo.com>
 Louis Opter <kalessin@kalessin.fr>
+Manuel Meurer <manuel@krautcomputing.com>
 Marco Hennings <marco.hennings@freiheit.com>
 Marcus Farkas <toothlessgear@finitebox.com>
+Marcus Ramberg <marcus@nordaaker.com>
 Mark McGranaghan <mmcgrana@gmail.com>
-Martin Redmond <mrtodo@gmail.com>
+Marko Mikulicic <mmikulicic@gmail.com>
+Markus Fix <lispmeister@gmail.com>
+Martin Redmond <martin@tinychat.com>
+Matt Apperson <me@mattapperson.com>
+Matt Bachmann <bachmann.matt@gmail.com>
+Matthew Mueller <mattmuelle@gmail.com>
 Maxim Treskin <zerthurd@gmail.com>
 meejah <meejah@meejah.ca>
 Michael Crosby <crosby.michael@gmail.com>
+Michael Gorsuch <gorsuch@github.com>
+Miguel Angel Fernández <elmendalerenda@gmail.com>
 Mike Gaffney <mike@uberu.com>
 Mikhail Sobolev <mss@mawhrin.net>
+Mohit Soni <mosoni@ebay.com>
+Morten Siebuhr <sbhr@sbhr.dk>
 Nan Monnand Deng <monnand@gmail.com>
 Nate Jones <nate@endot.org>
 Nelson Chen <crazysim@gmail.com>
 Niall O'Higgins <niallo@unworkable.org>
+Nick Payne <nick@kurai.co.uk>
 Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
 Nick Stinemates <nick@stinemates.org>
 Nolan Darilek <nolan@thewordnerd.info>
 odk- <github@odkurzacz.org>
+Pascal Borreli <pascal@borreli.com>
 Paul Bowsher <pbowsher@globalpersonals.co.uk>
 Paul Hammond <paul@paulhammond.org>
 Phil Spitler <pspitler@gmail.com>
 Piotr Bogdan <ppbogdan@gmail.com>
+pysqz <randomq@126.com>
+Ramon van Alteren <ramon@vanalteren.nl>
 Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
 Rhys Hiltner <rhys@twitch.tv>
 Robert Obryk <robryk@gmail.com>
@@ -100,14 +151,22 @@ Roberto Hashioka <roberto_hashioka@hotmail.com>
 Ryan Fowler <rwfowler@gmail.com>
 Sam Alba <sam.alba@gmail.com>
 Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
+Scott Bessler <scottbessler@gmail.com>
+Sean P. Kane <skane@newrelic.com>
 Shawn Siefkas <shawn.siefkas@meredith.com>
+Shih-Yuan Lee <fourdollars@gmail.com>
 Silas Sewell <silas@sewell.org>
 Solomon Hykes <solomon@dotcloud.com>
+Song Gao <song@gao.io>
+Sridatta Thatipamala <sthatipamala@gmail.com>
 Sridhar Ratnakumar <sridharr@activestate.com>
+Steeve Morin <steeve.morin@gmail.com>
 Stefan Praszalowicz <stefan@greplin.com>
 Thatcher Peskens <thatcher@dotcloud.com>
+Thermionix <bond711@gmail.com>
 Thijs Terlouw <thijsterlouw@gmail.com>
 Thomas Bikeev <thomas.bikeev@mac.com>
+Thomas Frössman <thomasf@jossystem.se>
 Thomas Hansen <thomas.hansen@gmail.com>
 Tianon Gravi <admwiggin@gmail.com>
 Tim Terhorst <mynamewastaken+git@gmail.com>
@@ -115,8 +174,17 @@ Tobias Bieniek <Tobias.Bieniek@gmx.de>
 Tobias Schmidt <ts@soundcloud.com>
 Tobias Schwab <tobias.schwab@dynport.de>
 Tom Hulihan <hulihan.tom159@gmail.com>
+Tommaso Visconti <tommaso.visconti@gmail.com>
+Tyler Brock <tyler.brock@gmail.com>
 unclejack <unclejacksons@gmail.com>
+Victor Coisne <victor.coisne@dotcloud.com>
+Victor Lyuboslavsky <victor@victoreda.com>
 Victor Vieux <victor.vieux@dotcloud.com>
+Vincent Bernat <bernat@luffy.cx>
 Vivek Agarwal <me@vivek.im>
+Vladimir Kirillov <proger@wilab.org.ua>
 Walter Stanish <walter@pratyeka.org>
+Wes Morgan <cap10morgan@gmail.com>
 Will Dietz <w@wdtz.org>
+Yang Bai <hamo.by@gmail.com>
+Zaiste! <oh@zaiste.net>

+ 672 - 331
CHANGELOG.md

@@ -1,407 +1,748 @@
 # Changelog
 
 
+## 0.6.5 (2013-10-29)
+
+#### Runtime
+
++ Runtime: Containers can now be named
++ Runtime: Containers can now be linked together for service discovery
++ Runtime: 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors
++ Runtime: Automatically start crashed containers after a reboot
++ Runtime: Expose IP, port, and proto as separate environment vars for container links
+* Runtime: Allow ports to be published to specific ips
+* Runtime: Prohibit inter-container communication by default
+- Runtime: Ignore ErrClosedPipe for stdin in Container.Attach
+- Runtime: Fix untag during removal of images
+- Runtime: Remove unused field kernelVersion
+* Runtime: Fix issue when mounting subdirectories of /mnt in container
+* Runtime: Check return value of syscall.Chdir when changing working directory inside dockerinit
+
+#### Documentation
+
+* Documentation: Fix the flags for nc in example
+
+#### Client
+
+- Client: Only pass stdin to hijack when needed to avoid closed pipe errors
+* Client: Use less reflection in command-line method invocation
+- Client: Monitor the tty size after starting the container, not prior
+- Client: Remove useless os.Exit() calls after log.Fatal
+
+#### Hack
+
+- Hack: Update install.sh with $sh_c to get sudo/su for modprobe
+* Hack: Update all the mkimage scripts to use --numeric-owner as a tar argument
+* Hack: Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues
++ Hack: Add initial init scripts library and a safer Ubuntu packaging script that works for Debian
+* Hack: Add -p option to invoke debootstrap with http_proxy
+
+#### Other
+
+* Testing: Remove warnings and prevent mount issues
+- Testing: Change logic for tty resize to avoid warning in tests
+- Builder: Fix race condition in docker build with verbose output
+- Registry: Fix content-type for PushImageJSONIndex method
+* Contrib: Improve helper tools to generate debian and Arch linux server images
+
+## 0.6.4 (2013-10-16)
+
+#### Runtime
+
+- Add cleanup of container when Start() fails
+* Add better comments to utils/stdcopy.go
+* Add utils.Errorf for error logging
++ Add -rm to docker run for removing a container on exit
+- Remove error messages which are not actually errors
+- Fix `docker rm` with volumes
+- Fix some error cases where a HTTP body might not be closed
+- Fix panic with wrong dockercfg file
+- Fix the attach behavior with -i
+* Record termination time in state.
+- Use empty string so TempDir uses the OS's temp dir automatically
+- Make sure to close the network allocators
++ Autorestart containers by default
+* Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)`
+* lxc: Allow set_file_cap capability in container
+- Move run -rm to the cli only
+* Split stdout stderr
+* Always create a new session for the container
+
+#### Testing
+
+- Add aggregated docker-ci email report
+- Add cleanup to remove leftover containers
+* Add nightly release to docker-ci
+* Add more tests around auth.ResolveAuthConfig
+- Remove a few errors in tests
+- Catch errClosing error when TCP and UDP proxies are terminated
+* Only run certain tests with TESTFLAGS='-run TestName' make.sh
+* Prevent docker-ci to test closing PRs
+* Replace panic by log.Fatal in tests
+- Increase TestRunDetach timeout
+
+#### Documentation
+
+* Add initial draft of the Docker infrastructure doc
+* Add devenvironment link to CONTRIBUTING.md
+* Add `apt-get install curl` to Ubuntu docs
+* Add explanation for export restrictions
+* Add .dockercfg doc
+* Remove Gentoo install notes about #1422 workaround
+* Fix help text for -v option
+* Fix Ping endpoint documentation
+- Fix parameter names in docs for ADD command
+- Fix ironic typo in changelog
+* Various command fixes in postgres example
+* Document how to edit and release docs
+- Minor updates to `postgresql_service.rst`
+* Clarify LGTM process to contributors
+- Corrected error in the package name
+* Document what `vagrant up` is actually doing
++ improve doc search results
+* Cleanup whitespace in API 1.5 docs
+* use angle brackets in MAINTAINER example email
+* Update archlinux.rst
++ Changes to a new style for the docs. Includes version switcher.
+* Formatting, add information about multiline json
+* Improve registry and index REST API documentation
+- Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3
+* Update Gentoo installation documentation now that we're in the portage tree proper
+* Cleanup and reorganize docs and tooling for contributors and maintainers
+- Minor spelling correction of protocoll -> protocol
+
+#### Contrib
+
+* Add vim syntax highlighting for Dockerfiles from @honza
+* Add mkimage-arch.sh
+* Reorganize contributed completion scripts to add zsh completion
+
+#### Hack
+
+* Add vagrant user to the docker group
+* Add proper bash completion for "docker push"
+* Add xz utils as a runtime dep
+* Add cleanup/refactor portion of #2010 for hack and Dockerfile updates
++ Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link
+* Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly
++ Add @tianon to hack/MAINTAINERS
+* Improve network performance for VirtualBox
+* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.)
+- Fix contrib/mkimage-debian.sh apt caching prevention
++ Added Dockerfile.tmLanguage to contrib
+* Configured FPM to make /etc/init/docker.conf a config file
+* Enable SSH Agent forwarding in Vagrant VM
+* Several small tweaks/fixes for contrib/mkimage-debian.sh
+
+#### Other
+
+- Builder: Abort build if mergeConfig returns an error and fix duplicate error message
+- Packaging: Remove deprecated packaging directory
+- Registry: Use correct auth config when logging in.
+- Registry: Fix the error message so it is the same as the regex
+
 ## 0.6.3 (2013-09-23)
-* Packaging: Update tar vendor dependency
+
+#### Packaging
+
+* Add 'docker' group on install for ubuntu package
+* Update tar vendor dependency
+* Download apt key over HTTPS
+
+#### Runtime
+
+- Only copy and change permissions on non-bindmount volumes
+* Allow multiple volumes-from
+- Fix HTTP imports from STDIN
+
+#### Documentation
+
+* Update section on extracting the docker binary after build
+* Update development environment docs for new build process
+* Remove 'base' image from documentation
+
+#### Other
+
 - Client: Fix detach issue
-- Runtime: Only copy and change permissions on non-bindmount volumes
 - Registry: Update regular expression to match index
-* Runtime: Allow multiple volumes-from
-* Packaging: Download apt key over HTTPS
-* Documentation: Update section on extracting the docker binary after build
-* Documentation: Update development environment docs for new build process
-* Documentation: Remove 'base' image from documentation
-* Packaging: Add 'docker' group on install for ubuntu package
-- Runtime: Fix HTTP imports from STDIN
 
 
 ## 0.6.2 (2013-09-17)
 ## 0.6.2 (2013-09-17)
+
+#### Runtime
+
++ Add domainname support
++ Implement image filtering with path.Match
+* Remove unnecesasry warnings
+* Remove os/user dependency
+* Only mount the hostname file when the config exists
+* Handle signals within the `docker login` command
+- UID and GID are now also applied to volumes
+- `docker start` set error code upon error
+- `docker run` set the same error code as the process started
+
+#### Builder
+
++ Add -rm option in order to remove intermediate containers
+* Allow multiline for the RUN instruction
+
+#### Registry
+
+* Implement login with private registry
+- Fix push issues
+
+#### Other
+
 + Hack: Vendor all dependencies
-+ Builder: Add -rm option in order to remove intermediate containers
-+ Runtime: Add domainname support
-+ Runtime: Implement image filtering with path.Match
-* Builder: Allow multiline for the RUN instruction
-* Runtime: Remove unnecesasry warnings
-* Runtime: Only mount the hostname file when the config exists
-* Runtime: Handle signals within the `docker login` command
-* Runtime: Remove os/user dependency
-* Registry: Implement login with private registry
 * Remote API: Bump to v1.5
 * Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc.
-* Documentation: General improvements
-- Runtime: UID and GID are now also applied to volumes
-- Runtime: `docker start` set error code upon error
-- Runtime: `docker run` set the same error code as the process started
-- Registry: Fix push issues
+* Documentation: General improvments
 
 
 ## 0.6.1 (2013-08-23)
-* Registry: Pass "meta" headers in API calls to the registry
-- Packaging: Use correct upstart script with new build tool
-- Packaging: Use libffi-dev, don't build it from sources
-- Packaging: Removed duplicate mercurial install command
+
+#### Registry
+
+* Pass "meta" headers in API calls to the registry
+
+#### Packaging
+
+- Use correct upstart script with new build tool
+- Use libffi-dev, don`t build it from sources
+- Remove duplicate mercurial install command
 
 
 ## 0.6.0 (2013-08-22)
-- Runtime: Load authConfig only when needed and fix useless WARNING
-+ Runtime: Add lxc-conf flag to allow custom lxc options
-- Runtime: Fix race conditions in parallel pull
-- Runtime: Improve CMD, ENTRYPOINT, and attach docs.
-* Documentation: Small fix to docs regarding adding docker groups
-* Documentation: Add MongoDB image example
-+ Builder: Add USER instruction do Dockerfile
-* Documentation: updated default -H docs
-* Remote API: Sort Images by most recent creation date.
-+ Builder: Add workdir support for the Buildfile
-+ Runtime: Add an option to set the working directory
-- Runtime: Show tag used when image is missing
-* Documentation: Update readme with dependencies for building
-* Documentation: Add instructions for creating and using the docker group
-* Remote API: Reworking opaque requests in registry module
-- Runtime: Fix Graph ByParent() to generate list of child images per parent image.
-* Runtime: Add Image name to LogEvent tests
-* Documentation: Add sudo to examples and installation to documentation
-+ Hack: Bash Completion: Limit commands to containers of a relevant state
-* Remote API: Add image name in /events
-* Runtime: Apply volumes-from before creating volumes
-- Runtime: Make docker run handle SIGINT/SIGTERM
-- Runtime: Prevent crash when .dockercfg not readable
-* Hack: Add docker dependencies coverage testing into docker-ci
-+ Runtime: Add -privileged flag and relevant tests, docs, and examples
-+ Packaging: Docker-brew 0.5.2 support and memory footprint reduction
-- Runtime: Install script should be fetched over https, not http.
-* Packaging: Add new docker dependencies into docker-ci
-* Runtime: Use Go 1.1.2 for dockerbuilder
-* Registry: Improve auth push
-* Runtime: API, issue 1471: Use groups for socket permissions
-* Documentation: PostgreSQL service example in documentation
+
+#### Runtime
+
++ Add lxc-conf flag to allow custom lxc options
++ Add an option to set the working directory
+* Add Image name to LogEvent tests
++ Add -privileged flag and relevant tests, docs, and examples
+* Add websocket support to /container/<name>/attach/ws
+* Add warning when net.ipv4.ip_forwarding = 0
+* Add hostname to environment
+* Add last stable version in `docker version`
+- Fix race conditions in parallel pull
+- Fix Graph ByParent() to generate list of child images per parent image.
+- Fix typo: fmt.Sprint -> fmt.Sprintf
+- Fix small \n error un docker build
+* Fix to "Inject dockerinit at /.dockerinit"
+* Fix #910. print user name to docker info output
+* Use Go 1.1.2 for dockerbuilder
+* Use ranged for loop on channels
+- Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete
+- Improve CMD, ENTRYPOINT, and attach docs.
+- Improve connect message with socket error
+- Load authConfig only when needed and fix useless WARNING
+- Show tag used when image is missing
+* Apply volumes-from before creating volumes
+- Make docker run handle SIGINT/SIGTERM
+- Prevent crash when .dockercfg not readable
+- Install script should be fetched over https, not http.
+* API, issue 1471: Use groups for socket permissions
+- Correctly detect IPv4 forwarding
+* Mount /dev/shm as a tmpfs
+- Switch from http to https for get.docker.io
+* Let userland proxy handle container-bound traffic
+* Updated the Docker CLI to specify a value for the "Host" header.
+- Change network range to avoid conflict with EC2 DNS
+- Reduce connect and read timeout when pinging the registry
+* Parallel pull
+- Handle ip route showing mask-less IP addresses
+* Allow ENTRYPOINT without CMD
+- Always consider localhost as a domain name when parsing the FQN repos name
+* Refactor checksum
+
+#### Documentation
+
+* Add MongoDB image example
+* Add instructions for creating and using the docker group
+* Add sudo to examples and installation to documentation
+* Add ufw doc
+* Add a reference to ps -a
+* Add information about Docker`s high level tools over LXC.
+* Fix typo in docs for docker run -dns
+* Fix a typo in the ubuntu installation guide
+* Fix to docs regarding adding docker groups
+* Update default -H docs
+* Update readme with dependencies for building
+* Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2
+* PostgreSQL service example in documentation
+* Suggest installing linux-headers by default.
+* Change the twitter handle
+* Clarify Amazon EC2 installation
+* 'Base' image is deprecated and should no longer be referenced in the docs.
+* Move note about officially supported kernel
+- Solved the logo being squished in Safari
+
+#### Builder
+
++ Add USER instruction do Dockerfile
++ Add workdir support for the Buildfile
+* Add no cache for docker build
+- Fix docker build and docker events output
+- Only count known instructions as build steps
+- Make sure ENV instruction within build perform a commit each time
+- Forbid certain paths within docker build ADD
+- Repository name (and optionally a tag) in build usage
+- Make sure ADD will create everything in 0755
+
+#### Remote API
+
+* Sort Images by most recent creation date.
+* Reworking opaque requests in registry module
+* Add image name in /events
+* Use mime pkg to parse Content-Type
+* 650 http utils and user agent field
+
+#### Hack
+
++ Bash Completion: Limit commands to containers of a relevant state
+* Add docker dependencies coverage testing into docker-ci
+
+#### Packaging
+
++ Docker-brew 0.5.2 support and memory footprint reduction
+* Add new docker dependencies into docker-ci
+- Revert "docker.upstart: avoid spawning a `sh` process"
++ Docker-brew and Docker standard library
++ Release docker with docker
+* Fix the upstart script generated by get.docker.io
+* Enabled the docs to generate manpages.
+* Revert Bind daemon to 0.0.0.0 in Vagrant.
+
+#### Register
+
+* Improve auth push
+* Registry unit tests + mock registry
+
+#### Tests
+
+* Improve TestKillDifferentUser to prevent timeout on buildbot
+- Fix typo in TestBindMounts (runContainer called without image)
+* Improve TestGetContainersTop so it does not rely on sleep
+* Relax the lo interface test to allow iface index != 1
+* Add registry functional test to docker-ci
+* Add some tests in server and utils
+
+#### Other
+
 * Contrib: bash completion script
-* Tests: Improve TestKillDifferentUser to prevent timeout on buildbot
-* Documentation: Fix typo in docs for docker run -dns
-* Documentation: Adding a reference to ps -a
-- Runtime: Correctly detect IPv4 forwarding
-- Packaging: Revert "docker.upstart: avoid spawning a `sh` process"
-* Runtime: Use ranged for loop on channels
-- Runtime: Fix typo: fmt.Sprint -> fmt.Sprintf
-- Tests: Fix typo in TestBindMounts (runContainer called without image)
-* Runtime: add websocket support to /container/<name>/attach/ws
-* Runtime: Mount /dev/shm as a tmpfs
-- Builder: Only count known instructions as build steps
-- Builder: Fix docker build and docker events output
-- Runtime: switch from http to https for get.docker.io
-* Tests: Improve TestGetContainersTop so it does not rely on sleep
-+ Packaging: Docker-brew and Docker standard library
-* Testing: Add some tests in server and utils
-+ Packaging: Release docker with docker
-- Builder: Make sure ENV instruction within build perform a commit each time
-* Packaging: Fix the upstart script generated by get.docker.io
-- Runtime: fix small \n error un docker build
-* Runtime: Let userland proxy handle container-bound traffic
-* Runtime: Updated the Docker CLI to specify a value for the "Host" header.
-* Runtime: Add warning when net.ipv4.ip_forwarding = 0
-* Registry: Registry unit tests + mock registry
-* Runtime: fixed #910. print user name to docker info output
-- Builder: Forbid certain paths within docker build ADD
-- Runtime: change network range to avoid conflict with EC2 DNS
-* Tests: Relax the lo interface test to allow iface index != 1
-* Documentation: Suggest installing linux-headers by default.
-* Documentation: Change the twitter handle
 * Client: Add docker cp command and copy api endpoint to copy container files/folders to the host
-* Remote API: Use mime pkg to parse Content-Type
-- Runtime: Reduce connect and read timeout when pinging the registry
-* Documentation: Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2
-* Packaging: Enabled the docs to generate manpages.
-* Runtime: Parallel pull
-- Runtime: Handle ip route showing mask-less IP addresses
-* Documentation: Clarify Amazon EC2 installation
-* Documentation: 'Base' image is deprecated and should no longer be referenced in the docs.
-* Runtime: Fix to "Inject dockerinit at /.dockerinit"
-* Runtime: Allow ENTRYPOINT without CMD
-- Runtime: Always consider localhost as a domain name when parsing the FQN repos name
-* Remote API: 650 http utils and user agent field
-* Documentation: fix a typo in the ubuntu installation guide
-- Builder: Repository name (and optionally a tag) in build usage
-* Documentation: Move note about officially supported kernel
-* Packaging: Revert "Bind daemon to 0.0.0.0 in Vagrant.
-* Builder: Add no cache for docker build
-* Runtime: Add hostname to environment
-* Runtime: Add last stable version in `docker version`
-- Builder: Make sure ADD will create everything in 0755
-* Documentation: Add ufw doc
-* Tests: Add registry functional test to docker-ci
-- Documentation: Solved the logo being squished in Safari
-- Runtime: Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete
-* Runtime: Refactor checksum
-- Runtime: Improve connect message with socket error
-* Documentation: Added information about Docker's high level tools over LXC.
-* Don't read from stdout when only attached to stdin
+* Don`t read from stdout when only attached to stdin
 
 
 ## 0.5.3 (2013-08-13)
-* Runtime: Use docker group for socket permissions
-- Runtime: Spawn shell within upstart script
-- Builder: Make sure ENV instruction within build perform a commit each time
-- Runtime: Handle ip route showing mask-less IP addresses
-- Runtime: Add hostname to environment
+
+#### Runtime
+
+* Use docker group for socket permissions
+- Spawn shell within upstart script
+- Handle ip route showing mask-less IP addresses
+- Add hostname to environment
+
+#### Builder
+
+- Make sure ENV instruction within build perform a commit each time
 
 
 ## 0.5.2 (2013-08-08)
- * Builder: Forbid certain paths within docker build ADD
- - Runtime: Change network range to avoid conflict with EC2 DNS
- * API: Change daemon to listen on unix socket by default
+
+* Builder: Forbid certain paths within docker build ADD
+- Runtime: Change network range to avoid conflict with EC2 DNS
+* API: Change daemon to listen on unix socket by default
 
 
 ## 0.5.1 (2013-07-30)
- + API: Docker client now sets useragent (RFC 2616)
- + Runtime: Add `ps` args to `docker top`
- + Runtime: Add support for container ID files (pidfile like)
- + Runtime: Add container=lxc in default env
- + Runtime: Support networkless containers with `docker run -n` and `docker -d -b=none`
- + API: Add /events endpoint
- + Builder: ADD command now understands URLs
- + Builder: CmdAdd and CmdEnv now respect Dockerfile-set ENV variables
- * Hack: Simplify unit tests with helpers
- * Hack: Improve docker.upstart event
- * Hack: Add coverage testing into docker-ci
- * Runtime: Stdout/stderr logs are now stored in the same file as JSON
- * Runtime: Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3.
- * Runtime: Change .dockercfg format to json and support multiple auth remote
- - Runtime: Do not override volumes from config
- - Runtime: Fix issue with EXPOSE override
- - Builder: Create directories with 755 instead of 700 within ADD instruction
+
+#### Runtime
+
++ Add `ps` args to `docker top`
++ Add support for container ID files (pidfile like)
++ Add container=lxc in default env
++ Support networkless containers with `docker run -n` and `docker -d -b=none`
+* Stdout/stderr logs are now stored in the same file as JSON
+* Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3.
+* Change .dockercfg format to json and support multiple auth remote
+- Do not override volumes from config
+- Fix issue with EXPOSE override
+
+#### API
+
++ Docker client now sets useragent (RFC 2616)
++ Add /events endpoint
+
+#### Builder
+
++ ADD command now understands URLs
++ CmdAdd and CmdEnv now respect Dockerfile-set ENV variables
+- Create directories with 755 instead of 700 within ADD instruction
+
+#### Hack
+
+* Simplify unit tests with helpers
+* Improve docker.upstart event
+* Add coverage testing into docker-ci
 
 
 ## 0.5.0 (2013-07-17)
- + Runtime: List all processes running inside a container with 'docker top'
- + Runtime: Host directories can be mounted as volumes with 'docker run -v'
- + Runtime: Containers can expose public UDP ports (eg, '-p 123/udp')
- + Runtime: Optionally specify an exact public port (eg. '-p 80:4500')
- + Registry: New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries
- + Builder: ENTRYPOINT instruction sets a default binary entry point to a container
- + Builder: VOLUME instruction marks a part of the container as persistent data
- * Builder: 'docker build' displays the full output of a build by default
- * Runtime: 'docker login' supports additional options
- - Runtime: Dont save a container's hostname when committing an image.
- - Registry: Fix issues when uploading images to a private registry
+
+#### Runtime
+
++ List all processes running inside a container with 'docker top'
++ Host directories can be mounted as volumes with 'docker run -v'
++ Containers can expose public UDP ports (eg, '-p 123/udp')
++ Optionally specify an exact public port (eg. '-p 80:4500')
+* 'docker login' supports additional options
+- Dont save a container`s hostname when committing an image.
+
+#### Registry
+
++ New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries
+- Fix issues when uploading images to a private registry
+
+#### Builder
+
++ ENTRYPOINT instruction sets a default binary entry point to a container
++ VOLUME instruction marks a part of the container as persistent data
+* 'docker build' displays the full output of a build by default
 
 
 ## 0.4.8 (2013-07-01)
- + Builder: New build operation ENTRYPOINT adds an executable entry point to the container.
- - Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID.
- - Tests: Fix issues in the test suite
+
++ Builder: New build operation ENTRYPOINT adds an executable entry point to the container.  - Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID.
+- Tests: Fix issues in the test suite
 
 
 ## 0.4.7 (2013-06-28)
- * Registry: easier push/pull to a custom registry
- * Remote API: the progress bar updates faster when downloading and uploading large files
- - Remote API: fix a bug in the optional unix socket transport
- * Runtime: improve detection of kernel version
- + Runtime: host directories can be mounted as volumes with 'docker run -b'
- - Runtime: fix an issue when only attaching to stdin
- * Runtime: use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts
- * Hack: improve test suite and dev environment
- * Hack: remove dependency on unit tests on 'os/user'
- + Documentation: add terminology section
+
+#### Remote API
+
+* The progress bar updates faster when downloading and uploading large files
+- Fix a bug in the optional unix socket transport
+
+#### Runtime
+
+* Improve detection of kernel version
++ Host directories can be mounted as volumes with 'docker run -b'
+- fix an issue when only attaching to stdin
+* Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts
+
+#### Hack
+
+* Improve test suite and dev environment
+* Remove dependency on unit tests on 'os/user'
+
+#### Other
+
+* Registry: easier push/pull to a custom registry
++ Documentation: add terminology section
 
 
 ## 0.4.6 (2013-06-22)
- - Runtime: fix a bug which caused creation of empty images (and volumes) to crash.
+
+- Runtime: fix a bug which caused creation of empty images (and volumes) to crash.
 
 
 ## 0.4.5 (2013-06-21)
- + Builder: 'docker build git://URL' fetches and builds a remote git repository
- * Runtime: 'docker ps -s' optionally prints container size
- * Tests: Improved and simplified
- - Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail.
- - Builder: fix a regression when using ADD with single regular file.
+
++ Builder: 'docker build git://URL' fetches and builds a remote git repository
+* Runtime: 'docker ps -s' optionally prints container size
+* Tests: Improved and simplified
+- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail.
+- Builder: fix a regression when using ADD with single regular file.
 
 
 ## 0.4.4 (2013-06-19)
- - Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients.
+
+- Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients.
 
 
 ## 0.4.3 (2013-06-19)
- + Builder: ADD of a local file will detect tar archives and unpack them
- * Runtime: Remove bsdtar dependency
- * Runtime: Add unix socket and multiple -H support
- * Runtime: Prevent rm of running containers
- * Runtime: Use go1.1 cookiejar
- * Builder: ADD improvements: use tar for copy + automatically unpack local archives
- * Builder: ADD uses tar/untar for copies instead of calling 'cp -ar'
- * Builder: nicer output for 'docker build'
- * Builder: fixed the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented.
- * Client: HumanReadable ProgressBar sizes in pull
- * Client: Fix docker version's git commit output
- * API: Send all tags on History API call
- * API: Add tag lookup to history command. Fixes #882
- - Runtime: Fix issue detaching from running TTY container
- - Runtime: Forbid parralel push/pull for a single image/repo. Fixes #311
- - Runtime: Fix race condition within Run command when attaching.
- - Builder: fix a bug which caused builds to fail if ADD was the first command
- - Documentation: fix missing command in irc bouncer example
+
+#### Builder
+
++ ADD of a local file will detect tar archives and unpack them
+* ADD improvements: use tar for copy + automatically unpack local archives
+* ADD uses tar/untar for copies instead of calling 'cp -ar'
+* Fixed the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented.
+- Fix a bug which caused builds to fail if ADD was the first command
+* Nicer output for 'docker build'
+
+#### Runtime
+
+* Remove bsdtar dependency
+* Add unix socket and multiple -H support
+* Prevent rm of running containers
+* Use go1.1 cookiejar
+- Fix issue detaching from running TTY container
+- Forbid parralel push/pull for a single image/repo. Fixes #311
+- Fix race condition within Run command when attaching.
+
+#### Client
+
+* HumanReadable ProgressBar sizes in pull
+* Fix docker version`s git commit output
+
+#### API
+
+* Send all tags on History API call
+* Add tag lookup to history command. Fixes #882
+
+#### Documentation
+
+- Fix missing command in irc bouncer example
 
 
 ## 0.4.2 (2013-06-17)
- - Packaging: Bumped version to work around an Ubuntu bug
+
+- Packaging: Bumped version to work around an Ubuntu bug
 
 
 ## 0.4.1 (2013-06-17)
- + Remote Api: Add flag to enable cross domain requests
- + Remote Api/Client: Add images and containers sizes in docker ps and docker images
- + Runtime: Configure dns configuration host-wide with 'docker -d -dns'
- + Runtime: Detect faulty DNS configuration and replace it with a public default
- + Runtime: allow docker run <name>:<id>
- + Runtime: you can now specify public port (ex: -p 80:4500)
- * Client: allow multiple params in inspect
- * Client: Print the container id before the hijack in `docker run`
- * Registry: add regexp check on repo's name
- * Registry: Move auth to the client
- * Runtime: improved image removal to garbage-collect unreferenced parents
- * Vagrantfile: Add the rest api port to vagrantfile's port_forward
- * Upgrade to Go 1.1
- - Builder: don't ignore last line in Dockerfile when it doesn't end with \n
- - Registry: Remove login check on pull
+
+#### Remote Api
+
++ Add flag to enable cross domain requests
++ Add images and containers sizes in docker ps and docker images
+
+#### Runtime
+
++ Configure dns configuration host-wide with 'docker -d -dns'
++ Detect faulty DNS configuration and replace it with a public default
++ Allow docker run <name>:<id>
++ You can now specify public port (ex: -p 80:4500)
+* Improved image removal to garbage-collect unreferenced parents
+
+#### Client
+
+* Allow multiple params in inspect
+* Print the container id before the hijack in `docker run`
+
+#### Registry
+
+* Add regexp check on repo`s name
+* Move auth to the client
+- Remove login check on pull
+
+#### Other
+
+* Vagrantfile: Add the rest api port to vagrantfile`s port_forward
+* Upgrade to Go 1.1
+- Builder: don`t ignore last line in Dockerfile when it doesn`t end with \n
 
 
 ## 0.4.0 (2013-06-03)
- + Introducing Builder: 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
- + Introducing Remote API: control Docker programmatically using a simple HTTP/json API
- * Runtime: various reliability and usability improvements
+
+#### Builder
+
++ Introducing Builder
++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
+
+#### Remote API
+
++ Introducing Remote API
++ control Docker programmatically using a simple HTTP/json API
+
+#### Runtime
+
+* Various reliability and usability improvements
 
 
 ## 0.3.4 (2013-05-30)
 ## 0.3.4 (2013-05-30)
- + Builder: 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
- + Builder: 'docker build -t FOO' applies the tag FOO to the newly built container.
- + Runtime: interactive TTYs correctly handle window resize
- * Runtime: fix how configuration is merged between layers
- + Remote API: split stdout and stderr on 'docker run'
- + Remote API: optionally listen on a different IP and port (use at your own risk)
- * Documentation: improved install instructions.
+
+#### Builder
+
++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
++ 'docker build -t FOO' applies the tag FOO to the newly built container.
+
+#### Runtime
+
++ Interactive TTYs correctly handle window resize
+* Fix how configuration is merged between layers
+
+#### Remote API
+
++ Split stdout and stderr on 'docker run'
++ Optionally listen on a different IP and port (use at your own risk)
+
+#### Documentation
+
+* Improved install instructions.
 
 
 ## 0.3.3 (2013-05-23)
- - Registry: Fix push regression
- - Various bugfixes
+
+- Registry: Fix push regression
+- Various bugfixes
 
 
 ## 0.3.2 (2013-05-09)
- * Runtime: Store the actual archive on commit
- * Registry: Improve the checksum process
- * Registry: Use the size to have a good progress bar while pushing
- * Registry: Use the actual archive if it exists in order to speed up the push
- - Registry: Fix error 400 on push
+
+#### Registry
+
+* Improve the checksum process
+* Use the size to have a good progress bar while pushing
+* Use the actual archive if it exists in order to speed up the push
+- Fix error 400 on push
+
+#### Runtime
+
+* Store the actual archive on commit
 
 
 ## 0.3.1 (2013-05-08)
 ## 0.3.1 (2013-05-08)
- + Builder: Implement the autorun capability within docker builder
- + Builder: Add caching to docker builder
- + Builder: Add support for docker builder with native API as top level command
- + Runtime: Add go version to debug infos
- + Builder: Implement ENV within docker builder
- + Registry: Add docker search top level command in order to search a repository
- + Images: output graph of images to dot (graphviz)
- + Documentation: new introduction and high-level overview
- + Documentation: Add the documentation for docker builder
- + Website: new high-level overview
- - Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc
- - Images: fix ByParent function
- - Builder: Check the command existance prior create and add Unit tests for the case
- - Registry: Fix pull for official images with specific tag
- - Registry: Fix issue when login in with a different user and trying to push
- - Documentation: CSS fix for docker documentation to make REST API docs look better.
- - Documentation: Fixed CouchDB example page header mistake
- - Documentation: fixed README formatting
- * Registry: Improve checksum - async calculation
- * Runtime: kernel version - don't show the dash if flavor is empty
- * Documentation: updated www.docker.io website.
- * Builder: use any whitespaces instead of tabs
- * Packaging: packaging ubuntu; issue #510: Use goland-stable PPA package to build docker
+
+#### Builder
+
++ Implement the autorun capability within docker builder
++ Add caching to docker builder
++ Add support for docker builder with native API as top level command
++ Implement ENV within docker builder
+- Check the command existance prior create and add Unit tests for the case
+* use any whitespaces instead of tabs
+
+#### Runtime
+
++ Add go version to debug infos
+* Kernel version - don`t show the dash if flavor is empty
+
+#### Registry
+
++ Add docker search top level command in order to search a repository
+- Fix pull for official images with specific tag
+- Fix issue when login in with a different user and trying to push
+* Improve checksum - async calculation
+
+#### Images
+
++ Output graph of images to dot (graphviz)
+- Fix ByParent function
+
+#### Documentation
+
++ New introduction and high-level overview
++ Add the documentation for docker builder
+- CSS fix for docker documentation to make REST API docs look better.
+- Fix CouchDB example page header mistake
+- Fix README formatting
+* Update www.docker.io website.
+
+#### Other
+
++ Website: new high-level overview
+- Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc
+* Packaging: packaging ubuntu; issue #510: Use goland-stable PPA package to build docker
 
 
 ## 0.3.0 (2013-05-06)
- + Registry: Implement the new registry
- + Documentation: new example: sharing data between 2 couchdb databases
- - Runtime: Fix the command existance check
- - Runtime: strings.Split may return an empty string on no match
- - Runtime: Fix an index out of range crash if cgroup memory is not
- * Documentation: Various improvments
- * Vagrant: Use only one deb line in /etc/apt
+
+#### Runtime
+
+- Fix the command existance check
+- strings.Split may return an empty string on no match
+- Fix an index out of range crash if cgroup memory is not
+
+#### Documentation
+
+* Various improvments
++ New example: sharing data between 2 couchdb databases
+
+#### Other
+
+* Vagrant: Use only one deb line in /etc/apt
++ Registry: Implement the new registry
 
 
 ## 0.2.2 (2013-05-03)
 ## 0.2.2 (2013-05-03)
- + Support for data volumes ('docker run -v=PATH')
- + Share data volumes between containers ('docker run -volumes-from')
- + Improved documentation
- * Upgrade to Go 1.0.3
- * Various upgrades to the dev environment for contributors
+
++ Support for data volumes ('docker run -v=PATH')
++ Share data volumes between containers ('docker run -volumes-from')
++ Improved documentation
+* Upgrade to Go 1.0.3
+* Various upgrades to the dev environment for contributors
 
 
 ## 0.2.1 (2013-05-01)
- + 'docker commit -run' bundles a layer with default runtime options: command, ports etc.
- * Improve install process on Vagrant
- + New Dockerfile operation: "maintainer"
- + New Dockerfile operation: "expose"
- + New Dockerfile operation: "cmd"
- + Contrib script to build a Debian base layer
- + 'docker -d -r': restart crashed containers at daemon startup
- * Runtime: improve test coverage
+
++ 'docker commit -run' bundles a layer with default runtime options: command, ports etc.
+* Improve install process on Vagrant
++ New Dockerfile operation: "maintainer"
++ New Dockerfile operation: "expose"
++ New Dockerfile operation: "cmd"
++ Contrib script to build a Debian base layer
++ 'docker -d -r': restart crashed containers at daemon startup
+* Runtime: improve test coverage
 
 
 ## 0.2.0 (2013-04-23)
- - Runtime: ghost containers can be killed and waited for
- * Documentation: update install intructions
- - Packaging: fix Vagrantfile
- - Development: automate releasing binaries and ubuntu packages
- + Add a changelog
- - Various bugfixes
+
+- Runtime: ghost containers can be killed and waited for
+* Documentation: update install intructions
+- Packaging: fix Vagrantfile
+- Development: automate releasing binaries and ubuntu packages
++ Add a changelog
+- Various bugfixes
 
 
 ## 0.1.8 (2013-04-22)
- - Dynamically detect cgroup capabilities
- - Issue stability warning on kernels <3.8
- - 'docker push' buffers on disk instead of memory
- - Fix 'docker diff' for removed files
- - Fix 'docker stop' for ghost containers
- - Fix handling of pidfile
- - Various bugfixes and stability improvements
+
+- Dynamically detect cgroup capabilities
+- Issue stability warning on kernels <3.8
+- 'docker push' buffers on disk instead of memory
+- Fix 'docker diff' for removed files
+- Fix 'docker stop' for ghost containers
+- Fix handling of pidfile
+- Various bugfixes and stability improvements
 
 
 ## 0.1.7 (2013-04-18)
- - Container ports are available on localhost
- - 'docker ps' shows allocated TCP ports
- - Contributors can run 'make hack' to start a continuous integration VM
- - Streamline ubuntu packaging & uploading
- - Various bugfixes and stability improvements
+
+- Container ports are available on localhost
+- 'docker ps' shows allocated TCP ports
+- Contributors can run 'make hack' to start a continuous integration VM
+- Streamline ubuntu packaging & uploading
+- Various bugfixes and stability improvements
 
 
 ## 0.1.6 (2013-04-17)
- - Record the author an image with 'docker commit -author'
+
+- Record the author of an image with 'docker commit -author'
 
 
 ## 0.1.5 (2013-04-17)
- - Disable standalone mode
- - Use a custom DNS resolver with 'docker -d -dns'
- - Detect ghost containers
- - Improve diagnosis of missing system capabilities
- - Allow disabling memory limits at compile time
- - Add debian packaging
- - Documentation: installing on Arch Linux
- - Documentation: running Redis on docker
- - Fixed lxc 0.9 compatibility
- - Automatically load aufs module
- - Various bugfixes and stability improvements
+
+- Disable standalone mode
+- Use a custom DNS resolver with 'docker -d -dns'
+- Detect ghost containers
+- Improve diagnosis of missing system capabilities
+- Allow disabling memory limits at compile time
+- Add debian packaging
+- Documentation: installing on Arch Linux
+- Documentation: running Redis on docker
+- Fixed lxc 0.9 compatibility
+- Automatically load aufs module
+- Various bugfixes and stability improvements
 
 
 ## 0.1.4 (2013-04-09)
- - Full support for TTY emulation
- - Detach from a TTY session with the escape sequence `C-p C-q`
- - Various bugfixes and stability improvements
- - Minor UI improvements
- - Automatically create our own bridge interface 'docker0'
+
+- Full support for TTY emulation
+- Detach from a TTY session with the escape sequence `C-p C-q`
+- Various bugfixes and stability improvements
+- Minor UI improvements
+- Automatically create our own bridge interface 'docker0'
 
 
 ## 0.1.3 (2013-04-04)
- - Choose TCP frontend port with '-p :PORT'
- - Layer format is versioned
- - Major reliability improvements to the process manager
- - Various bugfixes and stability improvements
+
+- Choose TCP frontend port with '-p :PORT'
+- Layer format is versioned
+- Major reliability improvements to the process manager
+- Various bugfixes and stability improvements
 
 
 ## 0.1.2 (2013-04-03)
- - Set container hostname with 'docker run -h'
- - Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
- - Various bugfixes and stability improvements
- - UI polish
- - Progress bar on push/pull
- - Use XZ compression by default
- - Make IP allocator lazy
+
+- Set container hostname with 'docker run -h'
+- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
+- Various bugfixes and stability improvements
+- UI polish
+- Progress bar on push/pull
+- Use XZ compression by default
+- Make IP allocator lazy
 
 
 ## 0.1.1 (2013-03-31)
- - Display shorthand IDs for convenience
- - Stabilize process management
- - Layers can include a commit message
- - Simplified 'docker attach'
- - Fixed support for re-attaching
- - Various bugfixes and stability improvements
- - Auto-download at run
- - Auto-login on push
- - Beefed up documentation
+
+- Display shorthand IDs for convenience
+- Stabilize process management
+- Layers can include a commit message
+- Simplified 'docker attach'
+- Fixed support for re-attaching
+- Various bugfixes and stability improvements
+- Auto-download at run
+- Auto-login on push
+- Beefed up documentation
 
 
 ## 0.1.0 (2013-03-23)
- - First release
- - Implement registry in order to push/pull images
- - TCP port allocation
- - Fix termcaps on Linux
- - Add documentation
- - Add Vagrant support with Vagrantfile
- - Add unit tests
- - Add repository/tags to ease image management
- - Improve the layer implementation
+
+Initial public release
+
+- Implement registry in order to push/pull images
+- TCP port allocation
+- Fix termcaps on Linux
+- Add documentation
+- Add Vagrant support with Vagrantfile
+- Add unit tests
+- Add repository/tags to ease image management
+- Improve the layer implementation

+ 19 - 2
CONTRIBUTING.md

@@ -3,6 +3,10 @@
 Want to hack on Docker? Awesome! Here are instructions to get you started. They are probably not perfect, please let us know if anything feels
 wrong or incomplete.
 
+## Build Environment
+
+For instructions on setting up your development environment, please see our dedicated [dev environment setup docs](http://docs.docker.io/en/latest/contributing/devenvironment/).
+
 ## Contribution guidelines
 
 ### Pull requests are always welcome
@@ -55,8 +59,10 @@ Submit unit tests for your changes.  Go has a great test framework built in; use
 it! Take a look at existing tests for inspiration. Run the full test suite on
 your branch before submitting a pull request.
 
-Make sure you include relevant updates or additions to documentation when
-creating or modifying features.
+Update the documentation when creating or modifying features. Test
+your documentation changes for clarity, concision, and correctness, as
+well as a clean document build. See ``docs/README.md`` for more
+information on building the docs and how docs get released.
 
 Write clean code. Universally formatted code promotes ease of writing, reading,
 and maintenance. Always run `go fmt` before committing your changes. Most
@@ -89,6 +95,17 @@ name and email address match your git configuration. The AUTHORS file is
 regenerated occasionally from the git commit history, so a mismatch may result
 in your changes being overwritten.
 
+### Approval
+
+Docker maintainers use LGTM (looks good to me) in comments on the code review
+to indicate acceptance.
+
+A change requires LGTMs from an absolute majority of the maintainers of each
+component affected. For example, if a change affects docs/ and registry/, it
+needs an absolute majority from the maintainers of docs/ AND, separately, an
+absolute majority of the maintainers of registry/.
+
+For more details, see [MAINTAINERS.md](hack/MAINTAINERS.md).
 
 ### How can I become a maintainer?
 

+ 7 - 8
Dockerfile

@@ -12,7 +12,7 @@
 #
 #
 # # Run the test suite:
-# docker run -privileged -lxc-conf=lxc.aa_profile=unconfined docker go test -v
+# docker run -privileged -lxc-conf=lxc.aa_profile=unconfined docker hack/make.sh test
 #
 # # Publish a release:
 # docker run -privileged -lxc-conf=lxc.aa_profile=unconfined \
@@ -33,15 +33,13 @@ run	apt-get update
 run	apt-get install -y -q curl
 run	apt-get install -y -q git
 run	apt-get install -y -q mercurial
-run	apt-get install -y -q build-essential
+run apt-get install -y -q build-essential libsqlite3-dev
 
-# Install Go from source (for eventual cross-compiling)
-env	CGO_ENABLED 0
-run	curl -s https://go.googlecode.com/files/go1.1.2.src.tar.gz | tar -v -C / -xz && mv /go /goroot
-run	cd /goroot/src && ./make.bash
-env GOROOT	/goroot
-env	PATH	$PATH:/goroot/bin
+# Install Go
+run	curl -s https://go.googlecode.com/files/go1.2rc2.src.tar.gz | tar -v -C /usr/local -xz
+env	PATH	/usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
 env	GOPATH	/go:/go/src/github.com/dotcloud/docker/vendor
+run cd /usr/local/go/src && ./make.bash && go install -ldflags '-w -linkmode external -extldflags "-static -Wl,--unresolved-symbols=ignore-in-shared-libs"' -tags netgo -a std
 
 # Ubuntu stuff
 run	apt-get install -y -q ruby1.9.3 rubygems libffi-dev
@@ -57,6 +55,7 @@ run	/bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_
 # Runtime dependencies
 run	apt-get install -y -q iptables
 run	apt-get install -y -q lxc
+run	apt-get install -y -q aufs-tools
 
 volume	/var/lib/docker
 workdir	/go/src/github.com/dotcloud/docker

+ 2 - 3
NOTICE

@@ -1,8 +1,7 @@
 Docker
-Copyright 2012-2013 dotCloud, inc.
+Copyright 2012-2013 Docker, Inc.
 
-This product includes software developed at dotCloud,
-inc. (http://www.dotcloud.com).
+This product includes software developed at Docker, Inc. (http://www.docker.com).
 
 This product contains software (https://github.com/kr/pty) developed
 by Keith Rarick, licensed under the MIT License.

+ 6 - 6
README.md

@@ -193,10 +193,10 @@ wrong or incomplete.
 *Brought to you courtesy of our legal counsel. For more context,
 please see the Notice document.*
 
-Transfers of Docker shall be in accordance with applicable export
-controls of any country and all other applicable legal requirements.
-Docker shall not be distributed or downloaded to or in Cuba, Iran,
-North Korea, Sudan or Syria and shall not be distributed or downloaded
-to any person on the Denied Persons List administered by the U.S.
-Department of Commerce.
+Transfers of Docker shall be in accordance with applicable export controls 
+of any country and all other applicable legal requirements. Without limiting the 
+foregoing, Docker shall not be distributed or downloaded to any individual or 
+location if such distribution or download would violate the applicable US 
+government export regulations. 
 
+For more information, please see http://www.bis.doc.gov

+ 1 - 1
VERSION

@@ -1 +1 @@
-0.6.3-dev
+0.6.5-dev

+ 4 - 0
Vagrantfile

@@ -39,6 +39,8 @@ Vagrant::Config.run do |config|
         "echo 'Installation of VBox Guest Additions is proceeding in the background.'; " \
         "echo 'Installation of VBox Guest Additions is proceeding in the background.'; " \
         "echo '\"vagrant reload\" can be used in about 2 minutes to activate the new guest additions.'; "
         "echo '\"vagrant reload\" can be used in about 2 minutes to activate the new guest additions.'; "
     end
     end
+    # Add vagrant user to the docker group
+    pkg_cmd << "usermod -a -G docker vagrant; "
     # Activate new kernel
     pkg_cmd << "shutdown -r +1; "
     config.vm.provision :shell, :inline => pkg_cmd
@@ -78,6 +80,8 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
   config.vm.provider :virtualbox do |vb|
     config.vm.box = BOX_NAME
     config.vm.box_url = BOX_URI
+    vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
+    vb.customize ["modifyvm", :id, "--natdnsproxy1", "on"]
   end
 end
 

+ 54 - 17
api.go

@@ -42,6 +42,9 @@ func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
 
 
 //If we don't do this, POST method without Content-type (even with empty body) will fail
 //If we don't do this, POST method without Content-type (even with empty body) will fail
 func parseForm(r *http.Request) error {
 func parseForm(r *http.Request) error {
+	if r == nil {
+		return nil
+	}
 	if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") {
 	if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") {
 		return err
 		return err
 	}
 	}
@@ -70,8 +73,11 @@ func httpError(w http.ResponseWriter, err error) {
 	} else if strings.Contains(err.Error(), "hasn't been activated") {
 	} else if strings.Contains(err.Error(), "hasn't been activated") {
 		statusCode = http.StatusForbidden
 		statusCode = http.StatusForbidden
 	}
 	}
-	utils.Debugf("[error %d] %s", statusCode, err)
-	http.Error(w, err.Error(), statusCode)
+
+	if err != nil {
+		utils.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error())
+		http.Error(w, err.Error(), statusCode)
+	}
 }
 }
 
 
 func writeJSON(w http.ResponseWriter, code int, v interface{}) error {
 func writeJSON(w http.ResponseWriter, code int, v interface{}) error {
@@ -102,7 +108,7 @@ func getBoolParam(value string) (bool, error) {
 func matchesContentType(contentType, expectedType string) bool {
 func matchesContentType(contentType, expectedType string) bool {
 	mimetype, _, err := mime.ParseMediaType(contentType)
 	mimetype, _, err := mime.ParseMediaType(contentType)
 	if err != nil {
 	if err != nil {
-		utils.Debugf("Error parsing media type: %s error: %s", contentType, err.Error())
+		utils.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
 	}
 	}
 	return err == nil && mimetype == expectedType
 	return err == nil && mimetype == expectedType
 }
 }
@@ -132,8 +138,23 @@ func postContainersKill(srv *Server, version float64, w http.ResponseWriter, r *
 	if vars == nil {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 		return fmt.Errorf("Missing parameter")
 	}
 	}
+	if err := parseForm(r); err != nil {
+		return err
+	}
 	name := vars["name"]
 	name := vars["name"]
-	if err := srv.ContainerKill(name); err != nil {
+
+	signal := 0
+	if r != nil {
+		s := r.Form.Get("signal")
+		if s != "" {
+			if s, err := strconv.Atoi(s); err != nil {
+				return err
+			} else {
+				signal = s
+			}
+		}
+	}
+	if err := srv.ContainerKill(name, signal); err != nil {
 		return err
 		return err
 	}
 	}
 	w.WriteHeader(http.StatusNoContent)
 	w.WriteHeader(http.StatusNoContent)
@@ -147,7 +168,7 @@ func getContainersExport(srv *Server, version float64, w http.ResponseWriter, r
 	name := vars["name"]
 	name := vars["name"]
 
 
 	if err := srv.ContainerExport(name, w); err != nil {
 	if err := srv.ContainerExport(name, w); err != nil {
-		utils.Debugf("%s", err)
+		utils.Errorf("%s", err)
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
@@ -192,7 +213,7 @@ func getEvents(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
 		_, err = wf.Write(b)
 		_, err = wf.Write(b)
 		if err != nil {
 		if err != nil {
 			// On error, evict the listener
 			// On error, evict the listener
-			utils.Debugf("%s", err)
+			utils.Errorf("%s", err)
 			srv.Lock()
 			srv.Lock()
 			delete(srv.listeners, r.RemoteAddr)
 			delete(srv.listeners, r.RemoteAddr)
 			srv.Unlock()
 			srv.Unlock()
@@ -346,8 +367,8 @@ func postCommit(srv *Server, version float64, w http.ResponseWriter, r *http.Req
 		return err
 		return err
 	}
 	}
 	config := &Config{}
 	config := &Config{}
-	if err := json.NewDecoder(r.Body).Decode(config); err != nil {
-		utils.Debugf("%s", err)
+	if err := json.NewDecoder(r.Body).Decode(config); err != nil && err != io.EOF {
+		utils.Errorf("%s", err)
 	}
 	}
 	repo := r.Form.Get("repo")
 	repo := r.Form.Get("repo")
 	tag := r.Form.Get("tag")
 	tag := r.Form.Get("tag")
@@ -500,8 +521,12 @@ func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http
 }
 }
 
 
 func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := parseForm(r); err != nil {
+		return err
+	}
 	config := &Config{}
 	config := &Config{}
 	out := &APIRun{}
 	out := &APIRun{}
+	name := r.Form.Get("name")
 
 
 	if err := json.NewDecoder(r.Body).Decode(config); err != nil {
 	if err := json.NewDecoder(r.Body).Decode(config); err != nil {
 		return err
 		return err
@@ -512,16 +537,19 @@ func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r
 		return err
 		return err
 	}
 	}
 
 
-	if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
+	if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
 		out.Warnings = append(out.Warnings, fmt.Sprintf("Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns))
 		out.Warnings = append(out.Warnings, fmt.Sprintf("Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns))
 		config.Dns = defaultDns
 		config.Dns = defaultDns
 	}
 	}
 
 
-	id, err := srv.ContainerCreate(config)
+	id, warnings, err := srv.ContainerCreate(config, name)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
 	out.ID = id
 	out.ID = id
+	for _, warning := range warnings {
+		out.Warnings = append(out.Warnings, warning)
+	}
 
 
 	if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
 	if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
 		log.Println("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
 		log.Println("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
@@ -567,12 +595,17 @@ func deleteContainers(srv *Server, version float64, w http.ResponseWriter, r *ht
 		return fmt.Errorf("Missing parameter")
 		return fmt.Errorf("Missing parameter")
 	}
 	}
 	name := vars["name"]
 	name := vars["name"]
+
 	removeVolume, err := getBoolParam(r.Form.Get("v"))
 	removeVolume, err := getBoolParam(r.Form.Get("v"))
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
+	removeLink, err := getBoolParam(r.Form.Get("link"))
+	if err != nil {
+		return err
+	}
 
 
-	if err := srv.ContainerDestroy(name, removeVolume); err != nil {
+	if err := srv.ContainerDestroy(name, removeVolume, removeLink); err != nil {
 		return err
 		return err
 	}
 	}
 	w.WriteHeader(http.StatusNoContent)
 	w.WriteHeader(http.StatusNoContent)
@@ -619,6 +652,10 @@ func postContainersStart(srv *Server, version float64, w http.ResponseWriter, r
 		return fmt.Errorf("Missing parameter")
 		return fmt.Errorf("Missing parameter")
 	}
 	}
 	name := vars["name"]
 	name := vars["name"]
+	// Register any links from the host config before starting the container
+	if err := srv.RegisterLinks(name, hostConfig); err != nil {
+		return err
+	}
 	if err := srv.ContainerStart(name, hostConfig); err != nil {
 	if err := srv.ContainerStart(name, hostConfig); err != nil {
 		return err
 		return err
 	}
 	}
@@ -652,6 +689,7 @@ func postContainersWait(srv *Server, version float64, w http.ResponseWriter, r *
 		return fmt.Errorf("Missing parameter")
 		return fmt.Errorf("Missing parameter")
 	}
 	}
 	name := vars["name"]
 	name := vars["name"]
+
 	status, err := srv.ContainerWait(name)
 	status, err := srv.ContainerWait(name)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
@@ -792,7 +830,7 @@ func wsContainersAttach(srv *Server, version float64, w http.ResponseWriter, r *
 		defer ws.Close()
 		defer ws.Close()
 
 
 		if err := srv.ContainerAttach(name, logs, stream, stdin, stdout, stderr, ws, ws, ws); err != nil {
 		if err := srv.ContainerAttach(name, logs, stream, stdin, stdout, stderr, ws, ws, ws); err != nil {
-			utils.Debugf("Error: %s", err)
+			utils.Errorf("Error: %s", err)
 		}
 		}
 	})
 	})
 	h.ServeHTTP(w, r)
 	h.ServeHTTP(w, r)
@@ -905,8 +943,7 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
 	b := NewBuildFile(srv, utils.NewWriteFlusher(w), !suppressOutput, !noCache, rm)
 	b := NewBuildFile(srv, utils.NewWriteFlusher(w), !suppressOutput, !noCache, rm)
 	id, err := b.Build(context)
 	id, err := b.Build(context)
 	if err != nil {
 	if err != nil {
-		fmt.Fprintf(w, "Error build: %s\n", err)
-		return err
+		return fmt.Errorf("Error build: %s", err)
 	}
 	}
 	if repoName != "" {
 	if repoName != "" {
 		srv.runtime.repositories.Set(repoName, tag, id, false)
 		srv.runtime.repositories.Set(repoName, tag, id, false)
@@ -938,7 +975,7 @@ func postContainersCopy(srv *Server, version float64, w http.ResponseWriter, r *
 	}
 	}
 
 
 	if err := srv.ContainerCopy(name, copyData.Resource, w); err != nil {
 	if err := srv.ContainerCopy(name, copyData.Resource, w); err != nil {
-		utils.Debugf("%s", err.Error())
+		utils.Errorf("%s", err.Error())
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
@@ -973,7 +1010,7 @@ func makeHttpHandler(srv *Server, logging bool, localMethod string, localRoute s
 		if err != nil {
 		if err != nil {
 			version = APIVERSION
 			version = APIVERSION
 		}
 		}
-		if srv.enableCors {
+		if srv.runtime.config.EnableCors {
 			writeCorsHeaders(w, r)
 			writeCorsHeaders(w, r)
 		}
 		}
 
 
@@ -983,7 +1020,7 @@ func makeHttpHandler(srv *Server, logging bool, localMethod string, localRoute s
 		}
 		}
 
 
 		if err := handlerFunc(srv, version, w, r, mux.Vars(r)); err != nil {
 		if err := handlerFunc(srv, version, w, r, mux.Vars(r)); err != nil {
-			utils.Debugf("Error: %s", err)
+			utils.Errorf("Error: %s", err)
 			httpError(w, err)
 			httpError(w, err)
 		}
 		}
 	}
 	}
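
The postContainersKill hunk above adds an optional `signal` query parameter to the kill endpoint. A minimal client-side sketch of exercising it over plain HTTP; the TCP address, port, and container ID are assumptions for illustration only, not part of this change:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Hypothetical container ID; replace with a real one from `docker ps`.
	id := "4fa6e0f0c678"

	// The kill endpoint now takes an optional numeric signal; 15 is SIGTERM.
	url := fmt.Sprintf("http://127.0.0.1:4243/containers/%s/kill?signal=%d", id, 15)

	resp, err := http.Post(url, "application/json", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The handler replies 204 No Content on success.
	fmt.Println("status:", resp.StatusCode)
}
```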

+ 10 - 17
api_params.go

@@ -1,12 +1,11 @@
 package docker
 package docker
 
 
-import "encoding/json"
-
 type APIHistory struct {
 type APIHistory struct {
 	ID        string   `json:"Id"`
 	ID        string   `json:"Id"`
 	Tags      []string `json:",omitempty"`
 	Tags      []string `json:",omitempty"`
 	Created   int64
 	Created   int64
 	CreatedBy string `json:",omitempty"`
 	CreatedBy string `json:",omitempty"`
+	Size      int64
 }
 }
 
 
 type APIImages struct {
 type APIImages struct {
@@ -52,17 +51,18 @@ type APIContainers struct {
 	Ports      []APIPort
 	Ports      []APIPort
 	SizeRw     int64
 	SizeRw     int64
 	SizeRootFs int64
 	SizeRootFs int64
+	Names      []string
 }
 }
 
 
 func (self *APIContainers) ToLegacy() APIContainersOld {
 func (self *APIContainers) ToLegacy() APIContainersOld {
 	return APIContainersOld{
 	return APIContainersOld{
-		ID: self.ID,
-		Image: self.Image,
-		Command: self.Command,
-		Created: self.Created,
-		Status: self.Status,
-		Ports: displayablePorts(self.Ports),
-		SizeRw: self.SizeRw,
+		ID:         self.ID,
+		Image:      self.Image,
+		Command:    self.Command,
+		Created:    self.Created,
+		Status:     self.Status,
+		Ports:      displayablePorts(self.Ports),
+		SizeRw:     self.SizeRw,
 		SizeRootFs: self.SizeRootFs,
 		SizeRootFs: self.SizeRootFs,
 	}
 	}
 }
 }
@@ -96,14 +96,7 @@ type APIPort struct {
 	PrivatePort int64
 	PrivatePort int64
 	PublicPort  int64
 	PublicPort  int64
 	Type        string
 	Type        string
-}
-
-func (port *APIPort) MarshalJSON() ([]byte, error) {
-	return json.Marshal(map[string]interface{}{
-		"PrivatePort": port.PrivatePort,
-		"PublicPort":  port.PublicPort,
-		"Type":        port.Type,
-	})
+	IP          string
 }
 }
 
 
 type APIVersion struct {
 type APIVersion struct {
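
With the hand-written MarshalJSON removed, APIPort is serialized by encoding/json's default struct encoding, which also picks up the new IP field. A standalone sketch, with the struct mirrored locally purely for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirror of APIPort for illustration; the real type lives in api_params.go.
type APIPort struct {
	PrivatePort int64
	PublicPort  int64
	Type        string
	IP          string
}

func main() {
	p := APIPort{PrivatePort: 6379, PublicPort: 49153, Type: "tcp", IP: "0.0.0.0"}
	b, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	// All four exported fields are emitted by the default encoder.
	fmt.Println(string(b))
}
```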

+ 70 - 28
api_test.go

@@ -5,6 +5,7 @@ import (
 	"bufio"
 	"bufio"
 	"bytes"
 	"bytes"
 	"encoding/json"
 	"encoding/json"
+	"fmt"
 	"github.com/dotcloud/docker/utils"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io"
 	"net"
 	"net"
@@ -12,6 +13,7 @@ import (
 	"net/http/httptest"
 	"net/http/httptest"
 	"os"
 	"os"
 	"path"
 	"path"
+	"strings"
 	"testing"
 	"testing"
 	"time"
 	"time"
 )
 )
@@ -40,6 +42,25 @@ func TestGetBoolParam(t *testing.T) {
 	}
 	}
 }
 }
 
 
+func TestHttpError(t *testing.T) {
+	r := httptest.NewRecorder()
+	httpError(r, fmt.Errorf("No such method"))
+	if r.Code != http.StatusNotFound {
+		t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code)
+	}
+
+	r = httptest.NewRecorder()
+	httpError(r, fmt.Errorf("This account hasn't been activated"))
+	if r.Code != http.StatusForbidden {
+		t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code)
+	}
+
+	r = httptest.NewRecorder()
+	httpError(r, fmt.Errorf("Some error"))
+	if r.Code != http.StatusInternalServerError {
+		t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code)
+	}
+}
+
 func TestGetVersion(t *testing.T) {
 func TestGetVersion(t *testing.T) {
 	var err error
 	var err error
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
@@ -91,6 +112,7 @@ func TestGetInfo(t *testing.T) {
 
 
 func TestGetEvents(t *testing.T) {
 func TestGetEvents(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
+	defer nuke(runtime)
 	srv := &Server{
 	srv := &Server{
 		runtime:   runtime,
 		runtime:   runtime,
 		events:    make([]utils.JSONMessage, 0, 64),
 		events:    make([]utils.JSONMessage, 0, 64),
@@ -243,7 +265,11 @@ func TestGetImagesJSON(t *testing.T) {
 		t.Fatalf("Error expected, received none")
 		t.Fatalf("Error expected, received none")
 	}
 	}
 
 
-	httpError(r4, err)
+	if !strings.HasPrefix(err.Error(), "Bad parameter") {
+		t.Fatalf("Error should start with \"Bad parameter\"")
+	}
+	http.Error(r4, err.Error(), http.StatusBadRequest)
+
 	if r4.Code != http.StatusBadRequest {
 	if r4.Code != http.StatusBadRequest {
 		t.Fatalf("%d Bad Request expected, received %d\n", http.StatusBadRequest, r4.Code)
 		t.Fatalf("%d Bad Request expected, received %d\n", http.StatusBadRequest, r4.Code)
 	}
 	}
@@ -321,10 +347,12 @@ func TestGetContainersJSON(t *testing.T) {
 
 
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
-	container, err := runtime.Create(&Config{
+	beginLen := runtime.containers.Len()
+
+	container, _, err := runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"echo", "test"},
 		Cmd:   []string{"echo", "test"},
-	})
+	}, "")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -343,8 +371,8 @@ func TestGetContainersJSON(t *testing.T) {
 	if err := json.Unmarshal(r.Body.Bytes(), &containers); err != nil {
 	if err := json.Unmarshal(r.Body.Bytes(), &containers); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	if len(containers) != 1 {
-		t.Fatalf("Expected %d container, %d found", 1, len(containers))
+	if len(containers) != beginLen+1 {
+		t.Fatalf("Expected %d container, %d found (started with: %d)", beginLen+1, len(containers), beginLen)
 	}
 	}
 	if containers[0].ID != container.ID {
 	if containers[0].ID != container.ID {
 		t.Fatalf("Container ID mismatch. Expected: %s, received: %s\n", container.ID, containers[0].ID)
 		t.Fatalf("Container ID mismatch. Expected: %s, received: %s\n", container.ID, containers[0].ID)
@@ -358,11 +386,12 @@ func TestGetContainersExport(t *testing.T) {
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
 	// Create a container and remove a file
 	// Create a container and remove a file
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image: GetTestImage(runtime).ID,
 			Image: GetTestImage(runtime).ID,
 			Cmd:   []string{"touch", "/test"},
 			Cmd:   []string{"touch", "/test"},
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -408,11 +437,12 @@ func TestGetContainersChanges(t *testing.T) {
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
 	// Create a container and remove a file
 	// Create a container and remove a file
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image: GetTestImage(runtime).ID,
 			Image: GetTestImage(runtime).ID,
 			Cmd:   []string{"/bin/rm", "/etc/passwd"},
 			Cmd:   []string{"/bin/rm", "/etc/passwd"},
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -446,20 +476,18 @@ func TestGetContainersChanges(t *testing.T) {
 
 
 func TestGetContainersTop(t *testing.T) {
 func TestGetContainersTop(t *testing.T) {
 	t.Skip("Fixme. Skipping test for now. Reported error when testing using dind: 'api_test.go:527: Expected 2 processes, found 0.'")
 	t.Skip("Fixme. Skipping test for now. Reported error when testing using dind: 'api_test.go:527: Expected 2 processes, found 0.'")
-	runtime, err := newTestRuntime()
-	if err != nil {
-		t.Fatal(err)
-	}
+	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image:     GetTestImage(runtime).ID,
 			Image:     GetTestImage(runtime).ID,
 			Cmd:       []string{"/bin/sh", "-c", "cat"},
 			Cmd:       []string{"/bin/sh", "-c", "cat"},
 			OpenStdin: true,
 			OpenStdin: true,
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -536,11 +564,12 @@ func TestGetContainersByName(t *testing.T) {
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
 	// Create a container and remove a file
 	// Create a container and remove a file
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image: GetTestImage(runtime).ID,
 			Image: GetTestImage(runtime).ID,
 			Cmd:   []string{"echo", "test"},
 			Cmd:   []string{"echo", "test"},
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -567,11 +596,12 @@ func TestPostCommit(t *testing.T) {
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
 	// Create a container and remove a file
 	// Create a container and remove a file
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image: GetTestImage(runtime).ID,
 			Image: GetTestImage(runtime).ID,
 			Cmd:   []string{"touch", "/test"},
 			Cmd:   []string{"touch", "/test"},
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -661,12 +691,13 @@ func TestPostContainersKill(t *testing.T) {
 
 
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image:     GetTestImage(runtime).ID,
 			Image:     GetTestImage(runtime).ID,
 			Cmd:       []string{"/bin/cat"},
 			Cmd:       []string{"/bin/cat"},
 			OpenStdin: true,
 			OpenStdin: true,
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -703,12 +734,13 @@ func TestPostContainersRestart(t *testing.T) {
 
 
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image:     GetTestImage(runtime).ID,
 			Image:     GetTestImage(runtime).ID,
-			Cmd:       []string{"/bin/cat"},
+			Cmd:       []string{"/bin/top"},
 			OpenStdin: true,
 			OpenStdin: true,
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -757,12 +789,13 @@ func TestPostContainersStart(t *testing.T) {
 
 
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image:     GetTestImage(runtime).ID,
 			Image:     GetTestImage(runtime).ID,
 			Cmd:       []string{"/bin/cat"},
 			Cmd:       []string{"/bin/cat"},
 			OpenStdin: true,
 			OpenStdin: true,
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -776,6 +809,8 @@ func TestPostContainersStart(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
+	req.Header.Set("Content-Type", "application/json")
+
 	r := httptest.NewRecorder()
 	r := httptest.NewRecorder()
 	if err := postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
 	if err := postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -807,12 +842,13 @@ func TestPostContainersStop(t *testing.T) {
 
 
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image:     GetTestImage(runtime).ID,
 			Image:     GetTestImage(runtime).ID,
-			Cmd:       []string{"/bin/cat"},
+			Cmd:       []string{"/bin/top"},
 			OpenStdin: true,
 			OpenStdin: true,
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -854,12 +890,13 @@ func TestPostContainersWait(t *testing.T) {
 
 
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image:     GetTestImage(runtime).ID,
 			Image:     GetTestImage(runtime).ID,
 			Cmd:       []string{"/bin/sleep", "1"},
 			Cmd:       []string{"/bin/sleep", "1"},
 			OpenStdin: true,
 			OpenStdin: true,
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -896,12 +933,13 @@ func TestPostContainersAttach(t *testing.T) {
 
 
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image:     GetTestImage(runtime).ID,
 			Image:     GetTestImage(runtime).ID,
 			Cmd:       []string{"/bin/cat"},
 			Cmd:       []string{"/bin/cat"},
 			OpenStdin: true,
 			OpenStdin: true,
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -985,12 +1023,13 @@ func TestPostContainersAttachStderr(t *testing.T) {
 
 
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image:     GetTestImage(runtime).ID,
 			Image:     GetTestImage(runtime).ID,
 			Cmd:       []string{"/bin/sh", "-c", "/bin/cat >&2"},
 			Cmd:       []string{"/bin/sh", "-c", "/bin/cat >&2"},
 			OpenStdin: true,
 			OpenStdin: true,
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -1077,10 +1116,10 @@ func TestDeleteContainers(t *testing.T) {
 
 
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
-	container, err := runtime.Create(&Config{
+	container, _, err := runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"touch", "/test"},
 		Cmd:   []string{"touch", "/test"},
-	})
+	}, "")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -1115,7 +1154,8 @@ func TestOptionsRoute(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
-	srv := &Server{runtime: runtime, enableCors: true}
+	runtime.config.EnableCors = true
+	srv := &Server{runtime: runtime}
 
 
 	r := httptest.NewRecorder()
 	r := httptest.NewRecorder()
 	router, err := createRouter(srv, false)
 	router, err := createRouter(srv, false)
@@ -1138,7 +1178,8 @@ func TestGetEnabledCors(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
-	srv := &Server{runtime: runtime, enableCors: true}
+	runtime.config.EnableCors = true
+	srv := &Server{runtime: runtime}
 
 
 	r := httptest.NewRecorder()
 	r := httptest.NewRecorder()
 
 
@@ -1265,11 +1306,12 @@ func TestPostContainersCopy(t *testing.T) {
 	srv := &Server{runtime: runtime}
 	srv := &Server{runtime: runtime}
 
 
 	// Create a container and remove a file
 	// Create a container and remove a file
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image: GetTestImage(runtime).ID,
 			Image: GetTestImage(runtime).ID,
 			Cmd:   []string{"touch", "/test.txt"},
 			Cmd:   []string{"touch", "/test.txt"},
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)

+ 4 - 1
auth/auth_test.go

@@ -76,7 +76,7 @@ func TestCreateAccount(t *testing.T) {
 }
 }
 
 
 func setupTempConfigFile() (*ConfigFile, error) {
 func setupTempConfigFile() (*ConfigFile, error) {
-	root, err := ioutil.TempDir("", "docker-test")
+	root, err := ioutil.TempDir("", "docker-test-auth")
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
@@ -101,6 +101,7 @@ func TestSameAuthDataPostSave(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
+	defer os.RemoveAll(configFile.rootPath)
 
 
 	err = SaveConfig(configFile)
 	err = SaveConfig(configFile)
 	if err != nil {
 	if err != nil {
@@ -127,6 +128,7 @@ func TestResolveAuthConfigIndexServer(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
+	defer os.RemoveAll(configFile.rootPath)
 
 
 	for _, registry := range []string{"", IndexServerAddress()} {
 	for _, registry := range []string{"", IndexServerAddress()} {
 		resolved := configFile.ResolveAuthConfig(registry)
 		resolved := configFile.ResolveAuthConfig(registry)
@@ -141,6 +143,7 @@ func TestResolveAuthConfigFullURL(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
+	defer os.RemoveAll(configFile.rootPath)
 
 
 	registryAuth := AuthConfig{
 	registryAuth := AuthConfig{
 		Username: "foo-user",
 		Username: "foo-user",

+ 17 - 8
buildfile.go

@@ -332,7 +332,7 @@ func (b *buildFile) CmdAdd(args string) error {
 
 
 	b.config.Image = b.image
 	b.config.Image = b.image
 	// Create the container and start it
 	// Create the container and start it
-	container, err := b.runtime.Create(b.config)
+	container, _, err := b.runtime.Create(b.config, "")
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -367,7 +367,7 @@ func (b *buildFile) run() (string, error) {
 	b.config.Image = b.image
 	b.config.Image = b.image
 
 
 	// Create the container and start it
 	// Create the container and start it
-	c, err := b.runtime.Create(b.config)
+	c, _, err := b.runtime.Create(b.config, "")
 	if err != nil {
 	if err != nil {
 		return "", err
 		return "", err
 	}
 	}
@@ -378,15 +378,22 @@ func (b *buildFile) run() (string, error) {
 	c.Path = b.config.Cmd[0]
 	c.Path = b.config.Cmd[0]
 	c.Args = b.config.Cmd[1:]
 	c.Args = b.config.Cmd[1:]
 
 
+	var errCh chan error
+
+	if b.verbose {
+		errCh = utils.Go(func() error {
+			return <-c.Attach(nil, nil, b.out, b.out)
+		})
+	}
+
 	//start the container
 	//start the container
 	hostConfig := &HostConfig{}
 	hostConfig := &HostConfig{}
 	if err := c.Start(hostConfig); err != nil {
 	if err := c.Start(hostConfig); err != nil {
 		return "", err
 		return "", err
 	}
 	}
 
 
-	if b.verbose {
-		err = <-c.Attach(nil, nil, b.out, b.out)
-		if err != nil {
+	if errCh != nil {
+		if err := <-errCh; err != nil {
 			return "", err
 			return "", err
 		}
 		}
 	}
 	}
@@ -423,10 +430,13 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 			}
 			}
 		}
 		}
 
 
-		container, err := b.runtime.Create(b.config)
+		container, warnings, err := b.runtime.Create(b.config, "")
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
+		for _, warning := range warnings {
+			fmt.Fprintf(b.out, " ---> [Warning] %s\n", warning)
+		}
 		b.tmpContainers[container.ID] = struct{}{}
 		b.tmpContainers[container.ID] = struct{}{}
 		fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(container.ID))
 		fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(container.ID))
 		id = container.ID
 		id = container.ID
@@ -458,9 +468,8 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 var lineContinuation = regexp.MustCompile(`\s*\\\s*\n`)
 var lineContinuation = regexp.MustCompile(`\s*\\\s*\n`)
 
 
 func (b *buildFile) Build(context io.Reader) (string, error) {
 func (b *buildFile) Build(context io.Reader) (string, error) {
-	// FIXME: @creack any reason for using /tmp instead of ""?
 	// FIXME: @creack "name" is a terrible variable name
 	// FIXME: @creack "name" is a terrible variable name
-	name, err := ioutil.TempDir("/tmp", "docker-build")
+	name, err := ioutil.TempDir("", "docker-build")
 	if err != nil {
 	if err != nil {
 		return "", err
 		return "", err
 	}
 	}
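
The run() hunk above starts the attach in a goroutine and collects its result through an error channel, so build output is captured from the container's first write. A reduced sketch of that channel pattern using only the standard library; apart from the shape of the utils.Go helper, all names here are illustrative:

```go
package main

import "fmt"

// Go runs f in a goroutine and returns a buffered channel that will carry its
// error, mirroring the utils.Go helper used in the hunk above.
func Go(f func() error) chan error {
	ch := make(chan error, 1)
	go func() {
		ch <- f()
	}()
	return ch
}

func main() {
	var errCh chan error
	verbose := true

	if verbose {
		// Begin "attaching" before the work starts so no output is missed.
		errCh = Go(func() error {
			fmt.Println("attached: streaming output...")
			return nil
		})
	}

	fmt.Println("container started")

	if errCh != nil {
		if err := <-errCh; err != nil {
			fmt.Println("attach failed:", err)
		}
	}
}
```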

+ 6 - 24
buildfile_test.go

@@ -229,10 +229,7 @@ func TestBuild(t *testing.T) {
 
 
 func buildImage(context testContextTemplate, t *testing.T, srv *Server, useCache bool) *Image {
 func buildImage(context testContextTemplate, t *testing.T, srv *Server, useCache bool) *Image {
 	if srv == nil {
 	if srv == nil {
-		runtime, err := newTestRuntime()
-		if err != nil {
-			t.Fatal(err)
-		}
+		runtime := mkRuntime(t)
 		defer nuke(runtime)
 		defer nuke(runtime)
 
 
 		srv = &Server{
 		srv = &Server{
@@ -370,10 +367,7 @@ func TestBuildEntrypoint(t *testing.T) {
 // testing #1405 - config.Cmd does not get cleaned up if
 // testing #1405 - config.Cmd does not get cleaned up if
 // utilizing cache
 // utilizing cache
 func TestBuildEntrypointRunCleanup(t *testing.T) {
 func TestBuildEntrypointRunCleanup(t *testing.T) {
-	runtime, err := newTestRuntime()
-	if err != nil {
-		t.Fatal(err)
-	}
+	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
 	srv := &Server{
 	srv := &Server{
@@ -402,10 +396,7 @@ func TestBuildEntrypointRunCleanup(t *testing.T) {
 }
 }
 
 
 func TestBuildImageWithCache(t *testing.T) {
 func TestBuildImageWithCache(t *testing.T) {
-	runtime, err := newTestRuntime()
-	if err != nil {
-		t.Fatal(err)
-	}
+	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
 	srv := &Server{
 	srv := &Server{
@@ -433,10 +424,7 @@ func TestBuildImageWithCache(t *testing.T) {
 }
 }
 
 
 func TestBuildImageWithoutCache(t *testing.T) {
 func TestBuildImageWithoutCache(t *testing.T) {
-	runtime, err := newTestRuntime()
-	if err != nil {
-		t.Fatal(err)
-	}
+	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
 	srv := &Server{
 	srv := &Server{
@@ -464,10 +452,7 @@ func TestBuildImageWithoutCache(t *testing.T) {
 }
 }
 
 
 func TestForbiddenContextPath(t *testing.T) {
 func TestForbiddenContextPath(t *testing.T) {
-	runtime, err := newTestRuntime()
-	if err != nil {
-		t.Fatal(err)
-	}
+	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
 	srv := &Server{
 	srv := &Server{
@@ -513,10 +498,7 @@ func TestForbiddenContextPath(t *testing.T) {
 }
 }
 
 
 func TestBuildADDFileNotFound(t *testing.T) {
 func TestBuildADDFileNotFound(t *testing.T) {
-	runtime, err := newTestRuntime()
-	if err != nil {
-		t.Fatal(err)
-	}
+	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
 	srv := &Server{
 	srv := &Server{

+ 292 - 128
commands.go

@@ -23,6 +23,7 @@ import (
 	"os/signal"
 	"os/signal"
 	"path/filepath"
 	"path/filepath"
 	"reflect"
 	"reflect"
+	"regexp"
 	"runtime"
 	"runtime"
 	"sort"
 	"sort"
 	"strconv"
 	"strconv"
@@ -41,9 +42,13 @@ var (
 	ErrConnectionRefused = errors.New("Can't connect to docker daemon. Is 'docker -d' running on this host?")
 	ErrConnectionRefused = errors.New("Can't connect to docker daemon. Is 'docker -d' running on this host?")
 )
 )
 
 
-func (cli *DockerCli) getMethod(name string) (reflect.Method, bool) {
+func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) {
 	methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
 	methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
-	return reflect.TypeOf(cli).MethodByName(methodName)
+	method := reflect.ValueOf(cli).MethodByName(methodName)
+	if !method.IsValid() {
+		return nil, false
+	}
+	return method.Interface().(func(...string) error), true
 }
 }
 
 
 func ParseCommands(proto, addr string, args ...string) error {
 func ParseCommands(proto, addr string, args ...string) error {
@@ -55,14 +60,7 @@ func ParseCommands(proto, addr string, args ...string) error {
 			fmt.Println("Error: Command not found:", args[0])
 			fmt.Println("Error: Command not found:", args[0])
 			return cli.CmdHelp(args[1:]...)
 			return cli.CmdHelp(args[1:]...)
 		}
 		}
-		ret := method.Func.CallSlice([]reflect.Value{
-			reflect.ValueOf(cli),
-			reflect.ValueOf(args[1:]),
-		})[0].Interface()
-		if ret == nil {
-			return nil
-		}
-		return ret.(error)
+		return method(args[1:]...)
 	}
 	}
 	return cli.CmdHelp(args...)
 	return cli.CmdHelp(args...)
 }
 }
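
getMethod now hands back a directly callable func(...string) error instead of a reflect.Method, which lets ParseCommands and CmdHelp drop the CallSlice plumbing. A self-contained sketch of the same reflection lookup; the cli type and the single CmdVersion command are stand-ins for illustration, not Docker's real types:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

type cli struct{}

// CmdVersion stands in for any "Cmd<Name>" subcommand handler.
func (c *cli) CmdVersion(args ...string) error {
	fmt.Println("version subcommand called with:", args)
	return nil
}

// getMethod resolves "version" to CmdVersion via reflection and returns a
// directly callable function, as in the commands.go hunk above.
func (c *cli) getMethod(name string) (func(...string) error, bool) {
	methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
	method := reflect.ValueOf(c).MethodByName(methodName)
	if !method.IsValid() {
		return nil, false
	}
	return method.Interface().(func(...string) error), true
}

func main() {
	c := &cli{}
	method, ok := c.getMethod("version")
	if !ok {
		fmt.Println("Error: Command not found: version")
		return
	}
	if err := method("--help"); err != nil {
		fmt.Println(err)
	}
}
```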
@@ -73,10 +71,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
 		if !exists {
 		if !exists {
 			fmt.Fprintf(cli.err, "Error: Command not found: %s\n", args[0])
 			fmt.Fprintf(cli.err, "Error: Command not found: %s\n", args[0])
 		} else {
 		} else {
-			method.Func.CallSlice([]reflect.Value{
-				reflect.ValueOf(cli),
-				reflect.ValueOf([]string{"--help"}),
-			})[0].Interface()
+			method("--help")
 			return nil
 			return nil
 		}
 		}
 	}
 	}
@@ -99,7 +94,6 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
 		{"login", "Register or Login to the docker registry server"},
 		{"login", "Register or Login to the docker registry server"},
 		{"logs", "Fetch the logs of a container"},
 		{"logs", "Fetch the logs of a container"},
 		{"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"},
 		{"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"},
-		{"top", "Lookup the running processes of a container"},
 		{"ps", "List containers"},
 		{"ps", "List containers"},
 		{"pull", "Pull an image or a repository from the docker registry server"},
 		{"pull", "Pull an image or a repository from the docker registry server"},
 		{"push", "Push an image or a repository to the docker registry server"},
 		{"push", "Push an image or a repository to the docker registry server"},
@@ -111,6 +105,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
 		{"start", "Start a stopped container"},
 		{"start", "Start a stopped container"},
 		{"stop", "Stop a running container"},
 		{"stop", "Stop a running container"},
 		{"tag", "Tag an image into a repository"},
 		{"tag", "Tag an image into a repository"},
+		{"top", "Lookup the running processes of a container"},
 		{"version", "Show the docker version information"},
 		{"version", "Show the docker version information"},
 		{"wait", "Block until a container stops, then print its exit code"},
 		{"wait", "Block until a container stops, then print its exit code"},
 	} {
 	} {
@@ -303,7 +298,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
 	}
 	}
 
 
 	cli.LoadConfigFile()
 	cli.LoadConfigFile()
-	authconfig, ok := cli.configFile.Configs[auth.IndexServerAddress()]
+	authconfig, ok := cli.configFile.Configs[serverAddress]
 	if !ok {
 	if !ok {
 		authconfig = auth.AuthConfig{}
 		authconfig = auth.AuthConfig{}
 	}
 	}
@@ -418,7 +413,7 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
 	var out APIVersion
 	var out APIVersion
 	err = json.Unmarshal(body, &out)
 	err = json.Unmarshal(body, &out)
 	if err != nil {
 	if err != nil {
-		utils.Debugf("Error unmarshal: body: %s, err: %s\n", body, err)
+		utils.Errorf("Error unmarshal: body: %s, err: %s\n", body, err)
 		return err
 		return err
 	}
 	}
 	if out.Version != "" {
 	if out.Version != "" {
@@ -496,7 +491,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 }
 }
 
 
 func (cli *DockerCli) CmdStop(args ...string) error {
 func (cli *DockerCli) CmdStop(args ...string) error {
-	cmd := Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container")
+	cmd := Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)")
 	nSeconds := cmd.Int("t", 10, "Number of seconds to wait for the container to stop before killing it.")
 	nSeconds := cmd.Int("t", 10, "Number of seconds to wait for the container to stop before killing it.")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -545,8 +540,23 @@ func (cli *DockerCli) CmdRestart(args ...string) error {
 	return nil
 	return nil
 }
 }
 
 
+func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
+	sigc := make(chan os.Signal, 1)
+	utils.CatchAll(sigc)
+	go func() {
+		for s := range sigc {
+			if _, _, err := cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%d", cid, s), nil); err != nil {
+				utils.Debugf("Error sending signal: %s", err)
+			}
+		}
+	}()
+	return sigc
+}
+
 func (cli *DockerCli) CmdStart(args ...string) error {
 func (cli *DockerCli) CmdStart(args ...string) error {
 	cmd := Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
 	cmd := Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
+	attach := cmd.Bool("a", false, "Attach container's stdout/stderr and forward all signals to the process")
+	openStdin := cmd.Bool("i", false, "Attach container's stdin")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
 	}
 	}
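
forwardAllSignals above relays every signal the CLI catches to the daemon's kill endpoint for the attached container. A minimal sketch of the signal side of that pattern, without the HTTP call; the SIGINT/SIGTERM set is an assumption, since the real helper registers everything via utils.CatchAll:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

// forward relays incoming signals to handle until the channel is torn down.
func forward(handle func(os.Signal)) chan os.Signal {
	sigc := make(chan os.Signal, 1)
	// The real helper catches every signal; SIGINT/SIGTERM stand in here.
	signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		for s := range sigc {
			handle(s)
		}
	}()
	return sigc
}

func main() {
	sigc := forward(func(s os.Signal) {
		// In the CLI this becomes POST /containers/<id>/kill?signal=<n>.
		fmt.Println("forwarding signal:", s)
	})

	fmt.Println("forwarding SIGINT/SIGTERM for 30 seconds...")
	time.Sleep(30 * time.Second)

	// StopCatch-style teardown: stop delivery, then close the channel so the
	// forwarding goroutine exits its range loop.
	signal.Stop(sigc)
	close(sigc)
}
```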
@@ -555,17 +565,75 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 		return nil
 		return nil
 	}
 	}
 
 
+	var cErr chan error
+	if *attach || *openStdin {
+		if cmd.NArg() > 1 {
+			return fmt.Errorf("Impossible to start and attach multiple containers at once.")
+		}
+
+		body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil)
+		if err != nil {
+			return err
+		}
+
+		container := &Container{}
+		err = json.Unmarshal(body, container)
+		if err != nil {
+			return err
+		}
+
+		if !container.Config.Tty {
+			sigc := cli.forwardAllSignals(cmd.Arg(0))
+			defer utils.StopCatch(sigc)
+		}
+
+		if container.Config.Tty && cli.isTerminal {
+			if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
+				return err
+			}
+		}
+
+		var in io.ReadCloser
+
+		v := url.Values{}
+		v.Set("stream", "1")
+		if *openStdin && container.Config.OpenStdin {
+			v.Set("stdin", "1")
+			in = cli.in
+		}
+		v.Set("stdout", "1")
+		v.Set("stderr", "1")
+
+		cErr = utils.Go(func() error {
+			return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil)
+		})
+	}
+
 	var encounteredError error
 	var encounteredError error
-	for _, name := range args {
+	for _, name := range cmd.Args() {
 		_, _, err := cli.call("POST", "/containers/"+name+"/start", nil)
 		_, _, err := cli.call("POST", "/containers/"+name+"/start", nil)
 		if err != nil {
 		if err != nil {
-			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to start one or more containers")
+			if !*attach || !*openStdin {
+				fmt.Fprintf(cli.err, "%s\n", err)
+				encounteredError = fmt.Errorf("Error: failed to start one or more containers")
+			}
 		} else {
 		} else {
-			fmt.Fprintf(cli.out, "%s\n", name)
+			if !*attach || !*openStdin {
+				fmt.Fprintf(cli.out, "%s\n", name)
+			}
+		}
+	}
+	if encounteredError != nil {
+		if *openStdin || *attach {
+			cli.in.Close()
+			<-cErr
 		}
 		}
+		return encounteredError
 	}
 	}
-	return encounteredError
+	if *openStdin || *attach {
+		return <-cErr
+	}
+	return nil
 }
 }
 
 
 func (cli *DockerCli) CmdInspect(args ...string) error {
 func (cli *DockerCli) CmdInspect(args ...string) error {
@@ -577,30 +645,39 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
 		cmd.Usage()
 		cmd.Usage()
 		return nil
 		return nil
 	}
 	}
-	fmt.Fprintf(cli.out, "[")
-	for i, name := range args {
-		if i > 0 {
-			fmt.Fprintf(cli.out, ",")
-		}
+
+	indented := new(bytes.Buffer)
+	status := 0
+
+	for _, name := range args {
 		obj, _, err := cli.call("GET", "/containers/"+name+"/json", nil)
 		obj, _, err := cli.call("GET", "/containers/"+name+"/json", nil)
 		if err != nil {
 		if err != nil {
 			obj, _, err = cli.call("GET", "/images/"+name+"/json", nil)
 			obj, _, err = cli.call("GET", "/images/"+name+"/json", nil)
 			if err != nil {
 			if err != nil {
 				fmt.Fprintf(cli.err, "No such image or container: %s\n", name)
 				fmt.Fprintf(cli.err, "No such image or container: %s\n", name)
+				status = 1
 				continue
 				continue
 			}
 			}
 		}
 		}
 
 
-		indented := new(bytes.Buffer)
 		if err = json.Indent(indented, obj, "", "    "); err != nil {
 		if err = json.Indent(indented, obj, "", "    "); err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
 			fmt.Fprintf(cli.err, "%s\n", err)
+			status = 1
 			continue
 			continue
 		}
 		}
-		if _, err := io.Copy(cli.out, indented); err != nil {
-			fmt.Fprintf(cli.err, "%s\n", err)
-		}
+		indented.WriteString(",")
+	}
+	// Remove trailing ','
+	if indented.Len() > 0 {
+		indented.Truncate(indented.Len() - 1)
+	}
+
+	fmt.Fprintf(cli.out, "[")
+	if _, err := io.Copy(cli.out, indented); err != nil {
+		return err
 	}
 	}
 	fmt.Fprintf(cli.out, "]")
 	fmt.Fprintf(cli.out, "]")
+	if status != 0 {
+		return &utils.StatusError{Status: status}
+	}
 	return nil
 	return nil
 }
 }
 
 
@@ -647,11 +724,11 @@ func (cli *DockerCli) CmdPort(args ...string) error {
 	}
 	}
 
 
 	port := cmd.Arg(1)
 	port := cmd.Arg(1)
-	proto := "Tcp"
+	proto := "tcp"
 	parts := strings.SplitN(port, "/", 2)
 	parts := strings.SplitN(port, "/", 2)
 	if len(parts) == 2 && len(parts[1]) != 0 {
 	if len(parts) == 2 && len(parts[1]) != 0 {
 		port = parts[0]
 		port = parts[0]
-		proto = strings.ToUpper(parts[1][:1]) + strings.ToLower(parts[1][1:])
+		proto = parts[1]
 	}
 	}
 	body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil)
 	body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil)
 	if err != nil {
 	if err != nil {
@@ -663,8 +740,14 @@ func (cli *DockerCli) CmdPort(args ...string) error {
 		return err
 		return err
 	}
 	}
 
 
-	if frontend, exists := out.NetworkSettings.PortMapping[proto][port]; exists {
-		fmt.Fprintf(cli.out, "%s\n", frontend)
+	if frontends, exists := out.NetworkSettings.Ports[Port(port+"/"+proto)]; exists {
+		if frontends == nil {
+			fmt.Fprintf(cli.out, "%s\n", port)
+		} else {
+			for _, frontend := range frontends {
+				fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort)
+			}
+		}
 	} else {
 	} else {
 		return fmt.Errorf("Error: No private port '%s' allocated on %s", cmd.Arg(1), cmd.Arg(0))
 		return fmt.Errorf("Error: No private port '%s' allocated on %s", cmd.Arg(1), cmd.Arg(0))
 	}
 	}
@@ -705,7 +788,10 @@ func (cli *DockerCli) CmdRmi(args ...string) error {
 }
 }
 
 
 func (cli *DockerCli) CmdHistory(args ...string) error {
 func (cli *DockerCli) CmdHistory(args ...string) error {
-	cmd := Subcmd("history", "IMAGE", "Show the history of an image")
+	cmd := Subcmd("history", "[OPTIONS] IMAGE", "Show the history of an image")
+	quiet := cmd.Bool("q", false, "only show numeric IDs")
+	noTrunc := cmd.Bool("notrunc", false, "Don't truncate output")
+
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
 	}
 	}
@@ -724,14 +810,35 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
+
 	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
 	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
-	fmt.Fprintln(w, "ID\tCREATED\tCREATED BY")
+	if !*quiet {
+		fmt.Fprintln(w, "ID\tCREATED\tCREATED BY\tSIZE")
+	}
 
 
 	for _, out := range outs {
 	for _, out := range outs {
-		if out.Tags != nil {
-			out.ID = out.Tags[0]
+		if !*quiet {
+			if *noTrunc {
+				fmt.Fprintf(w, "%s\t", out.ID)
+			} else {
+				fmt.Fprintf(w, "%s\t", utils.TruncateID(out.ID))
+			}
+
+			fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))))
+
+			if *noTrunc {
+				fmt.Fprintf(w, "%s\t", out.CreatedBy)
+			} else {
+				fmt.Fprintf(w, "%s\t", utils.Trunc(out.CreatedBy, 45))
+			}
+			fmt.Fprintf(w, "%s\n", utils.HumanSize(out.Size))
+		} else {
+			if *noTrunc {
+				fmt.Fprintln(w, out.ID)
+			} else {
+				fmt.Fprintln(w, utils.TruncateID(out.ID))
+			}
 		}
 		}
-		fmt.Fprintf(w, "%s \t%s ago\t%s\n", out.ID, utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.CreatedBy)
 	}
 	}
 	w.Flush()
 	w.Flush()
 	return nil
 	return nil
@@ -740,6 +847,8 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
 func (cli *DockerCli) CmdRm(args ...string) error {
 func (cli *DockerCli) CmdRm(args ...string) error {
 	cmd := Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers")
 	cmd := Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers")
 	v := cmd.Bool("v", false, "Remove the volumes associated to the container")
 	v := cmd.Bool("v", false, "Remove the volumes associated to the container")
+	link := cmd.Bool("link", false, "Remove the specified link and not the underlying container")
+
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
 	}
 	}
@@ -751,6 +860,9 @@ func (cli *DockerCli) CmdRm(args ...string) error {
 	if *v {
 	if *v {
 		val.Set("v", "1")
 		val.Set("v", "1")
 	}
 	}
+	if *link {
+		val.Set("link", "1")
+	}
 	for _, name := range cmd.Args() {
 	for _, name := range cmd.Args() {
 		_, _, err := cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil)
 		_, _, err := cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil)
 		if err != nil {
 		if err != nil {
@@ -764,7 +876,7 @@ func (cli *DockerCli) CmdRm(args ...string) error {
 
 
 // 'docker kill NAME' kills a running container
 // 'docker kill NAME' kills a running container
 func (cli *DockerCli) CmdKill(args ...string) error {
 func (cli *DockerCli) CmdKill(args ...string) error {
-	cmd := Subcmd("kill", "CONTAINER [CONTAINER...]", "Kill a running container")
+	cmd := Subcmd("kill", "CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL)")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
 	}
 	}
@@ -985,25 +1097,19 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 				out.Tag = "<none>"
 				out.Tag = "<none>"
 			}
 			}
 
 
+			if !*noTrunc {
+				out.ID = utils.TruncateID(out.ID)
+			}
+
 			if !*quiet {
 			if !*quiet {
-				fmt.Fprintf(w, "%s\t%s\t", out.Repository, out.Tag)
-				if *noTrunc {
-					fmt.Fprintf(w, "%s\t", out.ID)
-				} else {
-					fmt.Fprintf(w, "%s\t", utils.TruncateID(out.ID))
-				}
-				fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))))
+				fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t", out.Repository, out.Tag, out.ID, utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))))
 				if out.VirtualSize > 0 {
 				if out.VirtualSize > 0 {
 					fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.Size), utils.HumanSize(out.VirtualSize))
 					fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.Size), utils.HumanSize(out.VirtualSize))
 				} else {
 				} else {
 					fmt.Fprintf(w, "%s\n", utils.HumanSize(out.Size))
 					fmt.Fprintf(w, "%s\n", utils.HumanSize(out.Size))
 				}
 				}
 			} else {
 			} else {
-				if *noTrunc {
-					fmt.Fprintln(w, out.ID)
-				} else {
-					fmt.Fprintln(w, utils.TruncateID(out.ID))
-				}
+				fmt.Fprintln(w, out.ID)
 			}
 			}
 		}
 		}
 
 
@@ -1017,10 +1123,10 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 func displayablePorts(ports []APIPort) string {
 func displayablePorts(ports []APIPort) string {
 	result := []string{}
 	result := []string{}
 	for _, port := range ports {
 	for _, port := range ports {
-		if port.Type == "tcp" {
-			result = append(result, fmt.Sprintf("%d->%d", port.PublicPort, port.PrivatePort))
+		if port.IP == "" {
+			result = append(result, fmt.Sprintf("%d/%s", port.PublicPort, port.Type))
 		} else {
 		} else {
-			result = append(result, fmt.Sprintf("%d->%d/%s", port.PublicPort, port.PrivatePort, port.Type))
+			result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type))
 		}
 		}
 	}
 	}
 	sort.Strings(result)
 	sort.Strings(result)
@@ -1073,7 +1179,7 @@ func (cli *DockerCli) CmdPs(args ...string) error {
 	}
 	}
 	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
 	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
 	if !*quiet {
 	if !*quiet {
-		fmt.Fprint(w, "ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS")
+		fmt.Fprint(w, "ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES")
 		if *size {
 		if *size {
 			fmt.Fprintln(w, "\tSIZE")
 			fmt.Fprintln(w, "\tSIZE")
 		} else {
 		} else {
@@ -1082,12 +1188,20 @@ func (cli *DockerCli) CmdPs(args ...string) error {
 	}
 	}
 
 
 	for _, out := range outs {
 	for _, out := range outs {
+		if !*noTrunc {
+			out.ID = utils.TruncateID(out.ID)
+		}
+
+		// Remove the leading / from the names
+		for i := 0; i < len(out.Names); i++ {
+			out.Names[i] = out.Names[i][1:]
+		}
+
 		if !*quiet {
 		if !*quiet {
-			if *noTrunc {
-				fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t", out.ID, out.Image, out.Command, utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.Status, displayablePorts(out.Ports))
-			} else {
-				fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t", utils.TruncateID(out.ID), out.Image, utils.Trunc(out.Command, 20), utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.Status, displayablePorts(out.Ports))
+			if !*noTrunc {
+				out.Command = utils.Trunc(out.Command, 20)
 			}
 			}
+			fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", out.ID, out.Image, out.Command, utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.Status, displayablePorts(out.Ports), strings.Join(out.Names, ","))
 			if *size {
 			if *size {
 				if out.SizeRootFs > 0 {
 				if out.SizeRootFs > 0 {
 					fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.SizeRw), utils.HumanSize(out.SizeRootFs))
 					fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.SizeRw), utils.HumanSize(out.SizeRootFs))
@@ -1098,11 +1212,7 @@ func (cli *DockerCli) CmdPs(args ...string) error {
 				fmt.Fprint(w, "\n")
 				fmt.Fprint(w, "\n")
 			}
 			}
 		} else {
 		} else {
-			if *noTrunc {
-				fmt.Fprintln(w, out.ID)
-			} else {
-				fmt.Fprintln(w, utils.TruncateID(out.ID))
-			}
+			fmt.Fprintln(w, out.ID)
 		}
 		}
 	}
 	}
 
 
@@ -1229,15 +1339,18 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
 		cmd.Usage()
 		cmd.Usage()
 		return nil
 		return nil
 	}
 	}
+	name := cmd.Arg(0)
 
 
-	if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?logs=1&stdout=1&stderr=1", false, nil, cli.out, cli.err); err != nil {
+	if err := cli.hijack("POST", "/containers/"+name+"/attach?logs=1&stdout=1&stderr=1", false, nil, cli.out, cli.err, nil); err != nil {
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
 func (cli *DockerCli) CmdAttach(args ...string) error {
 func (cli *DockerCli) CmdAttach(args ...string) error {
-	cmd := Subcmd("attach", "CONTAINER", "Attach to a running container")
+	cmd := Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container")
+	noStdin := cmd.Bool("nostdin", false, "Do not attach stdin")
+	proxy := cmd.Bool("sig-proxy", true, "Proxy all received signals to the process (even in non-tty mode)")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
 	}
 	}
@@ -1245,8 +1358,8 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
 		cmd.Usage()
 		cmd.Usage()
 		return nil
 		return nil
 	}
 	}
-
-	body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil)
+	name := cmd.Arg(0)
+	body, _, err := cli.call("GET", "/containers/"+name+"/json", nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -1261,19 +1374,29 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
 		return fmt.Errorf("Impossible to attach to a stopped container, start it first")
 		return fmt.Errorf("Impossible to attach to a stopped container, start it first")
 	}
 	}
 
 
-	if container.Config.Tty {
+	if container.Config.Tty && cli.isTerminal {
 		if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
 		if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
-			utils.Debugf("Error monitoring tty size: %s", err)
+			utils.Debugf("Error monitoring TTY size: %s", err)
 		}
 		}
 	}
 	}
 
 
+	var in io.ReadCloser
+
 	v := url.Values{}
 	v := url.Values{}
 	v.Set("stream", "1")
 	v.Set("stream", "1")
-	v.Set("stdin", "1")
+	if !*noStdin && container.Config.OpenStdin {
+		v.Set("stdin", "1")
+		in = cli.in
+	}
 	v.Set("stdout", "1")
 	v.Set("stdout", "1")
 	v.Set("stderr", "1")
 	v.Set("stderr", "1")
 
 
-	if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, cli.in, cli.out, cli.err); err != nil {
+	if *proxy && !container.Config.Tty {
+		sigc := cli.forwardAllSignals(cmd.Arg(0))
+		defer utils.StopCatch(sigc)
+	}
+
+	if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil {
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
@@ -1326,18 +1449,6 @@ func (cli *DockerCli) CmdSearch(args ...string) error {
 // Ports type - Used to parse multiple -p flags
 // Ports type - Used to parse multiple -p flags
 type ports []int
 type ports []int
 
 
-// ListOpts type
-type ListOpts []string
-
-func (opts *ListOpts) String() string {
-	return fmt.Sprint(*opts)
-}
-
-func (opts *ListOpts) Set(value string) error {
-	*opts = append(*opts, value)
-	return nil
-}
-
 // AttachOpts stores arguments to 'docker run -a', eg. which streams to attach to
 // AttachOpts stores arguments to 'docker run -a', eg. which streams to attach to
 type AttachOpts map[string]bool
 type AttachOpts map[string]bool
 
 
@@ -1436,6 +1547,13 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 	flRm := cmd.Lookup("rm")
 	flRm := cmd.Lookup("rm")
 	autoRemove, _ := strconv.ParseBool(flRm.Value.String())
 	autoRemove, _ := strconv.ParseBool(flRm.Value.String())
 
 
+	flSigProxy := cmd.Lookup("sig-proxy")
+	sigProxy, _ := strconv.ParseBool(flSigProxy.Value.String())
+	flName := cmd.Lookup("name")
+	if config.Tty {
+		sigProxy = false
+	}
+
 	var containerIDFile *os.File
 	var containerIDFile *os.File
 	if len(hostConfig.ContainerIDFile) > 0 {
 	if len(hostConfig.ContainerIDFile) > 0 {
 		if _, err := ioutil.ReadFile(hostConfig.ContainerIDFile); err == nil {
 		if _, err := ioutil.ReadFile(hostConfig.ContainerIDFile); err == nil {
@@ -1447,9 +1565,14 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		}
 		}
 		defer containerIDFile.Close()
 		defer containerIDFile.Close()
 	}
 	}
+	containerValues := url.Values{}
+	name := flName.Value.String()
+	if name != "" {
+		containerValues.Set("name", name)
+	}
 
 
 	//create the container
 	//create the container
-	body, statusCode, err := cli.call("POST", "/containers/create", config)
+	body, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), config)
 	//if image not found try to pull it
 	//if image not found try to pull it
 	if statusCode == 404 {
 	if statusCode == 404 {
 		_, tag := utils.ParseRepositoryTag(config.Image)
 		_, tag := utils.ParseRepositoryTag(config.Image)
@@ -1490,7 +1613,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
-		body, _, err = cli.call("POST", "/containers/create", config)
+		body, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), config)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
@@ -1514,12 +1637,15 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		}
 		}
 	}
 	}
 
 
-	//start the container
-	if _, _, err = cli.call("POST", "/containers/"+runResult.ID+"/start", hostConfig); err != nil {
-		return err
+	if sigProxy {
+		sigc := cli.forwardAllSignals(runResult.ID)
+		defer utils.StopCatch(sigc)
 	}
 	}
 
 
-	var wait chan struct{}
+	var (
+		wait  chan struct{}
+		errCh chan error
+	)
 
 
 	if !config.AttachStdout && !config.AttachStderr {
 	if !config.AttachStdout && !config.AttachStderr {
 		// Make this asynchronous in order to let the client write to stdin before having to read the ID
 		// Make this asynchronous in order to let the client write to stdin before having to read the ID
@@ -1530,20 +1656,18 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		}()
 		}()
 	}
 	}
 
 
+	hijacked := make(chan bool)
+
 	if config.AttachStdin || config.AttachStdout || config.AttachStderr {
 	if config.AttachStdin || config.AttachStdout || config.AttachStderr {
-		if config.Tty {
-			if err := cli.monitorTtySize(runResult.ID); err != nil {
-				utils.Debugf("Error monitoring TTY size: %s\n", err)
-			}
-		}
 
 
 		v := url.Values{}
 		v := url.Values{}
-		v.Set("logs", "1")
 		v.Set("stream", "1")
 		v.Set("stream", "1")
 		var out, stderr io.Writer
 		var out, stderr io.Writer
+		var in io.ReadCloser
 
 
 		if config.AttachStdin {
 		if config.AttachStdin {
 			v.Set("stdin", "1")
 			v.Set("stdin", "1")
+			in = cli.in
 		}
 		}
 		if config.AttachStdout {
 		if config.AttachStdout {
 			v.Set("stdout", "1")
 			v.Set("stdout", "1")
@@ -1558,18 +1682,36 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 			}
 			}
 		}
 		}
 
 
-		signals := make(chan os.Signal, 1)
-		signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
-		go func() {
-			for sig := range signals {
-				fmt.Printf("\nReceived signal: %s; cleaning up\n", sig)
-				if err := cli.CmdStop("-t", "4", runResult.ID); err != nil {
-					fmt.Printf("failed to stop container: %v", err)
-				}
-			}
-		}()
+		errCh = utils.Go(func() error {
+			return cli.hijack("POST", "/containers/"+runResult.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked)
+		})
+	} else {
+		close(hijacked)
+	}
 
 
-		if err := cli.hijack("POST", "/containers/"+runResult.ID+"/attach?"+v.Encode(), config.Tty, cli.in, out, stderr); err != nil {
+	// Acknowledge the hijack before starting
+	select {
+	case <-hijacked:
+	case err := <-errCh:
+		if err != nil {
+			utils.Debugf("Error hijack: %s", err)
+			return err
+		}
+	}
+
+	//start the container
+	if _, _, err = cli.call("POST", "/containers/"+runResult.ID+"/start", hostConfig); err != nil {
+		return err
+	}
+
+	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal {
+		if err := cli.monitorTtySize(runResult.ID); err != nil {
+			utils.Errorf("Error monitoring TTY size: %s\n", err)
+		}
+	}
+
+	if errCh != nil {
+		if err := <-errCh; err != nil {
 			utils.Debugf("Error hijack: %s", err)
 			utils.Debugf("Error hijack: %s", err)
 			return err
 			return err
 		}
 		}
@@ -1579,13 +1721,19 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		// Detached mode
 		// Detached mode
 		<-wait
 		<-wait
 	} else {
 	} else {
-		status, err := getExitCode(cli, runResult.ID)
+		running, status, err := getExitCode(cli, runResult.ID)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
 		if autoRemove {
 		if autoRemove {
-			_, _, err = cli.call("DELETE", "/containers/"+runResult.ID, nil)
-			if err != nil {
+			if running {
+				return fmt.Errorf("Impossible to auto-remove a detached container")
+			}
+			// Wait for the process to exit before removing the container
+			if _, _, err := cli.call("POST", "/containers/"+runResult.ID+"/wait", nil); err != nil {
+				return err
+			}
+			if _, _, err := cli.call("DELETE", "/containers/"+runResult.ID, nil); err != nil {
 				return err
 				return err
 			}
 			}
 		}
 		}
@@ -1642,6 +1790,10 @@ func (cli *DockerCli) call(method, path string, data interface{}) ([]byte, int,
 		params = bytes.NewBuffer(buf)
 		params = bytes.NewBuffer(buf)
 	}
 	}
 
 
+	// fixme: refactor client to support redirect
+	re := regexp.MustCompile("/+")
+	path = re.ReplaceAllString(path, "/")
+
 	req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), params)
 	req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), params)
 	if err != nil {
 	if err != nil {
 		return nil, -1, err
 		return nil, -1, err
@@ -1670,6 +1822,7 @@ func (cli *DockerCli) call(method, path string, data interface{}) ([]byte, int,
 		return nil, -1, err
 		return nil, -1, err
 	}
 	}
 	defer resp.Body.Close()
 	defer resp.Body.Close()
+
 	body, err := ioutil.ReadAll(resp.Body)
 	body, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
 	if err != nil {
 		return nil, -1, err
 		return nil, -1, err
@@ -1687,6 +1840,11 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, h
 	if (method == "POST" || method == "PUT") && in == nil {
 	if (method == "POST" || method == "PUT") && in == nil {
 		in = bytes.NewReader([]byte{})
 		in = bytes.NewReader([]byte{})
 	}
 	}
+
+	// fixme: refactor client to support redirect
+	re := regexp.MustCompile("/+")
+	path = re.ReplaceAllString(path, "/")
+
 	req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), in)
 	req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), in)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
@@ -1742,7 +1900,10 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, h
 	return nil
 	return nil
 }
 }
 
 
-func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer) error {
+func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan bool) error {
+	// fixme: refactor client to support redirect
+	re := regexp.MustCompile("/+")
+	path = re.ReplaceAllString(path, "/")
 
 
 	req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), nil)
 	req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), nil)
 	if err != nil {
 	if err != nil {
@@ -1768,6 +1929,10 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
 	rwc, br := clientconn.Hijack()
 	rwc, br := clientconn.Hijack()
 	defer rwc.Close()
 	defer rwc.Close()
 
 
+	if started != nil {
+		started <- true
+	}
+
 	var receiveStdout chan error
 	var receiveStdout chan error
 
 
 	if stdout != nil {
 	if stdout != nil {
@@ -1798,11 +1963,11 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
 		}
 		}
 		if tcpc, ok := rwc.(*net.TCPConn); ok {
 		if tcpc, ok := rwc.(*net.TCPConn); ok {
 			if err := tcpc.CloseWrite(); err != nil {
 			if err := tcpc.CloseWrite(); err != nil {
-				utils.Debugf("Couldn't send EOF: %s\n", err)
+				utils.Errorf("Couldn't send EOF: %s\n", err)
 			}
 			}
 		} else if unixc, ok := rwc.(*net.UnixConn); ok {
 		} else if unixc, ok := rwc.(*net.UnixConn); ok {
 			if err := unixc.CloseWrite(); err != nil {
 			if err := unixc.CloseWrite(); err != nil {
-				utils.Debugf("Couldn't send EOF: %s\n", err)
+				utils.Errorf("Couldn't send EOF: %s\n", err)
 			}
 			}
 		}
 		}
 		// Discard errors due to pipe interruption
 		// Discard errors due to pipe interruption
@@ -1811,14 +1976,14 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
 
 
 	if stdout != nil {
 	if stdout != nil {
 		if err := <-receiveStdout; err != nil {
 		if err := <-receiveStdout; err != nil {
-			utils.Debugf("Error receiveStdout: %s", err)
+			utils.Errorf("Error receiveStdout: %s", err)
 			return err
 			return err
 		}
 		}
 	}
 	}
 
 
 	if !cli.isTerminal {
 	if !cli.isTerminal {
 		if err := <-sendStdin; err != nil {
 		if err := <-sendStdin; err != nil {
-			utils.Debugf("Error sendStdin: %s", err)
+			utils.Errorf("Error sendStdin: %s", err)
 			return err
 			return err
 		}
 		}
 	}
 	}
@@ -1832,7 +1997,7 @@ func (cli *DockerCli) getTtySize() (int, int) {
 	}
 	}
 	ws, err := term.GetWinsize(cli.terminalFd)
 	ws, err := term.GetWinsize(cli.terminalFd)
 	if err != nil {
 	if err != nil {
-		utils.Debugf("Error getting size: %s", err)
+		utils.Errorf("Error getting size: %s", err)
 		if ws == nil {
 		if ws == nil {
 			return 0, 0
 			return 0, 0
 		}
 		}
@@ -1849,14 +2014,11 @@ func (cli *DockerCli) resizeTty(id string) {
 	v.Set("h", strconv.Itoa(height))
 	v.Set("h", strconv.Itoa(height))
 	v.Set("w", strconv.Itoa(width))
 	v.Set("w", strconv.Itoa(width))
 	if _, _, err := cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil); err != nil {
 	if _, _, err := cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil); err != nil {
-		utils.Debugf("Error resize: %s", err)
+		utils.Errorf("Error resize: %s", err)
 	}
 	}
 }
 }
 
 
 func (cli *DockerCli) monitorTtySize(id string) error {
 func (cli *DockerCli) monitorTtySize(id string) error {
-	if !cli.isTerminal {
-		return fmt.Errorf("Impossible to monitor size on non-tty")
-	}
 	cli.resizeTty(id)
 	cli.resizeTty(id)
 
 
 	sigchan := make(chan os.Signal, 1)
 	sigchan := make(chan os.Signal, 1)
@@ -1904,20 +2066,22 @@ func waitForExit(cli *DockerCli, containerId string) (int, error) {
 	return out.StatusCode, nil
 	return out.StatusCode, nil
 }
 }
 
 
-func getExitCode(cli *DockerCli, containerId string) (int, error) {
+// getExitCode performs an inspect on the container. It returns
+// the running state and the exit code.
+func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
 	body, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil)
 	body, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil)
 	if err != nil {
 	if err != nil {
 		// If we can't connect, then the daemon probably died.
 		// If we can't connect, then the daemon probably died.
 		if err != ErrConnectionRefused {
 		if err != ErrConnectionRefused {
-			return -1, err
+			return false, -1, err
 		}
 		}
-		return -1, nil
+		return false, -1, nil
 	}
 	}
 	c := &Container{}
 	c := &Container{}
 	if err := json.Unmarshal(body, c); err != nil {
 	if err := json.Unmarshal(body, c); err != nil {
-		return -1, err
+		return false, -1, err
 	}
 	}
-	return c.State.ExitCode, nil
+	return c.State.Running, c.State.ExitCode, nil
 }
 }
 
 
 func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {
 func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {

+ 82 - 16
commands_test.go

@@ -84,10 +84,24 @@ func TestRunHostname(t *testing.T) {
 		}
 		}
 	})
 	})
 
 
-	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
+	container := globalRuntime.List()[0]
+
+	setTimeout(t, "CmdRun timed out", 10*time.Second, func() {
 		<-c
 		<-c
+
+		go func() {
+			cli.CmdWait(container.ID)
+		}()
+
+		if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil {
+			t.Fatal(err)
+		}
 	})
 	})
 
 
+	// Cleanup pipes
+	if err := closeWrap(stdout, stdoutPipe); err != nil {
+		t.Fatal(err)
+	}
 }
 }
 
 
 // TestRunWorkdir checks that 'docker run -w' correctly sets a custom working directory
 // TestRunWorkdir checks that 'docker run -w' correctly sets a custom working directory
@@ -115,10 +129,24 @@ func TestRunWorkdir(t *testing.T) {
 		}
 		}
 	})
 	})
 
 
-	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
+	container := globalRuntime.List()[0]
+
+	setTimeout(t, "CmdRun timed out", 10*time.Second, func() {
 		<-c
 		<-c
+
+		go func() {
+			cli.CmdWait(container.ID)
+		}()
+
+		if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil {
+			t.Fatal(err)
+		}
 	})
 	})
 
 
+	// Cleanup pipes
+	if err := closeWrap(stdout, stdoutPipe); err != nil {
+		t.Fatal(err)
+	}
 }
 }
 
 
 // TestRunWorkdirExists checks that 'docker run -w' correctly sets a custom working directory, even if it exists
 // TestRunWorkdirExists checks that 'docker run -w' correctly sets a custom working directory, even if it exists
@@ -146,10 +174,24 @@ func TestRunWorkdirExists(t *testing.T) {
 		}
 		}
 	})
 	})
 
 
+	container := globalRuntime.List()[0]
+
 	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
 	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
 		<-c
 		<-c
+
+		go func() {
+			cli.CmdWait(container.ID)
+		}()
+
+		if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil {
+			t.Fatal(err)
+		}
 	})
 	})
 
 
+	// Cleanup pipes
+	if err := closeWrap(stdout, stdoutPipe); err != nil {
+		t.Fatal(err)
+	}
 }
 }
 
 
 func TestRunExit(t *testing.T) {
 func TestRunExit(t *testing.T) {
@@ -262,7 +304,7 @@ func TestRunDisconnectTty(t *testing.T) {
 		// We're simulating a disconnect so the return value doesn't matter. What matters is the
 		// We're simulating a disconnect so the return value doesn't matter. What matters is the
 		// fact that CmdRun returns.
 		// fact that CmdRun returns.
 		if err := cli.CmdRun("-i", "-t", unitTestImageID, "/bin/cat"); err != nil {
 		if err := cli.CmdRun("-i", "-t", unitTestImageID, "/bin/cat"); err != nil {
-			utils.Debugf("Error CmdRun: %s\n", err)
+			utils.Debugf("Error CmdRun: %s", err)
 		}
 		}
 
 
 		close(c1)
 		close(c1)
@@ -393,12 +435,14 @@ func TestRunDetach(t *testing.T) {
 	container := globalRuntime.List()[0]
 	container := globalRuntime.List()[0]
 
 
 	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
 	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
-		stdinPipe.Write([]byte{'\x10', '\x11'})
+		stdinPipe.Write([]byte{16, 17})
 		if err := stdinPipe.Close(); err != nil {
 		if err := stdinPipe.Close(); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	})
 	})
 
 
+	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
+
 	// wait for CmdRun to return
 	// wait for CmdRun to return
 	setTimeout(t, "Waiting for CmdRun timed out", 15*time.Second, func() {
 	setTimeout(t, "Waiting for CmdRun timed out", 15*time.Second, func() {
 		<-ch
 		<-ch
@@ -411,7 +455,6 @@ func TestRunDetach(t *testing.T) {
 
 
 	setTimeout(t, "Waiting for container to die timed out", 20*time.Second, func() {
 	setTimeout(t, "Waiting for container to die timed out", 20*time.Second, func() {
 		container.Kill()
 		container.Kill()
-		container.Wait()
 	})
 	})
 }
 }
 
 
@@ -423,39 +466,62 @@ func TestAttachDetach(t *testing.T) {
 	cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
 	cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
 	defer cleanup(globalRuntime)
 	defer cleanup(globalRuntime)
 
 
-	go stdout.Read(make([]byte, 1024))
-	setTimeout(t, "Starting container timed out", 2*time.Second, func() {
+	ch := make(chan struct{})
+	go func() {
+		defer close(ch)
 		if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil {
 		if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
-	})
+	}()
 
 
-	container := globalRuntime.List()[0]
+	var container *Container
+
+	setTimeout(t, "Reading container's id timed out", 10*time.Second, func() {
+		buf := make([]byte, 1024)
+		n, err := stdout.Read(buf)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		container = globalRuntime.List()[0]
+
+		if strings.Trim(string(buf[:n]), " \r\n") != container.ShortID() {
+			t.Fatalf("Wrong ID received. Expect %s, received %s", container.ShortID(), buf[:n])
+		}
+	})
+	setTimeout(t, "Starting container timed out", 10*time.Second, func() {
+		<-ch
+	})
 
 
 	stdin, stdinPipe = io.Pipe()
 	stdin, stdinPipe = io.Pipe()
 	stdout, stdoutPipe = io.Pipe()
 	stdout, stdoutPipe = io.Pipe()
 	cli = NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
 	cli = NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
 
 
-	ch := make(chan struct{})
+	ch = make(chan struct{})
 	go func() {
 	go func() {
 		defer close(ch)
 		defer close(ch)
 		if err := cli.CmdAttach(container.ShortID()); err != nil {
 		if err := cli.CmdAttach(container.ShortID()); err != nil {
-			t.Fatal(err)
+			if err != io.ErrClosedPipe {
+				t.Fatal(err)
+			}
 		}
 		}
 	}()
 	}()
 
 
 	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
 	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
 		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
 		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
-			t.Fatal(err)
+			if err != io.ErrClosedPipe {
+				t.Fatal(err)
+			}
 		}
 		}
 	})
 	})
 
 
 	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
 	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
-		stdinPipe.Write([]byte{'\x10', '\x11'})
+		stdinPipe.Write([]byte{16, 17})
 		if err := stdinPipe.Close(); err != nil {
 		if err := stdinPipe.Close(); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	})
 	})
+	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
 
 
 	// wait for CmdRun to return
 	// wait for CmdRun to return
 	setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() {
 	setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() {
@@ -469,7 +535,6 @@ func TestAttachDetach(t *testing.T) {
 
 
 	setTimeout(t, "Waiting for container to die timedout", 5*time.Second, func() {
 	setTimeout(t, "Waiting for container to die timedout", 5*time.Second, func() {
 		container.Kill()
 		container.Kill()
-		container.Wait()
 	})
 	})
 }
 }
 
 
@@ -484,7 +549,7 @@ func TestAttachDisconnect(t *testing.T) {
 	go func() {
 	go func() {
 		// Start a process in daemon mode
 		// Start a process in daemon mode
 		if err := cli.CmdRun("-d", "-i", unitTestImageID, "/bin/cat"); err != nil {
 		if err := cli.CmdRun("-d", "-i", unitTestImageID, "/bin/cat"); err != nil {
-			utils.Debugf("Error CmdRun: %s\n", err)
+			utils.Debugf("Error CmdRun: %s", err)
 		}
 		}
 	}()
 	}()
 
 
@@ -545,6 +610,7 @@ func TestAttachDisconnect(t *testing.T) {
 
 
 // Expected behaviour: container gets deleted automatically after exit
 // Expected behaviour: container gets deleted automatically after exit
 func TestRunAutoRemove(t *testing.T) {
 func TestRunAutoRemove(t *testing.T) {
+	t.Skip("Fixme. Skipping test for now, race condition")
 	stdout, stdoutPipe := io.Pipe()
 	stdout, stdoutPipe := io.Pipe()
 	cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
 	cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
 	defer cleanup(globalRuntime)
 	defer cleanup(globalRuntime)
@@ -569,7 +635,7 @@ func TestRunAutoRemove(t *testing.T) {
 		}
 		}
 	})
 	})
 
 
-	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
+	setTimeout(t, "CmdRun timed out", 10*time.Second, func() {
 		<-c
 		<-c
 	})
 	})
 
 

+ 18 - 0
config.go

@@ -0,0 +1,18 @@
+package docker
+
+import (
+	"net"
+)
+
+type DaemonConfig struct {
+	Pidfile                     string
+	GraphPath                   string
+	ProtoAddresses              []string
+	AutoRestart                 bool
+	EnableCors                  bool
+	Dns                         []string
+	EnableIptables              bool
+	BridgeIface                 string
+	DefaultIp                   net.IP
+	InterContainerCommunication bool
+}

+ 392 - 149
container.go

@@ -1,6 +1,7 @@
 package docker
 package docker
 
 
 import (
 import (
+	"bytes"
 	"encoding/json"
 	"encoding/json"
 	"errors"
 	"errors"
 	"flag"
 	"flag"
@@ -43,6 +44,7 @@ type Container struct {
 	ResolvConfPath string
 	ResolvConfPath string
 	HostnamePath   string
 	HostnamePath   string
 	HostsPath      string
 	HostsPath      string
+	Name           string
 
 
 	cmd       *exec.Cmd
 	cmd       *exec.Cmd
 	stdout    *utils.WriteBroadcaster
 	stdout    *utils.WriteBroadcaster
@@ -58,6 +60,8 @@ type Container struct {
 	// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
 	// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
 	// Easier than migrating older container configs :)
 	// Easier than migrating older container configs :)
 	VolumesRW map[string]bool
 	VolumesRW map[string]bool
+
+	activeLinks map[string]*Link
 }
 }
 
 
 type Config struct {
 type Config struct {
@@ -70,7 +74,8 @@ type Config struct {
 	AttachStdin     bool
 	AttachStdin     bool
 	AttachStdout    bool
 	AttachStdout    bool
 	AttachStderr    bool
 	AttachStderr    bool
-	PortSpecs       []string
+	PortSpecs       []string // Deprecated - Can be in the format of 8080/tcp
+	ExposedPorts    map[Port]struct{}
 	Tty             bool // Attach standard streams to a tty, including stdin if it is not closed.
 	Tty             bool // Attach standard streams to a tty, including stdin if it is not closed.
 	OpenStdin       bool // Open stdin
 	OpenStdin       bool // Open stdin
 	StdinOnce       bool // If true, close stdin after the 1 attached client disconnects.
 	StdinOnce       bool // If true, close stdin after the 1 attached client disconnects.
@@ -90,6 +95,8 @@ type HostConfig struct {
 	Binds           []string
 	Binds           []string
 	ContainerIDFile string
 	ContainerIDFile string
 	LxcConf         []KeyValuePair
 	LxcConf         []KeyValuePair
+	PortBindings    map[Port][]PortBinding
+	Links           []string
 }
 }
 
 
 type BindMap struct {
 type BindMap struct {
@@ -99,7 +106,11 @@ type BindMap struct {
 }
 }
 
 
 var (
 var (
-	ErrInvaidWorikingDirectory = errors.New("The working directory is invalid. It needs to be an absolute path.")
+	ErrContainerStart           = errors.New("The container failed to start. Unkown error")
+	ErrContainerStart           = errors.New("The container failed to start. Unknown error")
+	ErrContainerStartTimeout    = errors.New("The container failed to start due to a timeout.")
+	ErrConflictAttachDetach     = errors.New("Conflicting options: -a and -d")
+	ErrConflictDetachAutoRemove = errors.New("Conflicting options: -rm and -d")
 )
 )
 
 
 type KeyValuePair struct {
 type KeyValuePair struct {
@@ -107,6 +118,34 @@ type KeyValuePair struct {
 	Value string
 	Value string
 }
 }
 
 
+type PortBinding struct {
+	HostIp   string
+	HostPort string
+}
+
+// 80/tcp
+type Port string
+
+func (p Port) Proto() string {
+	return strings.Split(string(p), "/")[1]
+}
+
+func (p Port) Port() string {
+	return strings.Split(string(p), "/")[0]
+}
+
+func (p Port) Int() int {
+	i, err := parsePort(p.Port())
+	if err != nil {
+		panic(err)
+	}
+	return i
+}
+
+func NewPort(proto, port string) Port {
+	return Port(fmt.Sprintf("%s/%s", port, proto))
+}
+
 func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 	cmd := Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container")
 	cmd := Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container")
 	if os.Getenv("TEST") != "" {
 	if os.Getenv("TEST") != "" {
@@ -127,6 +166,8 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 	flNetwork := cmd.Bool("n", true, "Enable networking for this container")
 	flNetwork := cmd.Bool("n", true, "Enable networking for this container")
 	flPrivileged := cmd.Bool("privileged", false, "Give extended privileges to this container")
 	flPrivileged := cmd.Bool("privileged", false, "Give extended privileges to this container")
 	flAutoRemove := cmd.Bool("rm", false, "Automatically remove the container when it exits (incompatible with -d)")
 	flAutoRemove := cmd.Bool("rm", false, "Automatically remove the container when it exits (incompatible with -d)")
+	cmd.Bool("sig-proxy", true, "Proxy all received signals to the process (even in non-tty mode)")
+	cmd.String("name", "", "Assign a name to the container")
 
 
 	if capabilities != nil && *flMemory > 0 && !capabilities.MemoryLimit {
 	if capabilities != nil && *flMemory > 0 && !capabilities.MemoryLimit {
 		//fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
 		//fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
@@ -135,35 +176,45 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 
 
 	flCpuShares := cmd.Int64("c", 0, "CPU shares (relative weight)")
 	flCpuShares := cmd.Int64("c", 0, "CPU shares (relative weight)")
 
 
-	var flPorts ListOpts
-	cmd.Var(&flPorts, "p", "Expose a container's port to the host (use 'docker port' to see the actual mapping)")
+	var flPublish utils.ListOpts
+	cmd.Var(&flPublish, "p", "Publish a container's port to the host (use 'docker port' to see the actual mapping)")
+
+	var flExpose utils.ListOpts
+	cmd.Var(&flExpose, "expose", "Expose a port from the container without publishing it to your host")
 
 
-	var flEnv ListOpts
+	var flEnv utils.ListOpts
 	cmd.Var(&flEnv, "e", "Set environment variables")
 	cmd.Var(&flEnv, "e", "Set environment variables")
 
 
-	var flDns ListOpts
+	var flDns utils.ListOpts
 	cmd.Var(&flDns, "dns", "Set custom dns servers")
 	cmd.Var(&flDns, "dns", "Set custom dns servers")
 
 
 	flVolumes := NewPathOpts()
 	flVolumes := NewPathOpts()
 	cmd.Var(flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
 	cmd.Var(flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
 
 
-	var flVolumesFrom ListOpts
+	var flVolumesFrom utils.ListOpts
 	cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container")
 	cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container")
 
 
 	flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")
 	flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")
 
 
-	var flLxcOpts ListOpts
+	var flLxcOpts utils.ListOpts
 	cmd.Var(&flLxcOpts, "lxc-conf", "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
 	cmd.Var(&flLxcOpts, "lxc-conf", "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
 
 
+	var flLinks utils.ListOpts
+	cmd.Var(&flLinks, "link", "Add link to another container (name:alias)")
+
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil, nil, cmd, err
 		return nil, nil, cmd, err
 	}
 	}
 	if *flDetach && len(flAttach) > 0 {
 	if *flDetach && len(flAttach) > 0 {
-		return nil, nil, cmd, fmt.Errorf("Conflicting options: -a and -d")
+		return nil, nil, cmd, ErrConflictAttachDetach
 	}
 	}
 	if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
 	if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
-		return nil, nil, cmd, ErrInvaidWorikingDirectory
+		return nil, nil, cmd, ErrInvalidWorikingDirectory
 	}
 	}
+	if *flDetach && *flAutoRemove {
+		return nil, nil, cmd, ErrConflictDetachAutoRemove
+	}
+
 	// If neither -d or -a are set, attach to everything by default
 	// If neither -d or -a are set, attach to everything by default
 	if len(flAttach) == 0 && !*flDetach {
 	if len(flAttach) == 0 && !*flDetach {
 		if !*flDetach {
 		if !*flDetach {
@@ -175,8 +226,16 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 		}
 		}
 	}
 	}
 
 
-	if *flDetach && *flAutoRemove {
-		return nil, nil, cmd, fmt.Errorf("Conflicting options: -rm and -d")
+	envs := []string{}
+
+	for _, env := range flEnv {
+		arr := strings.Split(env, "=")
+		if len(arr) > 1 {
+			envs = append(envs, env)
+		} else {
+			v := os.Getenv(env)
+			envs = append(envs, env+"="+v)
+		}
 	}
 	}
 
 
 	var binds []string
 	var binds []string
@@ -220,10 +279,28 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 		hostname = parts[0]
 		hostname = parts[0]
 		domainname = parts[1]
 		domainname = parts[1]
 	}
 	}
+
+	ports, portBindings, err := parsePortSpecs(flPublish)
+	if err != nil {
+		return nil, nil, cmd, err
+	}
+
+	// Merge in exposed ports to the map of published ports
+	for _, e := range flExpose {
+		if strings.Contains(e, ":") {
+			return nil, nil, cmd, fmt.Errorf("Invalid port format for -expose: %s", e)
+		}
+		p := NewPort(splitProtoPort(e))
+		if _, exists := ports[p]; !exists {
+			ports[p] = struct{}{}
+		}
+	}
+
 	config := &Config{
 	config := &Config{
-		Hostname:        hostname,
+		Hostname:        *flHostname,
 		Domainname:      domainname,
 		Domainname:      domainname,
-		PortSpecs:       flPorts,
+		PortSpecs:       nil, // Deprecated
+		ExposedPorts:    ports,
 		User:            *flUser,
 		User:            *flUser,
 		Tty:             *flTty,
 		Tty:             *flTty,
 		NetworkDisabled: !*flNetwork,
 		NetworkDisabled: !*flNetwork,
@@ -233,7 +310,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 		AttachStdin:     flAttach.Get("stdin"),
 		AttachStdin:     flAttach.Get("stdin"),
 		AttachStdout:    flAttach.Get("stdout"),
 		AttachStdout:    flAttach.Get("stdout"),
 		AttachStderr:    flAttach.Get("stderr"),
 		AttachStderr:    flAttach.Get("stderr"),
-		Env:             flEnv,
+		Env:             envs,
 		Cmd:             runCmd,
 		Cmd:             runCmd,
 		Dns:             flDns,
 		Dns:             flDns,
 		Image:           image,
 		Image:           image,
@@ -243,10 +320,13 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 		Privileged:      *flPrivileged,
 		Privileged:      *flPrivileged,
 		WorkingDir:      *flWorkingDir,
 		WorkingDir:      *flWorkingDir,
 	}
 	}
+
 	hostConfig := &HostConfig{
 	hostConfig := &HostConfig{
 		Binds:           binds,
 		Binds:           binds,
 		ContainerIDFile: *flContainerIDFile,
 		ContainerIDFile: *flContainerIDFile,
 		LxcConf:         lxcConf,
 		LxcConf:         lxcConf,
+		PortBindings:    portBindings,
+		Links:           flLinks,
 	}
 	}
 
 
 	if capabilities != nil && *flMemory > 0 && !capabilities.SwapLimit {
 	if capabilities != nil && *flMemory > 0 && !capabilities.SwapLimit {
@@ -261,36 +341,38 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 	return config, hostConfig, cmd, nil
 	return config, hostConfig, cmd, nil
 }
 }
 
 
-type PortMapping map[string]string
+type PortMapping map[string]string // Deprecated
 
 
 type NetworkSettings struct {
 type NetworkSettings struct {
 	IPAddress   string
 	IPAddress   string
 	IPPrefixLen int
 	IPPrefixLen int
 	Gateway     string
 	Gateway     string
 	Bridge      string
 	Bridge      string
-	PortMapping map[string]PortMapping
+	PortMapping map[string]PortMapping // Deprecated
+	Ports       map[Port][]PortBinding
 }
 }
 
 
-// returns a more easy to process description of the port mapping defined in the settings
 func (settings *NetworkSettings) PortMappingAPI() []APIPort {
 func (settings *NetworkSettings) PortMappingAPI() []APIPort {
 	var mapping []APIPort
 	var mapping []APIPort
-	for private, public := range settings.PortMapping["Tcp"] {
-		pubint, _ := strconv.ParseInt(public, 0, 0)
-		privint, _ := strconv.ParseInt(private, 0, 0)
-		mapping = append(mapping, APIPort{
-			PrivatePort: privint,
-			PublicPort:  pubint,
-			Type:        "tcp",
-		})
-	}
-	for private, public := range settings.PortMapping["Udp"] {
-		pubint, _ := strconv.ParseInt(public, 0, 0)
-		privint, _ := strconv.ParseInt(private, 0, 0)
-		mapping = append(mapping, APIPort{
-			PrivatePort: privint,
-			PublicPort:  pubint,
-			Type:        "udp",
-		})
+	for port, bindings := range settings.Ports {
+		p, _ := parsePort(port.Port())
+		if len(bindings) == 0 {
+			mapping = append(mapping, APIPort{
+				PublicPort: int64(p),
+				Type:       port.Proto(),
+			})
+			continue
+		}
+		for _, binding := range bindings {
+			p, _ := parsePort(port.Port())
+			h, _ := parsePort(binding.HostPort)
+			mapping = append(mapping, APIPort{
+				PrivatePort: int64(p),
+				PublicPort:  int64(h),
+				Type:        port.Proto(),
+				IP:          binding.HostIp,
+			})
+		}
 	}
 	}
 	return mapping
 	return mapping
 }
 }
@@ -361,6 +443,15 @@ func (container *Container) SaveHostConfig(hostConfig *HostConfig) (err error) {
 	return ioutil.WriteFile(container.hostConfigPath(), data, 0666)
 	return ioutil.WriteFile(container.hostConfigPath(), data, 0666)
 }
 }
 
 
+func (container *Container) generateEnvConfig(env []string) error {
+	data, err := json.Marshal(env)
+	if err != nil {
+		return err
+	}
+	err = ioutil.WriteFile(container.EnvConfigPath(), data, 0600)
+	return err
+}
+
 func (container *Container) generateLXCConfig(hostConfig *HostConfig) error {
 func (container *Container) generateLXCConfig(hostConfig *HostConfig) error {
 	fo, err := os.Create(container.lxcConfigPath())
 	fo, err := os.Create(container.lxcConfigPath())
 	if err != nil {
 	if err != nil {
@@ -390,9 +481,9 @@ func (container *Container) startPty() error {
 	// Copy the PTYs to our broadcasters
 	// Copy the PTYs to our broadcasters
 	go func() {
 	go func() {
 		defer container.stdout.CloseWriters()
 		defer container.stdout.CloseWriters()
-		utils.Debugf("[startPty] Begin of stdout pipe")
+		utils.Debugf("startPty: begin of stdout pipe")
 		io.Copy(container.stdout, ptyMaster)
 		io.Copy(container.stdout, ptyMaster)
-		utils.Debugf("[startPty] End of stdout pipe")
+		utils.Debugf("startPty: end of stdout pipe")
 	}()
 	}()
 
 
 	// stdin
 	// stdin
@@ -401,9 +492,9 @@ func (container *Container) startPty() error {
 		container.cmd.SysProcAttr.Setctty = true
 		container.cmd.SysProcAttr.Setctty = true
 		go func() {
 		go func() {
 			defer container.stdin.Close()
 			defer container.stdin.Close()
-			utils.Debugf("[startPty] Begin of stdin pipe")
+			utils.Debugf("startPty: begin of stdin pipe")
 			io.Copy(ptyMaster, container.stdin)
 			io.Copy(ptyMaster, container.stdin)
-			utils.Debugf("[startPty] End of stdin pipe")
+			utils.Debugf("startPty: end of stdin pipe")
 		}()
 		}()
 	}
 	}
 	if err := container.cmd.Start(); err != nil {
 	if err := container.cmd.Start(); err != nil {
@@ -423,9 +514,9 @@ func (container *Container) start() error {
 		}
 		}
 		go func() {
 		go func() {
 			defer stdin.Close()
 			defer stdin.Close()
-			utils.Debugf("Begin of stdin pipe [start]")
+			utils.Debugf("start: begin of stdin pipe")
 			io.Copy(stdin, container.stdin)
 			io.Copy(stdin, container.stdin)
-			utils.Debugf("End of stdin pipe [start]")
+			utils.Debugf("start: end of stdin pipe")
 		}()
 		}()
 	}
 	}
 	return container.cmd.Start()
 	return container.cmd.Start()
@@ -442,8 +533,8 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 			errors <- err
 			errors <- err
 		} else {
 		} else {
 			go func() {
 			go func() {
-				utils.Debugf("[start] attach stdin\n")
-				defer utils.Debugf("[end] attach stdin\n")
+				utils.Debugf("attach: stdin: begin")
+				defer utils.Debugf("attach: stdin: end")
 				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
 				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
 				if container.Config.StdinOnce && !container.Config.Tty {
 				if container.Config.StdinOnce && !container.Config.Tty {
 					defer cStdin.Close()
 					defer cStdin.Close()
@@ -460,11 +551,13 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 				} else {
 				} else {
 					_, err = io.Copy(cStdin, stdin)
 					_, err = io.Copy(cStdin, stdin)
 				}
 				}
+				if err == io.ErrClosedPipe {
+					err = nil
+				}
 				if err != nil {
 				if err != nil {
-					utils.Debugf("[error] attach stdin: %s\n", err)
+					utils.Errorf("attach: stdin: %s", err)
 				}
 				}
-				// Discard error, expecting pipe error
-				errors <- nil
+				errors <- err
 			}()
 			}()
 		}
 		}
 	}
 	}
@@ -475,8 +568,8 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 		} else {
 		} else {
 			cStdout = p
 			cStdout = p
 			go func() {
 			go func() {
-				utils.Debugf("[start] attach stdout\n")
-				defer utils.Debugf("[end]  attach stdout\n")
+				utils.Debugf("attach: stdout: begin")
+				defer utils.Debugf("attach: stdout: end")
 				// If we are in StdinOnce mode, then close stdin
 				// If we are in StdinOnce mode, then close stdin
 				if container.Config.StdinOnce && stdin != nil {
 				if container.Config.StdinOnce && stdin != nil {
 					defer stdin.Close()
 					defer stdin.Close()
@@ -485,8 +578,11 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 					defer stdinCloser.Close()
 					defer stdinCloser.Close()
 				}
 				}
 				_, err := io.Copy(stdout, cStdout)
 				_, err := io.Copy(stdout, cStdout)
+				if err == io.ErrClosedPipe {
+					err = nil
+				}
 				if err != nil {
 				if err != nil {
-					utils.Debugf("[error] attach stdout: %s\n", err)
+					utils.Errorf("attach: stdout: %s", err)
 				}
 				}
 				errors <- err
 				errors <- err
 			}()
 			}()
@@ -496,9 +592,8 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 			if stdinCloser != nil {
 			if stdinCloser != nil {
 				defer stdinCloser.Close()
 				defer stdinCloser.Close()
 			}
 			}
-
 			if cStdout, err := container.StdoutPipe(); err != nil {
 			if cStdout, err := container.StdoutPipe(); err != nil {
-				utils.Debugf("Error stdout pipe")
+				utils.Errorf("attach: stdout pipe: %s", err)
 			} else {
 			} else {
 				io.Copy(&utils.NopWriter{}, cStdout)
 				io.Copy(&utils.NopWriter{}, cStdout)
 			}
 			}
@@ -511,8 +606,8 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 		} else {
 		} else {
 			cStderr = p
 			cStderr = p
 			go func() {
 			go func() {
-				utils.Debugf("[start] attach stderr\n")
-				defer utils.Debugf("[end]  attach stderr\n")
+				utils.Debugf("attach: stderr: begin")
+				defer utils.Debugf("attach: stderr: end")
 				// If we are in StdinOnce mode, then close stdin
 				// If we are in StdinOnce mode, then close stdin
 				if container.Config.StdinOnce && stdin != nil {
 				if container.Config.StdinOnce && stdin != nil {
 					defer stdin.Close()
 					defer stdin.Close()
@@ -521,8 +616,11 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 					defer stdinCloser.Close()
 					defer stdinCloser.Close()
 				}
 				}
 				_, err := io.Copy(stderr, cStderr)
 				_, err := io.Copy(stderr, cStderr)
+				if err == io.ErrClosedPipe {
+					err = nil
+				}
 				if err != nil {
 				if err != nil {
-					utils.Debugf("[error] attach stderr: %s\n", err)
+					utils.Errorf("attach: stderr: %s", err)
 				}
 				}
 				errors <- err
 				errors <- err
 			}()
 			}()
@@ -534,7 +632,7 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 			}
 			}
 
 
 			if cStderr, err := container.StderrPipe(); err != nil {
 			if cStderr, err := container.StderrPipe(); err != nil {
-				utils.Debugf("Error stdout pipe")
+				utils.Errorf("attach: stdout pipe: %s", err)
 			} else {
 			} else {
 				io.Copy(&utils.NopWriter{}, cStderr)
 				io.Copy(&utils.NopWriter{}, cStderr)
 			}
 			}
@@ -548,24 +646,29 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 		if cStderr != nil {
 		if cStderr != nil {
 			defer cStderr.Close()
 			defer cStderr.Close()
 		}
 		}
-		// FIXME: how do clean up the stdin goroutine without the unwanted side effect
+		// FIXME: how to clean up the stdin goroutine without the unwanted side effect
 		// of closing the passed stdin? Add an intermediary io.Pipe?
 		// of closing the passed stdin? Add an intermediary io.Pipe?
 		for i := 0; i < nJobs; i += 1 {
 		for i := 0; i < nJobs; i += 1 {
-			utils.Debugf("Waiting for job %d/%d\n", i+1, nJobs)
+			utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
 			if err := <-errors; err != nil {
 			if err := <-errors; err != nil {
-				utils.Debugf("Job %d returned error %s. Aborting all jobs\n", i+1, err)
+				utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err)
 				return err
 				return err
 			}
 			}
-			utils.Debugf("Job %d completed successfully\n", i+1)
+			utils.Debugf("attach: job %d completed successfully", i+1)
 		}
 		}
-		utils.Debugf("All jobs completed successfully\n")
+		utils.Debugf("attach: all jobs completed successfully")
 		return nil
 		return nil
 	})
 	})
 }
 }
 
 
-func (container *Container) Start(hostConfig *HostConfig) error {
+func (container *Container) Start(hostConfig *HostConfig) (err error) {
 	container.State.Lock()
 	container.State.Lock()
 	defer container.State.Unlock()
 	defer container.State.Unlock()
+	defer func() {
+		if err != nil {
+			container.cleanup()
+		}
+	}()
 
 
 	if hostConfig == nil { // in docker start of docker restart we want to reuse previous HostConfigFile
 	if hostConfig == nil { // in docker start of docker restart we want to reuse previous HostConfigFile
 		hostConfig, _ = container.ReadHostConfig()
 		hostConfig, _ = container.ReadHostConfig()
@@ -581,7 +684,7 @@ func (container *Container) Start(hostConfig *HostConfig) error {
 		container.Config.NetworkDisabled = true
 		container.Config.NetworkDisabled = true
 		container.buildHostnameAndHostsFiles("127.0.1.1")
 		container.buildHostnameAndHostsFiles("127.0.1.1")
 	} else {
 	} else {
-		if err := container.allocateNetwork(); err != nil {
+		if err := container.allocateNetwork(hostConfig); err != nil {
 			return err
 			return err
 		}
 		}
 		container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress)
 		container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress)
@@ -761,17 +864,65 @@ func (container *Container) Start(hostConfig *HostConfig) error {
 		params = append(params, "-u", container.Config.User)
 		params = append(params, "-u", container.Config.User)
 	}
 	}
 
 
+	// Setup environment
+	env := []string{
+		"HOME=/",
+		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+		"container=lxc",
+		"HOSTNAME=" + container.Config.Hostname,
+	}
+
 	if container.Config.Tty {
 	if container.Config.Tty {
-		params = append(params, "-e", "TERM=xterm")
+		env = append(env, "TERM=xterm")
+	}
+
+	// Init any links between the parent and children
+	runtime := container.runtime
+
+	children, err := runtime.Children(container.Name)
+	if err != nil {
+		return err
+	}
+
+	if len(children) > 0 {
+		container.activeLinks = make(map[string]*Link, len(children))
+
+		// If we encounter an error make sure that we rollback any network
+		// config and ip table changes
+		rollback := func() {
+			for _, link := range container.activeLinks {
+				link.Disable()
+			}
+			container.activeLinks = nil
+		}
+
+		for p, child := range children {
+			link, err := NewLink(container, child, p, runtime.networkManager.bridgeIface)
+			if err != nil {
+				rollback()
+				return err
+			}
+
+			container.activeLinks[link.Alias()] = link
+			if err := link.Enable(); err != nil {
+				rollback()
+				return err
+			}
+
+			for _, envVar := range link.ToEnv() {
+				env = append(env, envVar)
+			}
+		}
+	}
+
+	for _, elem := range container.Config.Env {
+		env = append(env, elem)
+	}
+
+	if err := container.generateEnvConfig(env); err != nil {
+		return err
 	}
 	}
 
 
-	// Setup environment
-	params = append(params,
-		"-e", "HOME=/",
-		"-e", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-		"-e", "container=lxc",
-		"-e", "HOSTNAME="+container.Config.Hostname,
-	)
 	if container.Config.WorkingDir != "" {
 	if container.Config.WorkingDir != "" {
 		workingDir := path.Clean(container.Config.WorkingDir)
 		workingDir := path.Clean(container.Config.WorkingDir)
 		utils.Debugf("[working dir] working dir is %s", workingDir)
 		utils.Debugf("[working dir] working dir is %s", workingDir)
@@ -785,10 +936,6 @@ func (container *Container) Start(hostConfig *HostConfig) error {
 		)
 		)
 	}
 	}
 
 
-	for _, elem := range container.Config.Env {
-		params = append(params, "-e", elem)
-	}
-
 	// Program
 	params = append(params, "--", container.Path)
 	params = append(params, container.Args...)
@@ -805,7 +952,6 @@ func (container *Container) Start(hostConfig *HostConfig) error {

 	container.cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}

-	var err error
 	if container.Config.Tty {
 		err = container.startPty()
 	} else {
@@ -824,12 +970,43 @@ func (container *Container) Start(hostConfig *HostConfig) error {
 	container.ToDisk()
 	container.SaveHostConfig(hostConfig)
 	go container.monitor(hostConfig)
-	return nil
+
+	defer utils.Debugf("Container running: %v", container.State.Running)
+	// We wait for the container to be fully running.
+	// Timeout after 5 seconds. In case of broken pipe, just retry.
+	// Note: The container can run and finish correctly before
+	//       the end of this loop
+	for now := time.Now(); time.Since(now) < 5*time.Second; {
+		// If the container dies while waiting for it, just return
+		if !container.State.Running {
+			return nil
+		}
+		output, err := exec.Command("lxc-info", "-s", "-n", container.ID).CombinedOutput()
+		if err != nil {
+			utils.Debugf("Error with lxc-info: %s (%s)", err, output)
+
+			output, err = exec.Command("lxc-info", "-s", "-n", container.ID).CombinedOutput()
+			if err != nil {
+				utils.Debugf("Second Error with lxc-info: %s (%s)", err, output)
+				return err
+			}
+
+		}
+		if strings.Contains(string(output), "RUNNING") {
+			return nil
+		}
+		utils.Debugf("Waiting for the container to start (running: %v): %s", container.State.Running, bytes.TrimSpace(output))
+		time.Sleep(50 * time.Millisecond)
+	}
+
+	if container.State.Running {
+		return ErrContainerStartTimeout
+	}
+	return ErrContainerStart
 }

 func (container *Container) Run() error {
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(&HostConfig{}); err != nil {
 		return err
 	}
 	container.Wait()
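The wait loop added to Start above polls `lxc-info -s` until the state contains RUNNING, retries once when the command errors, and gives up after five seconds. The same poll-with-deadline shape, reduced to a stand-alone sketch with a generic probe function (illustrative only; ErrContainerStartTimeout and the lxc specifics are left out):

    // poll_sketch.go - a generic "poll until ready or deadline" loop, the same
    // shape as the lxc-info wait added to Start above. Illustrative only.
    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    var errStartTimeout = errors.New("still starting after deadline")

    func waitUntil(ready func() (bool, error), deadline, interval time.Duration) error {
    	for start := time.Now(); time.Since(start) < deadline; {
    		ok, err := ready()
    		if err != nil {
    			return err
    		}
    		if ok {
    			return nil
    		}
    		time.Sleep(interval)
    	}
    	return errStartTimeout
    }

    func main() {
    	n := 0
    	err := waitUntil(func() (bool, error) { n++; return n >= 3, nil }, 5*time.Second, 50*time.Millisecond)
    	fmt.Println(err) // <nil> once the probe reports ready
    }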
@@ -851,9 +1028,14 @@ func (container *Container) Output() (output []byte, err error) {
 	return output, err
 }

-// StdinPipe() returns a pipe connected to the standard input of the container's
-// active process.
-//
+// Container.StdinPipe returns a WriteCloser which can be used to feed data
+// to the standard input of the container's active process.
+// Container.StdoutPipe and Container.StderrPipe each return a ReadCloser
+// which can be used to retrieve the standard output (and error) generated
+// by the container's active process. The output (and error) are actually
+// copied and delivered to all StdoutPipe and StderrPipe consumers, using
+// a kind of "broadcaster".
+
 func (container *Container) StdinPipe() (io.WriteCloser, error) {
 	return container.stdinPipe, nil
 }
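The rewritten comment above says stdout and stderr are copied to every StdoutPipe and StderrPipe consumer through a kind of "broadcaster". That utility is not part of this hunk; a minimal sketch of the idea (not the actual utils implementation):

    // broadcaster_sketch.go - fan-out writer in the spirit of the "broadcaster"
    // mentioned in the comment above. Illustrative only.
    package main

    import (
    	"fmt"
    	"io"
    	"os"
    	"strings"
    	"sync"
    )

    type broadcaster struct {
    	mu      sync.Mutex
    	writers []io.Writer
    }

    func (b *broadcaster) AddWriter(w io.Writer) {
    	b.mu.Lock()
    	defer b.mu.Unlock()
    	b.writers = append(b.writers, w)
    }

    // Write copies p to every registered writer, so each consumer sees the full stream.
    func (b *broadcaster) Write(p []byte) (int, error) {
    	b.mu.Lock()
    	defer b.mu.Unlock()
    	for _, w := range b.writers {
    		if _, err := w.Write(p); err != nil {
    			return 0, err
    		}
    	}
    	return len(p), nil
    }

    func main() {
    	var buf strings.Builder
    	b := &broadcaster{}
    	b.AddWriter(os.Stdout)
    	b.AddWriter(&buf)
    	fmt.Fprintln(b, "hello from the container") // both consumers receive the line
    }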
@@ -894,7 +1076,7 @@ ff02::2		ip6-allrouters
 	ioutil.WriteFile(container.HostsPath, hostsContent, 0644)
 }

-func (container *Container) allocateNetwork() error {
+func (container *Container) allocateNetwork(hostConfig *HostConfig) error {
 	if container.Config.NetworkDisabled {
 		return nil
 	}
@@ -921,41 +1103,67 @@ func (container *Container) allocateNetwork() error {
 		}
 	}

-	var portSpecs []string
+	if container.Config.PortSpecs != nil {
+		utils.Debugf("Migrating port mappings for container: %s", strings.Join(container.Config.PortSpecs, ", "))
+		if err := migratePortMappings(container.Config, hostConfig); err != nil {
+			return err
+		}
+		container.Config.PortSpecs = nil
+		if err := container.SaveHostConfig(hostConfig); err != nil {
+			return err
+		}
+	}
+
+	portSpecs := make(map[Port]struct{})
+	bindings := make(map[Port][]PortBinding)
+
 	if !container.State.Ghost {
-		portSpecs = container.Config.PortSpecs
-	} else {
-		for backend, frontend := range container.NetworkSettings.PortMapping["Tcp"] {
-			portSpecs = append(portSpecs, fmt.Sprintf("%s:%s/tcp", frontend, backend))
+		if container.Config.ExposedPorts != nil {
+			portSpecs = container.Config.ExposedPorts
+		}
+		if hostConfig.PortBindings != nil {
+			bindings = hostConfig.PortBindings
 		}
-		for backend, frontend := range container.NetworkSettings.PortMapping["Udp"] {
-			portSpecs = append(portSpecs, fmt.Sprintf("%s:%s/udp", frontend, backend))
+	} else {
+		if container.NetworkSettings.Ports != nil {
+			for port, binding := range container.NetworkSettings.Ports {
+				portSpecs[port] = struct{}{}
+				bindings[port] = binding
+			}
 		}
 	}

-	container.NetworkSettings.PortMapping = make(map[string]PortMapping)
-	container.NetworkSettings.PortMapping["Tcp"] = make(PortMapping)
-	container.NetworkSettings.PortMapping["Udp"] = make(PortMapping)
-	for _, spec := range portSpecs {
-		nat, err := iface.AllocatePort(spec)
-		if err != nil {
-			iface.Release()
-			return err
+	container.NetworkSettings.PortMapping = nil
+
+	for port := range portSpecs {
+		binding := bindings[port]
+		for i := 0; i < len(binding); i++ {
+			b := binding[i]
+			nat, err := iface.AllocatePort(port, b)
+			if err != nil {
+				iface.Release()
+				return err
+			}
+			utils.Debugf("Allocate port: %s:%s->%s", nat.Binding.HostIp, port, nat.Binding.HostPort)
+			binding[i] = nat.Binding
 		}
-		proto := strings.Title(nat.Proto)
-		backend, frontend := strconv.Itoa(nat.Backend), strconv.Itoa(nat.Frontend)
-		container.NetworkSettings.PortMapping[proto][backend] = frontend
+		bindings[port] = binding
 	}
+	container.SaveHostConfig(hostConfig)
+
+	container.NetworkSettings.Ports = bindings
 	container.network = iface
+
 	container.NetworkSettings.Bridge = container.runtime.networkManager.bridgeIface
 	container.NetworkSettings.IPAddress = iface.IPNet.IP.String()
 	container.NetworkSettings.IPPrefixLen, _ = iface.IPNet.Mask.Size()
 	container.NetworkSettings.Gateway = iface.Gateway.String()
+
 	return nil
 }

 func (container *Container) releaseNetwork() {
-	if container.Config.NetworkDisabled {
+	if container.Config.NetworkDisabled || container.network == nil {
 		return
 	}
 	container.network.Release()
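allocateNetwork now works from a set of exposed ports plus a per-port list of host bindings rather than flat "frontend:backend" strings. A simplified sketch of those two structures and of the binding walk done above (local stand-in types, not the real Port and PortBinding definitions from this commit):

    // ports_sketch.go - simplified stand-ins for the exposed-ports set plus the
    // per-port binding list used above. Not the real docker types.
    package main

    import "fmt"

    type port string // e.g. "80/tcp"

    type portBinding struct {
    	HostIp   string
    	HostPort string
    }

    func main() {
    	exposed := map[port]struct{}{"80/tcp": {}, "53/udp": {}}
    	bindings := map[port][]portBinding{
    		"80/tcp": {{HostIp: "0.0.0.0", HostPort: "8080"}},
    	}
    	for p := range exposed {
    		for _, b := range bindings[p] { // ports with no binding simply loop zero times
    			fmt.Printf("allocate %s -> %s:%s\n", p, b.HostIp, b.HostPort)
    		}
    	}
    }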
@@ -963,7 +1171,7 @@ func (container *Container) releaseNetwork() {
 	container.NetworkSettings = &NetworkSettings{}
 }

-// FIXME: replace this with a control socket within docker-init
+// FIXME: replace this with a control socket within dockerinit
 func (container *Container) waitLxc() error {
 	for {
 		output, err := exec.Command("lxc-info", "-n", container.ID).CombinedOutput()
@@ -979,20 +1187,23 @@ func (container *Container) waitLxc() error {

 func (container *Container) monitor(hostConfig *HostConfig) {
 	// Wait for the program to exit
-	utils.Debugf("Waiting for process")

-	// If the command does not exists, try to wait via lxc
+	// If the command does not exist, try to wait via lxc
+	// (This probably happens only for ghost containers, i.e. containers that were running when Docker started)
 	if container.cmd == nil {
+		utils.Debugf("monitor: waiting for container %s using waitLxc", container.ID)
 		if err := container.waitLxc(); err != nil {
-			utils.Debugf("%s: Process: %s", container.ID, err)
+			utils.Errorf("monitor: while waiting for container %s, waitLxc had a problem: %s", container.ID, err)
 		}
 	} else {
+		utils.Debugf("monitor: waiting for container %s using cmd.Wait", container.ID)
 		if err := container.cmd.Wait(); err != nil {
-			// Discard the error as any signals or non 0 returns will generate an error
-			utils.Debugf("%s: Process: %s", container.ID, err)
+			// Since non-zero exit status and signal terminations will cause err to be non-nil,
+			// we have to actually discard it. Still, log it anyway, just in case.
+			utils.Debugf("monitor: cmd.Wait reported exit status %s for container %s", err, container.ID)
 		}
 	}
-	utils.Debugf("Process finished")
+	utils.Debugf("monitor: container %s finished", container.ID)

 	exitCode := -1
 	if container.cmd != nil {
@@ -1007,96 +1218,111 @@ func (container *Container) monitor(hostConfig *HostConfig) {
 	}

 	// Cleanup
+	container.cleanup()
+
+	// Re-create a brand new stdin pipe once the container exited
+	if container.Config.OpenStdin {
+		container.stdin, container.stdinPipe = io.Pipe()
+	}
+
+	// Release the lock
+	close(container.waitLock)
+
+	if err := container.ToDisk(); err != nil {
+		// FIXME: there is a race condition here which causes this to fail during the unit tests.
+		// If another goroutine was waiting for Wait() to return before removing the container's root
+		// from the filesystem... At this point it may already have done so.
+		// This is because State.setStopped() has already been called, and has caused Wait()
+		// to return.
+		// FIXME: why are we serializing running state to disk in the first place?
+		//log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err)
+	}
+}
+
+func (container *Container) cleanup() {
 	container.releaseNetwork()
+
+	// Disable all active links
+	if container.activeLinks != nil {
+		for _, link := range container.activeLinks {
+			link.Disable()
+		}
+	}
+
 	if container.Config.OpenStdin {
 		if err := container.stdin.Close(); err != nil {
-			utils.Debugf("%s: Error close stdin: %s", container.ID, err)
+			utils.Errorf("%s: Error close stdin: %s", container.ID, err)
 		}
 	}
 	if err := container.stdout.CloseWriters(); err != nil {
-		utils.Debugf("%s: Error close stdout: %s", container.ID, err)
+		utils.Errorf("%s: Error close stdout: %s", container.ID, err)
 	}
 	if err := container.stderr.CloseWriters(); err != nil {
-		utils.Debugf("%s: Error close stderr: %s", container.ID, err)
+		utils.Errorf("%s: Error close stderr: %s", container.ID, err)
 	}

 	if container.ptyMaster != nil {
 		if err := container.ptyMaster.Close(); err != nil {
-			utils.Debugf("%s: Error closing Pty master: %s", container.ID, err)
+			utils.Errorf("%s: Error closing Pty master: %s", container.ID, err)
 		}
 	}

 	if err := container.Unmount(); err != nil {
 		log.Printf("%v: Failed to umount filesystem: %v", container.ID, err)
 	}
+}

-	// Re-create a brand new stdin pipe once the container exited
-	if container.Config.OpenStdin {
-		container.stdin, container.stdinPipe = io.Pipe()
-	}
+func (container *Container) kill(sig int) error {
+	container.State.Lock()
+	defer container.State.Unlock()

-	// Release the lock
-	close(container.waitLock)
+	if !container.State.Running {
+		return nil
+	}

-	if err := container.ToDisk(); err != nil {
-		// FIXME: there is a race condition here which causes this to fail during the unit tests.
-		// If another goroutine was waiting for Wait() to return before removing the container's root
-		// from the filesystem... At this point it may already have done so.
-		// This is because State.setStopped() has already been called, and has caused Wait()
-		// to return.
-		// FIXME: why are we serializing running state to disk in the first place?
-		//log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err)
+	if output, err := exec.Command("lxc-kill", "-n", container.ID, strconv.Itoa(sig)).CombinedOutput(); err != nil {
+		log.Printf("error killing container %s (%s, %s)", container.ShortID(), output, err)
+		return err
 	}
+
+	return nil
 }

-func (container *Container) kill() error {
+func (container *Container) Kill() error {
 	if !container.State.Running {
 		return nil
 	}

-	// Sending SIGKILL to the process via lxc
-	output, err := exec.Command("lxc-kill", "-n", container.ID, "9").CombinedOutput()
-	if err != nil {
-		log.Printf("error killing container %s (%s, %s)", container.ID, output, err)
+	// 1. Send SIGKILL
+	if err := container.kill(9); err != nil {
+		return err
 	}

 	// 2. Wait for the process to die, in last resort, try to kill the process directly
 	if err := container.WaitTimeout(10 * time.Second); err != nil {
 		if container.cmd == nil {
-			return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.ID)
+			return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.ShortID())
 		}
-		log.Printf("Container %s failed to exit within 10 seconds of lxc SIGKILL - trying direct SIGKILL", container.ID)
+		log.Printf("Container %s failed to exit within 10 seconds of lxc-kill SIGKILL - trying direct SIGKILL", container.ShortID())
 		if err := container.cmd.Process.Kill(); err != nil {
 			return err
 		}
 	}

-	// Wait for the container to be actually stopped
 	container.Wait()
 	return nil
 }

-func (container *Container) Kill() error {
-	container.State.Lock()
-	defer container.State.Unlock()
-	if !container.State.Running {
-		return nil
-	}
-	return container.kill()
-}
-
 func (container *Container) Stop(seconds int) error {
-	container.State.Lock()
-	defer container.State.Unlock()
 	if !container.State.Running {
 		return nil
 	}

 	// 1. Send a SIGTERM
-	if output, err := exec.Command("lxc-kill", "-n", container.ID, "15").CombinedOutput(); err != nil {
-		log.Print(string(output))
+	if err := container.kill(15); err != nil {
+		utils.Debugf("Error sending kill SIGTERM: %s", err)
 		log.Print("Failed to send SIGTERM to the process, force killing")
 		log.Print("Failed to send SIGTERM to the process, force killing")
-		if err := container.kill(); err != nil {
+		if err := container.kill(9); err != nil {
 			return err
 		}
 	}
@@ -1104,7 +1330,8 @@ func (container *Container) Stop(seconds int) error {
 	// 2. Wait for the process to exit on its own
 	if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil {
 		log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
-		if err := container.kill(); err != nil {
+		// 3. If it doesn't, then send SIGKILL
+		if err := container.Kill(); err != nil {
 			return err
 		}
 	}
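Stop now escalates in the order shown above: SIGTERM through the lowercase kill helper, a bounded wait, then Kill (SIGKILL) as the fallback. The same escalation for an ordinary child process, sketched with only the standard library (illustrative; lxc-kill and the container state handling are left out):

    // stop_sketch.go - SIGTERM first, SIGKILL after a deadline, the same
    // escalation order as Stop/Kill above. Illustrative only (plain os/exec
    // process, not lxc).
    package main

    import (
    	"os/exec"
    	"syscall"
    	"time"
    )

    func stopProcess(cmd *exec.Cmd, grace time.Duration) error {
    	if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
    		return err
    	}
    	done := make(chan error, 1)
    	go func() { done <- cmd.Wait() }()
    	select {
    	case <-done:
    		return nil // exited on its own within the grace period
    	case <-time.After(grace):
    		return cmd.Process.Kill() // force kill, like Stop falling back to Kill()
    	}
    }

    func main() {
    	cmd := exec.Command("sleep", "60")
    	if err := cmd.Start(); err != nil {
    		panic(err)
    	}
    	_ = stopProcess(cmd, 2*time.Second)
    }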
@@ -1207,6 +1434,12 @@ func (container *Container) Mounted() (bool, error) {
 }

 func (container *Container) Unmount() error {
+	if _, err := os.Stat(container.RootfsPath()); err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
 	return Unmount(container.RootfsPath())
 }

@@ -1234,6 +1467,10 @@ func (container *Container) jsonPath() string {
 	return path.Join(container.root, "config.json")
 }

+func (container *Container) EnvConfigPath() string {
+	return path.Join(container.root, "config.env")
+}
+
 func (container *Container) lxcConfigPath() string {
 	return path.Join(container.root, "config.lxc")
 }
@@ -1297,3 +1534,9 @@ func (container *Container) Copy(resource string) (Archive, error) {
 	}
 	return TarFilter(basePath, Uncompressed, filter)
 }
+
+// Returns true if the container exposes a certain port
+func (container *Container) Exposes(p Port) bool {
+	_, exists := container.Config.ExposedPorts[p]
+	return exists
+}
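The new Exposes helper is a plain set-membership test on Config.ExposedPorts. The same comma-ok lookup with a stand-in string key (the real Port type comes from this commit, simplified here):

    // exposes_sketch.go - the Exposes helper above is a set lookup; same idiom
    // here with a stand-in string key instead of the real Port type.
    package main

    import "fmt"

    func main() {
    	exposedPorts := map[string]struct{}{"80/tcp": {}}
    	_, exists := exposedPorts["80/tcp"] // comma-ok reports membership
    	fmt.Println(exists)                 // true
    }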

+ 101 - 44
container_test.go

@@ -18,11 +18,12 @@ import (
 func TestIDFormat(t *testing.T) {
 func TestIDFormat(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
-	container1, err := runtime.Create(
+	container1, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image: GetTestImage(runtime).ID,
 			Image: GetTestImage(runtime).ID,
 			Cmd:   []string{"/bin/sh", "-c", "echo hello world"},
 			Cmd:   []string{"/bin/sh", "-c", "echo hello world"},
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -388,11 +389,12 @@ func TestRun(t *testing.T) {
 func TestOutput(t *testing.T) {
 func TestOutput(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image: GetTestImage(runtime).ID,
 			Image: GetTestImage(runtime).ID,
 			Cmd:   []string{"echo", "-n", "foobar"},
 			Cmd:   []string{"echo", "-n", "foobar"},
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -407,16 +409,39 @@ func TestOutput(t *testing.T) {
 	}
 	}
 }
 }
 
 
+func TestContainerNetwork(t *testing.T) {
+	runtime := mkRuntime(t)
+	defer nuke(runtime)
+	container, _, err := runtime.Create(
+		&Config{
+			Image: GetTestImage(runtime).ID,
+			Cmd:   []string{"ping", "-c", "1", "127.0.0.1"},
+		},
+		"",
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer runtime.Destroy(container)
+	if err := container.Run(); err != nil {
+		t.Fatal(err)
+	}
+	if container.State.ExitCode != 0 {
+		t.Errorf("Unexpected ping 127.0.0.1 exit code %d (expected 0)", container.State.ExitCode)
+	}
+}
+
 func TestKillDifferentUser(t *testing.T) {
 func TestKillDifferentUser(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
-	container, err := runtime.Create(&Config{
+	container, _, err := runtime.Create(&Config{
 		Image:     GetTestImage(runtime).ID,
 		Image:     GetTestImage(runtime).ID,
 		Cmd:       []string{"cat"},
 		Cmd:       []string{"cat"},
 		OpenStdin: true,
 		OpenStdin: true,
 		User:      "daemon",
 		User:      "daemon",
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -471,7 +496,7 @@ func TestCreateVolume(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	c, err := runtime.Create(config)
+	c, _, err := runtime.Create(config, "")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -486,10 +511,11 @@ func TestCreateVolume(t *testing.T) {
 func TestKill(t *testing.T) {
 func TestKill(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
-	container, err := runtime.Create(&Config{
+	container, _, err := runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"sleep", "2"},
 		Cmd:   []string{"sleep", "2"},
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -530,10 +556,10 @@ func TestExitCode(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
-	trueContainer, err := runtime.Create(&Config{
+	trueContainer, _, err := runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"/bin/true", ""},
 		Cmd:   []string{"/bin/true", ""},
-	})
+	}, "")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -545,10 +571,10 @@ func TestExitCode(t *testing.T) {
 		t.Errorf("Unexpected exit code %d (expected 0)", trueContainer.State.ExitCode)
 		t.Errorf("Unexpected exit code %d (expected 0)", trueContainer.State.ExitCode)
 	}
 	}
 
 
-	falseContainer, err := runtime.Create(&Config{
+	falseContainer, _, err := runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"/bin/false", ""},
 		Cmd:   []string{"/bin/false", ""},
-	})
+	}, "")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -564,10 +590,11 @@ func TestExitCode(t *testing.T) {
 func TestRestart(t *testing.T) {
 func TestRestart(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
-	container, err := runtime.Create(&Config{
+	container, _, err := runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"echo", "-n", "foobar"},
 		Cmd:   []string{"echo", "-n", "foobar"},
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -594,12 +621,13 @@ func TestRestart(t *testing.T) {
 func TestRestartStdin(t *testing.T) {
 func TestRestartStdin(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
-	container, err := runtime.Create(&Config{
+	container, _, err := runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"cat"},
 		Cmd:   []string{"cat"},
 
 
 		OpenStdin: true,
 		OpenStdin: true,
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -672,10 +700,11 @@ func TestUser(t *testing.T) {
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
 	// Default user must be root
 	// Default user must be root
-	container, err := runtime.Create(&Config{
+	container, _, err := runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"id"},
 		Cmd:   []string{"id"},
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -690,12 +719,13 @@ func TestUser(t *testing.T) {
 	}
 	}
 
 
 	// Set a username
 	// Set a username
-	container, err = runtime.Create(&Config{
+	container, _, err = runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"id"},
 		Cmd:   []string{"id"},
 
 
 		User: "root",
 		User: "root",
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -710,12 +740,13 @@ func TestUser(t *testing.T) {
 	}
 	}
 
 
 	// Set a UID
 	// Set a UID
-	container, err = runtime.Create(&Config{
+	container, _, err = runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"id"},
 		Cmd:   []string{"id"},
 
 
 		User: "0",
 		User: "0",
 	},
 	},
+		"",
 	)
 	)
 	if err != nil || container.State.ExitCode != 0 {
 	if err != nil || container.State.ExitCode != 0 {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -730,12 +761,13 @@ func TestUser(t *testing.T) {
 	}
 	}
 
 
 	// Set a different user by uid
 	// Set a different user by uid
-	container, err = runtime.Create(&Config{
+	container, _, err = runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"id"},
 		Cmd:   []string{"id"},
 
 
 		User: "1",
 		User: "1",
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -752,12 +784,13 @@ func TestUser(t *testing.T) {
 	}
 	}
 
 
 	// Set a different user by username
 	// Set a different user by username
-	container, err = runtime.Create(&Config{
+	container, _, err = runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"id"},
 		Cmd:   []string{"id"},
 
 
 		User: "daemon",
 		User: "daemon",
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -772,12 +805,13 @@ func TestUser(t *testing.T) {
 	}
 	}
 
 
 	// Test a wrong username
 	// Test a wrong username
-	container, err = runtime.Create(&Config{
+	container, _, err = runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"id"},
 		Cmd:   []string{"id"},
 
 
 		User: "unknownuser",
 		User: "unknownuser",
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -793,20 +827,22 @@ func TestMultipleContainers(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
-	container1, err := runtime.Create(&Config{
+	container1, _, err := runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"sleep", "2"},
 		Cmd:   []string{"sleep", "2"},
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	defer runtime.Destroy(container1)
 	defer runtime.Destroy(container1)
 
 
-	container2, err := runtime.Create(&Config{
+	container2, _, err := runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"sleep", "2"},
 		Cmd:   []string{"sleep", "2"},
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -847,12 +883,13 @@ func TestMultipleContainers(t *testing.T) {
 func TestStdin(t *testing.T) {
 func TestStdin(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
-	container, err := runtime.Create(&Config{
+	container, _, err := runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"cat"},
 		Cmd:   []string{"cat"},
 
 
 		OpenStdin: true,
 		OpenStdin: true,
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -892,12 +929,13 @@ func TestStdin(t *testing.T) {
 func TestTty(t *testing.T) {
 func TestTty(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
-	container, err := runtime.Create(&Config{
+	container, _, err := runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"cat"},
 		Cmd:   []string{"cat"},
 
 
 		OpenStdin: true,
 		OpenStdin: true,
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -935,13 +973,15 @@ func TestTty(t *testing.T) {
 }
 }
 
 
 func TestEnv(t *testing.T) {
 func TestEnv(t *testing.T) {
+	os.Setenv("TRUE", "false")
+	os.Setenv("TRICKY", "tri\ncky\n")
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
-	container, err := runtime.Create(&Config{
-		Image: GetTestImage(runtime).ID,
-		Cmd:   []string{"env"},
-	},
-	)
+	config, _, _, err := ParseRun([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	container, _, err := runtime.Create(config, "")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -971,6 +1011,11 @@ func TestEnv(t *testing.T) {
 		"HOME=/",
 		"HOME=/",
 		"container=lxc",
 		"container=lxc",
 		"HOSTNAME=" + container.ShortID(),
 		"HOSTNAME=" + container.ShortID(),
+		"FALSE=true",
+		"TRUE=false",
+		"TRICKY=tri",
+		"cky",
+		"",
 	}
 	}
 	sort.Strings(goodEnv)
 	sort.Strings(goodEnv)
 	if len(goodEnv) != len(actualEnv) {
 	if len(goodEnv) != len(actualEnv) {
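The extra goodEnv entries "TRICKY=tri", "cky" and "" are exactly what a plain newline split of the container's `env` output produces when a value itself contains newlines. A tiny sketch of that effect:

    // envsplit_sketch.go - shows why TRICKY=tri\ncky\n turns into three fragments
    // once the test splits the container's `env` output line by line.
    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	output := "HOME=/\nTRICKY=tri\ncky\n"
    	parts := strings.Split(output, "\n")
    	fmt.Printf("%q\n", parts) // ["HOME=/" "TRICKY=tri" "cky" ""]
    }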
@@ -986,12 +1031,13 @@ func TestEnv(t *testing.T) {
 func TestEntrypoint(t *testing.T) {
 func TestEntrypoint(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image:      GetTestImage(runtime).ID,
 			Image:      GetTestImage(runtime).ID,
 			Entrypoint: []string{"/bin/echo"},
 			Entrypoint: []string{"/bin/echo"},
 			Cmd:        []string{"-n", "foobar"},
 			Cmd:        []string{"-n", "foobar"},
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -1009,11 +1055,12 @@ func TestEntrypoint(t *testing.T) {
 func TestEntrypointNoCmd(t *testing.T) {
 func TestEntrypointNoCmd(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image:      GetTestImage(runtime).ID,
 			Image:      GetTestImage(runtime).ID,
 			Entrypoint: []string{"/bin/echo", "foobar"},
 			Entrypoint: []string{"/bin/echo", "foobar"},
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -1060,7 +1107,7 @@ func TestLXCConfig(t *testing.T) {
 	cpuMin := 100
 	cpuMin := 100
 	cpuMax := 10000
 	cpuMax := 10000
 	cpu := cpuMin + rand.Intn(cpuMax-cpuMin)
 	cpu := cpuMin + rand.Intn(cpuMax-cpuMin)
-	container, err := runtime.Create(&Config{
+	container, _, err := runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"/bin/true"},
 		Cmd:   []string{"/bin/true"},
 
 
@@ -1068,6 +1115,7 @@ func TestLXCConfig(t *testing.T) {
 		Memory:    int64(mem),
 		Memory:    int64(mem),
 		CpuShares: int64(cpu),
 		CpuShares: int64(cpu),
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -1084,12 +1132,13 @@ func TestLXCConfig(t *testing.T) {
 func TestCustomLxcConfig(t *testing.T) {
 func TestCustomLxcConfig(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
-	container, err := runtime.Create(&Config{
+	container, _, err := runtime.Create(&Config{
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"/bin/true"},
 		Cmd:   []string{"/bin/true"},
 
 
 		Hostname: "foobar",
 		Hostname: "foobar",
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -1115,10 +1164,11 @@ func BenchmarkRunSequencial(b *testing.B) {
 	runtime := mkRuntime(b)
 	runtime := mkRuntime(b)
 	defer nuke(runtime)
 	defer nuke(runtime)
 	for i := 0; i < b.N; i++ {
 	for i := 0; i < b.N; i++ {
-		container, err := runtime.Create(&Config{
+		container, _, err := runtime.Create(&Config{
 			Image: GetTestImage(runtime).ID,
 			Image: GetTestImage(runtime).ID,
 			Cmd:   []string{"echo", "-n", "foo"},
 			Cmd:   []string{"echo", "-n", "foo"},
 		},
 		},
+			"",
 		)
 		)
 		if err != nil {
 		if err != nil {
 			b.Fatal(err)
 			b.Fatal(err)
@@ -1147,10 +1197,11 @@ func BenchmarkRunParallel(b *testing.B) {
 		complete := make(chan error)
 		complete := make(chan error)
 		tasks = append(tasks, complete)
 		tasks = append(tasks, complete)
 		go func(i int, complete chan error) {
 		go func(i int, complete chan error) {
-			container, err := runtime.Create(&Config{
+			container, _, err := runtime.Create(&Config{
 				Image: GetTestImage(runtime).ID,
 				Image: GetTestImage(runtime).ID,
 				Cmd:   []string{"echo", "-n", "foo"},
 				Cmd:   []string{"echo", "-n", "foo"},
 			},
 			},
+				"",
 			)
 			)
 			if err != nil {
 			if err != nil {
 				complete <- err
 				complete <- err
@@ -1189,7 +1240,7 @@ func BenchmarkRunParallel(b *testing.B) {
 }
 }
 
 
 func tempDir(t *testing.T) string {
 func tempDir(t *testing.T) string {
-	tmpDir, err := ioutil.TempDir("", "docker-test")
+	tmpDir, err := ioutil.TempDir("", "docker-test-container")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -1297,12 +1348,13 @@ func TestBindMounts(t *testing.T) {
 func TestVolumesFromReadonlyMount(t *testing.T) {
 func TestVolumesFromReadonlyMount(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
-	container, err := runtime.Create(
+	container, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image:   GetTestImage(runtime).ID,
 			Image:   GetTestImage(runtime).ID,
 			Cmd:     []string{"/bin/echo", "-n", "foobar"},
 			Cmd:     []string{"/bin/echo", "-n", "foobar"},
 			Volumes: map[string]struct{}{"/test": {}},
 			Volumes: map[string]struct{}{"/test": {}},
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -1316,12 +1368,13 @@ func TestVolumesFromReadonlyMount(t *testing.T) {
 		t.Fail()
 		t.Fail()
 	}
 	}
 
 
-	container2, err := runtime.Create(
+	container2, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image:       GetTestImage(runtime).ID,
 			Image:       GetTestImage(runtime).ID,
 			Cmd:         []string{"/bin/echo", "-n", "foobar"},
 			Cmd:         []string{"/bin/echo", "-n", "foobar"},
 			VolumesFrom: container.ID,
 			VolumesFrom: container.ID,
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -1352,11 +1405,12 @@ func TestRestartWithVolumes(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
-	container, err := runtime.Create(&Config{
+	container, _, err := runtime.Create(&Config{
 		Image:   GetTestImage(runtime).ID,
 		Image:   GetTestImage(runtime).ID,
 		Cmd:     []string{"echo", "-n", "foobar"},
 		Cmd:     []string{"echo", "-n", "foobar"},
 		Volumes: map[string]struct{}{"/test": {}},
 		Volumes: map[string]struct{}{"/test": {}},
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -1395,11 +1449,12 @@ func TestVolumesFromWithVolumes(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
-	container, err := runtime.Create(&Config{
+	container, _, err := runtime.Create(&Config{
 		Image:   GetTestImage(runtime).ID,
 		Image:   GetTestImage(runtime).ID,
 		Cmd:     []string{"sh", "-c", "echo -n bar > /test/foo"},
 		Cmd:     []string{"sh", "-c", "echo -n bar > /test/foo"},
 		Volumes: map[string]struct{}{"/test": {}},
 		Volumes: map[string]struct{}{"/test": {}},
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -1422,13 +1477,14 @@ func TestVolumesFromWithVolumes(t *testing.T) {
 		t.Fail()
 		t.Fail()
 	}
 	}
 
 
-	container2, err := runtime.Create(
+	container2, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image:       GetTestImage(runtime).ID,
 			Image:       GetTestImage(runtime).ID,
 			Cmd:         []string{"cat", "/test/foo"},
 			Cmd:         []string{"cat", "/test/foo"},
 			VolumesFrom: container.ID,
 			VolumesFrom: container.ID,
 			Volumes:     map[string]struct{}{"/test": {}},
 			Volumes:     map[string]struct{}{"/test": {}},
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -1463,7 +1519,7 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	c, err := runtime.Create(config)
+	c, _, err := runtime.Create(config, "")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -1529,11 +1585,12 @@ func TestMultipleVolumesFrom(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
 
 
-	container, err := runtime.Create(&Config{
+	container, _, err := runtime.Create(&Config{
 		Image:   GetTestImage(runtime).ID,
 		Image:   GetTestImage(runtime).ID,
 		Cmd:     []string{"sh", "-c", "echo -n bar > /test/foo"},
 		Cmd:     []string{"sh", "-c", "echo -n bar > /test/foo"},
 		Volumes: map[string]struct{}{"/test": {}},
 		Volumes: map[string]struct{}{"/test": {}},
 	},
 	},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -1556,12 +1613,13 @@ func TestMultipleVolumesFrom(t *testing.T) {
 		t.Fail()
 		t.Fail()
 	}
 	}
 
 
-	container2, err := runtime.Create(
+	container2, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image:   GetTestImage(runtime).ID,
 			Image:   GetTestImage(runtime).ID,
 			Cmd:     []string{"sh", "-c", "echo -n bar > /other/foo"},
 			Cmd:     []string{"sh", "-c", "echo -n bar > /other/foo"},
 			Volumes: map[string]struct{}{"/other": {}},
 			Volumes: map[string]struct{}{"/other": {}},
 		},
 		},
+		"",
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -1577,12 +1635,12 @@ func TestMultipleVolumesFrom(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	container3, err := runtime.Create(
+	container3, _, err := runtime.Create(
 		&Config{
 		&Config{
 			Image:       GetTestImage(runtime).ID,
 			Image:       GetTestImage(runtime).ID,
 			Cmd:         []string{"/bin/echo", "-n", "foobar"},
 			Cmd:         []string{"/bin/echo", "-n", "foobar"},
 			VolumesFrom: strings.Join([]string{container.ID, container2.ID}, ","),
 			VolumesFrom: strings.Join([]string{container.ID, container2.ID}, ","),
-		})
+		}, "")
 
 
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -1593,7 +1651,6 @@ func TestMultipleVolumesFrom(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	t.Log(container3.Volumes)
 	if container3.Volumes["/test"] != container.Volumes["/test"] {
 	if container3.Volumes["/test"] != container.Volumes["/test"] {
 		t.Fail()
 		t.Fail()
 	}
 	}

+ 0 - 1
contrib/MAINTAINERS

@@ -1,2 +1 @@
-Kawsar Saiyeed <kawsar.saiyeed@projiris.com> (@KSid)
 Tianon Gravi <admwiggin@gmail.com> (@tianon)

+ 0 - 1
contrib/brew/.gitignore

@@ -1 +0,0 @@
-*.pyc

+ 0 - 78
contrib/brew/README.md

@@ -1,78 +0,0 @@
-# docker-brew
-
-docker-brew is a command-line tool used to build the docker standard library.
-
-## Install instructions
-
-1. Install python if it isn't already available on your OS of choice
-1. Install the easy_install tool (`sudo apt-get install python-setuptools`
-for Debian)
-1. Install the python package manager, `pip` (`easy_install pip`)
-1. Run the following command: `sudo pip install -r requirements.txt`
-1. You should now be able to use the `docker-brew` script as such.
-
-## Basics
-
-	./docker-brew -h
-
-Display usage and help.
-
-	./docker-brew
-
-Default build from the default repo/branch. Images will be created under the
-`library/` namespace. Does not perform a remote push.
-
-	./docker-brew -n mycorp.com -b stable --push git://github.com/mycorp/docker
-
-Will fetch the library definition files in the `stable` branch of the
-`git://github.com/mycorp/docker` repository and create images under the
-`mycorp.com` namespace (e.g. `mycorp.com/ubuntu`). Created images will then
-be pushed to the official docker repository (pending: support for private
-repositories)
-
-## Library definition files
-
-The library definition files are plain text files found in the `library/`
-subfolder of the docker repository.
-
-### File names
-
-The name of a definition file will determine the name of the image(s) it
-creates. For example, the `library/ubuntu` file will create images in the
-`<namespace>/ubuntu` repository. If multiple instructions are present in
-a single file, all images are expected to be created under a different tag.
-
-### Instruction format
-
-Each line represents a build instruction.
-There are different formats that `docker-brew` is able to parse.
-
-	<git-url>
-	git://github.com/dotcloud/hipache
-	https://github.com/dotcloud/docker.git
-
-The simplest format. `docker-brew` will fetch data from the provided git
-repository from the `HEAD`of its `master` branch. Generated image will be
-tagged as `latest`. Use of this format is discouraged because there is no
-way to ensure stability.
-
-	<docker-tag> <git-url>
-	bleeding-edge git://github.com/dotcloud/docker
-	unstable https://github.com/dotcloud/docker-redis.git
-
-A more advanced format. `docker-brew` will fetch data from the provided git
-repository from the `HEAD`of its `master` branch. Generated image will be
-tagged as `<docker-tag>`. Recommended if we always want to provide a snapshot
-of the latest development. Again, no way to ensure stability.
-
-	<docker-tag>	<git-url>	T:<git-tag>
-	2.4.0 	git://github.com/dotcloud/docker-redis	T:2.4.0
-	<docker-tag>	<git-url>	B:<git-branch>
-	zfs		git://github.com/dotcloud/docker	B:zfs-support
-	<docker-tag>	<git-url>	C:<git-commit-id>
-	2.2.0 	https://github.com/dotcloud/docker-redis.git C:a4bf8923ee4ec566d3ddc212
-
-The most complete format. `docker-brew` will fetch data from the provided git
-repository from the provided reference (if it's a branch, brew will fetch its
-`HEAD`). Generated image will be tagged as `<docker-tag>`. Recommended whenever
-possible.

+ 0 - 1
contrib/brew/brew/__init__.py

@@ -1 +0,0 @@
-from brew import build_library, DEFAULT_REPOSITORY, DEFAULT_BRANCH

+ 0 - 185
contrib/brew/brew/brew.py

@@ -1,185 +0,0 @@
-import os
-import logging
-from shutil import rmtree
-
-import docker
-
-import git
-
-DEFAULT_REPOSITORY = 'git://github.com/dotcloud/docker'
-DEFAULT_BRANCH = 'master'
-
-logger = logging.getLogger(__name__)
-logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
-                    level='INFO')
-client = docker.Client()
-processed = {}
-processed_folders = []
-
-
-def build_library(repository=None, branch=None, namespace=None, push=False,
-        debug=False, prefill=True, registry=None):
-    dst_folder = None
-    summary = Summary()
-    if repository is None:
-        repository = DEFAULT_REPOSITORY
-    if branch is None:
-        branch = DEFAULT_BRANCH
-    if debug:
-        logger.setLevel('DEBUG')
-
-    if not (repository.startswith('https://') or repository.startswith('git://')):
-        logger.info('Repository provided assumed to be a local path')
-        dst_folder = repository
-
-    try:
-        client.version()
-    except Exception as e:
-        logger.error('Could not reach the docker daemon. Please make sure it '
-            'is running.')
-        logger.warning('Also make sure you have access to the docker UNIX '
-            'socket (use sudo)')
-        return
-
-    #FIXME: set destination folder and only pull latest changes instead of
-    # cloning the whole repo everytime
-    if not dst_folder:
-        logger.info('Cloning docker repo from {0}, branch: {1}'.format(
-            repository, branch))
-        try:
-            rep, dst_folder = git.clone_branch(repository, branch)
-        except Exception as e:
-            logger.exception(e)
-            logger.error('Source repository could not be fetched. Check '
-                'that the address is correct and the branch exists.')
-            return
-    try:
-        dirlist = os.listdir(os.path.join(dst_folder, 'library'))
-    except OSError as e:
-        logger.error('The path provided ({0}) could not be found or didn\'t'
-            'contain a library/ folder.'.format(dst_folder))
-        return
-    for buildfile in dirlist:
-        if buildfile == 'MAINTAINERS':
-            continue
-        f = open(os.path.join(dst_folder, 'library', buildfile))
-        linecnt = 0
-        for line in f:
-            linecnt = linecnt + 1
-            logger.debug('{0} ---> {1}'.format(buildfile, line))
-            args = line.split()
-            try:
-                if len(args) > 3:
-                    raise RuntimeError('Incorrect line format, '
-                        'please refer to the docs')
-
-                url = None
-                ref = 'refs/heads/master'
-                tag = None
-                if len(args) == 1:  # Just a URL, simple mode
-                    url = args[0]
-                elif len(args) == 2 or len(args) == 3:  # docker-tag   url
-                    url = args[1]
-                    tag = args[0]
-
-                if len(args) == 3:  # docker-tag  url     B:branch or T:tag
-                    ref = None
-                    if args[2].startswith('B:'):
-                        ref = 'refs/heads/' + args[2][2:]
-                    elif args[2].startswith('T:'):
-                        ref = 'refs/tags/' + args[2][2:]
-                    elif args[2].startswith('C:'):
-                        ref = args[2][2:]
-                    else:
-                        raise RuntimeError('Incorrect line format, '
-                            'please refer to the docs')
-                if prefill:
-                    logger.debug('Pulling {0} from official repository (cache '
-                        'fill)'.format(buildfile))
-                    client.pull(buildfile)
-                img = build_repo(url, ref, buildfile, tag, namespace, push,
-                    registry)
-                summary.add_success(buildfile, (linecnt, line), img)
-                processed['{0}@{1}'.format(url, ref)] = img
-            except Exception as e:
-                logger.exception(e)
-                summary.add_exception(buildfile, (linecnt, line), e)
-
-        f.close()
-    if dst_folder != repository:
-        rmtree(dst_folder, True)
-    for d in processed_folders:
-        rmtree(d, True)
-    summary.print_summary(logger)
-
-
-def build_repo(repository, ref, docker_repo, docker_tag, namespace, push, registry):
-    docker_repo = '{0}/{1}'.format(namespace or 'library', docker_repo)
-    img_id = None
-    dst_folder = None
-    if '{0}@{1}'.format(repository, ref) not in processed.keys():
-        logger.info('Cloning {0} (ref: {1})'.format(repository, ref))
-        if repository not in processed:
-            rep, dst_folder = git.clone(repository, ref)
-            processed[repository] = rep
-            processed_folders.append(dst_folder)
-        else:
-            dst_folder = git.checkout(processed[repository], ref)
-        if not 'Dockerfile' in os.listdir(dst_folder):
-            raise RuntimeError('Dockerfile not found in cloned repository')
-        logger.info('Building using dockerfile...')
-        img_id, logs = client.build(path=dst_folder, quiet=True)
-    else:
-        img_id = processed['{0}@{1}'.format(repository, ref)]
-    logger.info('Committing to {0}:{1}'.format(docker_repo,
-        docker_tag or 'latest'))
-    client.tag(img_id, docker_repo, docker_tag)
-    if push:
-        logger.info('Pushing result to registry {0}'.format(
-            registry or "default"))
-        if registry is not None:
-            docker_repo = '{0}/{1}'.format(registry, docker_repo)
-            logger.info('Also tagging {0}'.format(docker_repo))
-            client.tag(img_id, docker_repo, docker_tag)
-        client.push(docker_repo)
-    return img_id
-
-
-class Summary(object):
-    def __init__(self):
-        self._summary = {}
-        self._has_exc = False
-
-    def _add_data(self, image, linestr, data):
-        if image not in self._summary:
-            self._summary[image] = { linestr: data }
-        else:
-            self._summary[image][linestr] = data
-
-    def add_exception(self, image, line, exc):
-        lineno, linestr = line
-        self._add_data(image, linestr, { 'line': lineno, 'exc': str(exc) })
-        self._has_exc = True
-
-    def add_success(self, image, line, img_id):
-        lineno, linestr = line
-        self._add_data(image, linestr, { 'line': lineno, 'id': img_id })
-
-    def print_summary(self, logger=None):
-        linesep = ''.center(61, '-') + '\n'
-        s = 'BREW BUILD SUMMARY\n' + linesep
-        success = 'OVERALL SUCCESS: {}\n'.format(not self._has_exc)
-        details = linesep
-        for image, lines in self._summary.iteritems():
-            details = details + '{}\n{}'.format(image, linesep)
-            for linestr, data in lines.iteritems():
-                details = details + '{0:2} | {1} | {2:50}\n'.format(
-                    data['line'],
-                    'KO' if 'exc' in data else 'OK',
-                    data['exc'] if 'exc' in data else data['id']
-                )
-            details = details + linesep
-        if logger:
-            logger.info(s + success + details)
-        else:
-            print s, success, details

+ 0 - 63
contrib/brew/brew/git.py

@@ -1,63 +0,0 @@
-import tempfile
-import logging
-
-from dulwich import index
-from dulwich.client import get_transport_and_path
-from dulwich.repo import Repo
-
-logger = logging.getLogger(__name__)
-
-
-def clone_branch(repo_url, branch="master", folder=None):
-    return clone(repo_url, 'refs/heads/' + branch, folder)
-
-
-def clone_tag(repo_url, tag, folder=None):
-    return clone(repo_url, 'refs/tags/' + tag, folder)
-
-
-def checkout(rep, ref=None):
-    is_commit = False
-    if ref is None:
-        ref = 'refs/heads/master'
-    elif not ref.startswith('refs/'):
-        is_commit = True
-    if is_commit:
-        rep['HEAD'] = rep.commit(ref)
-    else:
-        rep['HEAD'] = rep.refs[ref]
-    indexfile = rep.index_path()
-    tree = rep["HEAD"].tree
-    index.build_index_from_tree(rep.path, indexfile, rep.object_store, tree)
-    return rep.path
-
-def clone(repo_url, ref=None, folder=None):
-    is_commit = False
-    if ref is None:
-        ref = 'refs/heads/master'
-    elif not ref.startswith('refs/'):
-        is_commit = True
-    logger.debug("clone repo_url={0}, ref={1}".format(repo_url, ref))
-    if folder is None:
-        folder = tempfile.mkdtemp()
-    logger.debug("folder = {0}".format(folder))
-    rep = Repo.init(folder)
-    client, relative_path = get_transport_and_path(repo_url)
-    logger.debug("client={0}".format(client))
-
-    remote_refs = client.fetch(relative_path, rep)
-    for k, v in remote_refs.iteritems():
-        try:
-            rep.refs.add_if_new(k, v)
-        except:
-            pass
-
-    if is_commit:
-        rep['HEAD'] = rep.commit(ref)
-    else:
-        rep['HEAD'] = remote_refs[ref]
-    indexfile = rep.index_path()
-    tree = rep["HEAD"].tree
-    index.build_index_from_tree(rep.path, indexfile, rep.object_store, tree)
-    logger.debug("done")
-    return rep, folder

+ 0 - 35
contrib/brew/docker-brew

@@ -1,35 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-import sys
-
-try:
-    import brew
-except ImportError as e:
-    print str(e)
-    print 'Please install the required dependencies first'
-    print 'sudo pip install -r requirements.txt'
-    sys.exit(1)
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser('Build the docker standard library')
-    parser.add_argument('--push', action='store_true', default=False,
-        help='Push generated repositories')
-    parser.add_argument('--debug', default=False, action='store_true',
-        help='Enable debugging output')
-    parser.add_argument('--noprefill', default=True, action='store_false',
-        dest='prefill', help='Disable cache prefill')
-    parser.add_argument('-n', metavar='NAMESPACE', default='library',
-        help='Namespace used for generated repositories.'
-        ' Default is library')
-    parser.add_argument('-b', metavar='BRANCH', default=brew.DEFAULT_BRANCH,
-        help='Branch in the repository where the library definition'
-        ' files will be fetched. Default is ' + brew.DEFAULT_BRANCH)
-    parser.add_argument('repository', default=brew.DEFAULT_REPOSITORY,
-        nargs='?', help='git repository containing the library definition'
-        ' files. Default is ' + brew.DEFAULT_REPOSITORY)
-    parser.add_argument('--reg', default=None, help='Registry address to'
-        ' push build results to. Also sets push to true.')
-    args = parser.parse_args()
-    brew.build_library(args.repository, args.b, args.n,
-        args.push or args.reg is not None, args.debug, args.prefill, args.reg)

+ 0 - 2
contrib/brew/requirements.txt

@@ -1,2 +0,0 @@
-dulwich==0.9.0
--e git://github.com/dotcloud/docker-py.git#egg=docker-py

+ 0 - 22
contrib/brew/setup.py

@@ -1,22 +0,0 @@
-#!/usr/bin/env python
-import os
-from setuptools import setup
-
-ROOT_DIR = os.path.dirname(__file__)
-SOURCE_DIR = os.path.join(ROOT_DIR)
-
-test_requirements = []
-setup(
-    name="docker-brew",
-    version='0.0.1',
-    description="-",
-    packages=['dockerbrew'],
-    install_requires=['dulwich', 'docker'] + test_requirements,
-    zip_safe=False,
-    classifiers=['Development Status :: 3 - Alpha',
-                 'Environment :: Other Environment',
-                 'Intended Audience :: Developers',
-                 'Operating System :: OS Independent',
-                 'Programming Language :: Python',
-                 'Topic :: Utilities'],
-    )

+ 1 - 1
contrib/docker.bash → contrib/completion/bash/docker

@@ -341,7 +341,7 @@ _docker_pull()

 _docker_push()
 {
-	return
+	__docker_image_repos
 }

 _docker_restart()

+ 242 - 0
contrib/completion/zsh/_docker

@@ -0,0 +1,242 @@
+#compdef docker 
+#
+# zsh completion for docker (http://docker.io)
+#
+# version:  0.2.2
+# author:   Felix Riedel
+# license:  BSD License
+# github:   https://github.com/felixr/docker-zsh-completion
+#
+
+__parse_docker_list() {
+    sed -e '/^ID/d' -e 's/[ ]\{2,\}/|/g' -e 's/ \([hdwm]\)\(inutes\|ays\|ours\|eeks\)/\1/' | awk ' BEGIN {FS="|"} { printf("%s:%7s, %s\n", $1, $4, $2)}'
+}
+
+__docker_stoppedcontainers() {
+    local expl
+    declare -a stoppedcontainers 
+    stoppedcontainers=(${(f)"$(docker ps -a | grep --color=never 'Exit' |  __parse_docker_list )"})
+    _describe -t containers-stopped "Stopped Containers" stoppedcontainers 
+}
+
+__docker_runningcontainers() {
+    local expl
+    declare -a containers 
+
+    containers=(${(f)"$(docker ps | __parse_docker_list)"})
+    _describe -t containers-active "Running Containers" containers 
+}
+
+__docker_containers () {
+    __docker_stoppedcontainers 
+    __docker_runningcontainers
+}
+
+__docker_images () {
+    local expl
+    declare -a images
+    images=(${(f)"$(docker images | awk '(NR > 1){printf("%s\\:%s\n", $1,$2)}')"})
+    images=($images ${(f)"$(docker images | awk '(NR > 1){printf("%s:%-15s in %s\n", $3,$2,$1)}')"})
+    _describe -t docker-images "Images" images
+}
+
+__docker_tags() {
+    local expl
+    declare -a tags
+    tags=(${(f)"$(docker images | awk '(NR>1){print $2}'| sort | uniq)"})
+    _describe -t docker-tags "tags" tags
+}
+
+__docker_search() {
+    # declare -a dockersearch
+    local cache_policy
+    zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
+    if [[ -z "$cache_policy" ]]; then
+        zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy 
+    fi
+
+    local searchterm cachename
+    searchterm="${words[$CURRENT]%/}"
+    cachename=_docker-search-$searchterm
+
+    local expl
+    local -a result 
+    if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \
+        && ! _retrieve_cache ${cachename#_}; then
+        _message "Searching for ${searchterm}..."
+        result=(${(f)"$(docker search ${searchterm} | awk '(NR>2){print $1}')"})
+        _store_cache ${cachename#_} result
+    fi 
+    _wanted dockersearch expl 'Available images' compadd -a result 
+}
+
+__docker_caching_policy()
+{
+  # oldp=( "$1"(Nmh+24) )     # 24 hour
+  oldp=( "$1"(Nmh+1) )     # 1 hour
+  (( $#oldp ))
+}
+
+
+__docker_repositories () {
+    local expl
+    declare -a repos
+    repos=(${(f)"$(docker images | sed -e '1d' -e 's/[ ].*//' | sort | uniq)"})
+    _describe -t docker-repos "Repositories" repos
+}
+
+__docker_commands () {
+    # local -a  _docker_subcommands
+    local cache_policy
+
+    zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
+    if [[ -z "$cache_policy" ]]; then
+        zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy 
+    fi
+
+    if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \
+        && ! _retrieve_cache docker_subcommands; 
+    then
+        _docker_subcommands=(${${(f)"$(_call_program commands 
+        docker 2>&1 | sed -e '1,6d' -e '/^[ ]*$/d' -e 's/[ ]*\([^ ]\+\)\s*\([^ ].*\)/\1:\2/' )"}})
+        _docker_subcommands=($_docker_subcommands 'help:Show help for a command') 
+        _store_cache docker_subcommands _docker_subcommands
+    fi
+    _describe -t docker-commands "docker command" _docker_subcommands
+}
+
+__docker_subcommand () {
+    local -a _command_args
+    case "$words[1]" in
+        (attach|wait)
+            _arguments ':containers:__docker_runningcontainers'
+            ;;
+        (build)
+            _arguments \
+                '-t=-:repository:__docker_repositories' \
+                ':path or URL:_directories'
+            ;;
+        (commit)
+            _arguments \
+                ':container:__docker_containers' \
+                ':repository:__docker_repositories' \
+                ':tag: '
+            ;;
+        (diff|export|logs)
+            _arguments '*:containers:__docker_containers'
+            ;;
+        (history)
+            _arguments '*:images:__docker_images'
+            ;;
+        (images)
+            _arguments \
+                '-a[Show all images]' \
+                ':repository:__docker_repositories'
+            ;;
+        (inspect)
+            _arguments '*:containers:__docker_containers'
+            ;;
+        (insert)
+            _arguments '1:containers:__docker_containers' \
+                       '2:URL:(http:// file://)' \
+                       '3:file:_files'
+            ;;
+        (kill)
+            _arguments '*:containers:__docker_runningcontainers'
+            ;;
+        (port)
+            _arguments '1:containers:__docker_runningcontainers'
+            ;;
+        (start)
+            _arguments '*:containers:__docker_stoppedcontainers'
+            ;;
+        (rm)
+            _arguments '-v[Remove the volumes associated to the container]' \
+                '*:containers:__docker_stoppedcontainers'
+            ;;
+        (rmi)
+            _arguments '-v[Remove the volumes associated to the container]' \
+                '*:images:__docker_images'
+            ;;
+        (top)
+            _arguments '1:containers:__docker_runningcontainers'
+            ;;
+        (restart|stop)
+            _arguments '-t=-[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)' \
+                '*:containers:__docker_runningcontainers'
+            ;;
+        (ps)
+            _arguments '-a[Show all containers. Only running containers are shown by default]' \
+                '-h[Show help]' \
+                '-beforeId=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \
+            '-n=-[Show n last created containers, include non-running one]:n:(1 5 10 25 50)'
+            ;;
+        (tag)
+            _arguments \
+                '-f[force]'\
+                ':image:__docker_images'\
+                ':repository:__docker_repositories' \
+                ':tag:__docker_tags'
+            ;;
+        (run)
+            _arguments \
+                '-a=-[Attach to stdin, stdout or stderr]:toggle:(true false)' \
+                '-c=-[CPU shares (relative weight)]:CPU shares: ' \
+                '-d[Detached mode: leave the container running in the background]' \
+                '*-dns=[Set custom dns servers]:dns server: ' \
+                '*-e=[Set environment variables]:environment variable: ' \
+                '-entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \
+                '-h=-[Container host name]:hostname:_hosts' \
+                '-i[Keep stdin open even if not attached]' \
+                '-m=-[Memory limit (in bytes)]:limit: ' \
+                '*-p=-[Expose a container''s port to the host]:port:_ports' \
+                '-t=-[Allocate a pseudo-tty]:toggle:(true false)' \
+                '-u=-[Username or UID]:user:_users' \
+                '*-v=-[Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)]:volume: '\
+                '-volumes-from=-[Mount volumes from the specified container]:volume: ' \
+                '(-):images:__docker_images' \
+                '(-):command: _command_names -e' \
+                '*::arguments: _normal'
+                ;;
+        (pull|search)
+            _arguments ':name:__docker_search'
+            ;;
+        (help)
+            _arguments ':subcommand:__docker_commands'
+            ;;
+        (*)
+            _message 'Unknown sub command'
+    esac
+
+}
+
+_docker () {
+    local curcontext="$curcontext" state line
+    typeset -A opt_args
+
+    _arguments -C \
+      '-H=-[tcp://host:port to bind/connect to]:socket: ' \
+         '(-): :->command' \
+         '(-)*:: :->option-or-argument' 
+
+    if (( CURRENT == 1 )); then
+
+    fi
+    case $state in 
+        (command)
+            __docker_commands
+            ;;
+        (option-or-argument)
+            curcontext=${curcontext%:*:*}:docker-$words[1]:
+            __docker_subcommand 
+            ;;
+    esac
+}
+
+_docker "$@"

+ 27 - 0
contrib/host-integration/Dockerfile.dev

@@ -0,0 +1,27 @@
+#
+# This Dockerfile will create an image that allows you to generate upstart and
+# systemd scripts (more to come)
+#
+# docker-version 0.6.2
+#
+
+FROM		ubuntu:12.10
+MAINTAINER	Guillaume J. Charmes <guillaume@dotcloud.com>
+
+RUN		apt-get update && apt-get install -y wget git mercurial
+
+# Install Go
+RUN		wget --no-check-certificate https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz -O go-1.1.2.tar.gz
+RUN		tar -xzvf go-1.1.2.tar.gz && mv /go /goroot
+RUN		mkdir /go
+
+ENV		GOROOT	  /goroot
+ENV		GOPATH	  /go
+ENV		PATH	  $GOROOT/bin:$PATH
+
+RUN		go get github.com/dotcloud/docker && cd /go/src/github.com/dotcloud/docker && git checkout v0.6.3
+ADD		manager.go	/manager/
+RUN		cd /manager && go build -o /usr/bin/manager
+
+ENTRYPOINT	["/usr/bin/manager"]
+
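
A rough, hedged sketch of exercising this dev image; it assumes Dockerfile.dev is copied over the default Dockerfile name, the image tag is arbitrary, and the host socket is bind-mounted so the manager can inspect containers:

    cp Dockerfile.dev Dockerfile
    docker build -t manager-dev .
    docker run -v /var/run/docker.sock:/var/run/docker.sock manager-dev \
        -t systemd <container-id> > myapp.service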

+ 4 - 0
contrib/host-integration/Dockerfile.min

@@ -0,0 +1,4 @@
+FROM		busybox
+MAINTAINER	Guillaume J. Charmes <guillaume@dotcloud.com>
+ADD		manager	  /usr/bin/
+ENTRYPOINT	["/usr/bin/manager"]

+ 130 - 0
contrib/host-integration/manager.go

@@ -0,0 +1,130 @@
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"github.com/dotcloud/docker"
+	"os"
+	"strings"
+	"text/template"
+)
+
+var templates = map[string]string{
+
+	"upstart": `description "{{.description}}"
+author "{{.author}}"
+start on filesystem and started lxc-net and started docker
+stop on runlevel [!2345]
+respawn
+exec /home/vagrant/goroot/bin/docker start -a {{.container_id}}
+`,
+
+	"systemd": `[Unit]
+	Description={{.description}}
+	Author={{.author}}
+	After=docker.service
+
+[Service]
+	Restart=always
+	ExecStart=/usr/bin/docker start -a {{.container_id}}
+	ExecStop=/usr/bin/docker stop -t 2 {{.container_id}}
+
+[Install]
+	WantedBy=local.target
+`,
+}
+
+func main() {
+	// Parse command line for custom options
+	kind := flag.String("t", "upstart", "Type of manager requested")
+	author := flag.String("a", "<none>", "Author of the image")
+	description := flag.String("d", "<none>", "Description of the image")
+	flag.Usage = func() {
+		fmt.Fprintf(os.Stderr, "\nUsage: manager <container id>\n\n")
+		flag.PrintDefaults()
+	}
+	flag.Parse()
+
+	// We require at least the container ID
+	if flag.NArg() != 1 {
+		println(flag.NArg())
+		flag.Usage()
+		return
+	}
+
+	// Check that the requested process manager is supported
+	if _, exists := templates[*kind]; !exists {
+		panic("Unkown script template")
+	}
+
+	// Load the requested template
+	tpl, err := template.New("processManager").Parse(templates[*kind])
+	if err != nil {
+		panic(err)
+	}
+
+	// Create stdout/stderr buffers
+	bufOut := bytes.NewBuffer(nil)
+	bufErr := bytes.NewBuffer(nil)
+
+	// Instantiate the Docker CLI
+	cli := docker.NewDockerCli(nil, bufOut, bufErr, "unix", "/var/run/docker.sock")
+	// Retrieve the container info
+	if err := cli.CmdInspect(flag.Arg(0)); err != nil {
+		// As of docker v0.6.3, CmdInspect always returns nil
+		panic(err)
+	}
+
+	// If there is nothing in the error buffer, then the Docker daemon is there and the container has been found
+	if bufErr.Len() == 0 {
+		// Unmarshal the resulting container data
+		c := []*docker.Container{{}}
+		if err := json.Unmarshal(bufOut.Bytes(), &c); err != nil {
+			panic(err)
+		}
+		// Reset the buffers
+		bufOut.Reset()
+		bufErr.Reset()
+		// Retrieve the info of the linked image
+		if err := cli.CmdInspect(c[0].Image); err != nil {
+			panic(err)
+		}
+		// If there is nothing in the error buffer, then the image has been found.
+		if bufErr.Len() == 0 {
+			// Unmarshal the resulting image data
+			img := []*docker.Image{{}}
+			if err := json.Unmarshal(bufOut.Bytes(), &img); err != nil {
+				panic(err)
+			}
+			// If no author has been set, use the one from the image
+			if *author == "<none>" && img[0].Author != "" {
+				*author = strings.Replace(img[0].Author, "\"", "", -1)
+			}
+			// If no description has been set, use the comment from the image
+			if *description == "<none>" && img[0].Comment != "" {
+				*description = strings.Replace(img[0].Comment, "\"", "", -1)
+			}
+		}
+	}
+
+	// Old version: Write the resulting script to file
+	// f, err := os.OpenFile(kind, os.O_CREATE|os.O_WRONLY, 0755)
+	// if err != nil {
+	// 	panic(err)
+	// }
+	// defer f.Close()
+
+	// Create a map with needed data
+	data := map[string]string{
+		"author":       *author,
+		"description":  *description,
+		"container_id": flag.Arg(0),
+	}
+
+	// Process the template and output it on Stdout
+	if err := tpl.Execute(os.Stdout, data); err != nil {
+		panic(err)
+	}
+}
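
For reference, a hedged example of invoking the resulting binary directly on a host; the container ID and output paths are placeholders:

    # generate an upstart job for an existing container
    ./manager -t upstart -a 'Jane Doe' -d 'my web app' <container-id> > /etc/init/myapp.conf
    # or a systemd unit
    ./manager -t systemd <container-id> > /etc/systemd/system/myapp.service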

+ 53 - 0
contrib/host-integration/manager.sh

@@ -0,0 +1,53 @@
+#!/bin/sh
+set -e
+
+usage() {
+	echo >&2 "usage: $0 [-a author] [-d description] container [manager]"
+	echo >&2 "   ie: $0 -a 'John Smith' 4ec9612a37cd systemd"
+	echo >&2 "   ie: $0 -d 'Super Cool System' 4ec9612a37cd # defaults to upstart"
+	exit 1
+}
+
+auth='<none>'
+desc='<none>'
+have_auth=
+have_desc=
+while getopts a:d: opt; do
+	case "$opt" in
+		a)
+			auth="$OPTARG"
+			have_auth=1
+			;;
+		d)
+			desc="$OPTARG"
+			have_desc=1
+			;;
+	esac
+done
+shift $(($OPTIND - 1))
+
+[ $# -ge 1 -a $# -le 2 ] || usage
+
+cid="$1"
+script="${2:-upstart}"
+if [ ! -e "manager/$script" ]; then
+	echo >&2 "Error: manager type '$script' is unknown (PRs always welcome!)."
+	echo >&2 'The currently supported types are:'
+	echo >&2 "  $(cd manager && echo *)"
+	exit 1
+fi
+
+# TODO https://github.com/dotcloud/docker/issues/734 (docker inspect formatting)
+#if command -v docker > /dev/null 2>&1; then
+#	image="$(docker inspect -f '{{.Image}}' "$cid")"
+#	if [ "$image" ]; then
+#		if [ -z "$have_auth" ]; then
+#			auth="$(docker inspect -f '{{.Author}}' "$image")"
+#		fi
+#		if [ -z "$have_desc" ]; then
+#			desc="$(docker inspect -f '{{.Comment}}' "$image")"
+#		fi
+#	fi
+#fi
+
+exec "manager/$script" "$cid" "$auth" "$desc"

+ 20 - 0
contrib/host-integration/manager/systemd

@@ -0,0 +1,20 @@
+#!/bin/sh
+set -e
+
+cid="$1"
+auth="$2"
+desc="$3"
+
+cat <<-EOF
+	[Unit]
+	Description=$desc
+	Author=$auth
+	After=docker.service
+	
+	[Service]
+	ExecStart=/usr/bin/docker start -a $cid
+	ExecStop=/usr/bin/docker stop -t 2 $cid
+	
+	[Install]
+	WantedBy=local.target
+EOF

+ 15 - 0
contrib/host-integration/manager/upstart

@@ -0,0 +1,15 @@
+#!/bin/sh
+set -e
+
+cid="$1"
+auth="$2"
+desc="$3"
+
+cat <<-EOF
+	description "$(echo "$desc" | sed 's/"/\\"/g')"
+	author "$(echo "$auth" | sed 's/"/\\"/g')"
+	start on filesystem and started lxc-net and started docker
+	stop on runlevel [!2345]
+	respawn
+	exec /usr/bin/docker start -a "$cid"
+EOF

+ 13 - 0
contrib/init/openrc/docker.confd

@@ -0,0 +1,13 @@
+# /etc/conf.d/docker: config file for /etc/init.d/docker
+
+# where the docker daemon output gets piped
+#DOCKER_LOGFILE="/var/log/docker.log"
+
+# where docker's pid get stored
+#DOCKER_PIDFILE="/run/docker.pid"
+
+# where the docker daemon itself is run from
+#DOCKER_BINARY="/usr/bin/docker"
+
+# any other random options you want to pass to docker
+DOCKER_OPTS=""

+ 31 - 0
contrib/init/openrc/docker.initd

@@ -0,0 +1,31 @@
+#!/sbin/runscript
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+DOCKER_LOGFILE=${DOCKER_LOGFILE:-/var/log/${SVCNAME}.log}
+DOCKER_PIDFILE=${DOCKER_PIDFILE:-/run/${SVCNAME}.pid}
+DOCKER_BINARY=${DOCKER_BINARY:-/usr/bin/docker}
+DOCKER_OPTS=${DOCKER_OPTS:-}
+
+start() {
+	checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE"
+
+	ebegin "Starting docker daemon"
+	start-stop-daemon --start --background \
+		--exec "$DOCKER_BINARY" \
+		--pidfile "$DOCKER_PIDFILE" \
+		--stdout "$DOCKER_LOGFILE" \
+		--stderr "$DOCKER_LOGFILE" \
+		-- -d -p "$DOCKER_PIDFILE" \
+		$DOCKER_OPTS
+	eend $?
+}
+
+stop() {
+	ebegin "Stopping docker daemon"
+	start-stop-daemon --stop \
+		--exec "$DOCKER_BINARY" \
+		--pidfile "$DOCKER_PIDFILE"
+	eend $?
+}
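
A hedged sketch of installing and enabling the OpenRC service on a Gentoo-style host (paths assume a repository checkout):

    cp contrib/init/openrc/docker.confd /etc/conf.d/docker
    cp contrib/init/openrc/docker.initd /etc/init.d/docker
    chmod +x /etc/init.d/docker
    rc-update add docker default
    rc-service docker start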

+ 13 - 0
contrib/init/systemd/docker.service

@@ -0,0 +1,13 @@
+[Unit]
+Description=Easily create lightweight, portable, self-sufficient containers from any application!
+Documentation=http://docs.docker.io
+Requires=network.target
+After=multi-user.target
+
+[Service]
+Type=simple
+ExecStartPre=/bin/mount --make-rprivate /
+ExecStart=/usr/bin/docker -d
+
+[Install]
+WantedBy=multi-user.target
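
A sketch only (unit paths vary by distribution) of installing and starting the unit:

    cp contrib/init/systemd/docker.service /etc/systemd/system/docker.service
    systemctl daemon-reload
    systemctl enable docker.service
    systemctl start docker.service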

+ 85 - 0
contrib/init/sysvinit/docker

@@ -0,0 +1,85 @@
+#!/bin/sh
+
+### BEGIN INIT INFO
+# Provides:           docker
+# Required-Start:     $syslog $remote_fs
+# Required-Stop:      $syslog $remote_fs
+# Default-Start:      2 3 4 5
+# Default-Stop:       0 1 6
+# Short-Description:  Linux container runtime
+# Description:        Linux container runtime
+### END INIT INFO
+
+DOCKER=/usr/bin/docker
+DOCKER_PIDFILE=/var/run/docker.pid
+DOCKER_OPTS=
+
+PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
+
+# Check lxc-docker is present
+[ -x $DOCKER ] || (log_failure_msg "docker not present"; exit 1)
+
+# Get lsb functions
+. /lib/lsb/init-functions
+
+if [ -f /etc/default/lxc ]; then
+	. /etc/default/lxc
+fi
+
+if [ "$1" = start ] && which initctl >/dev/null && initctl version | grep -q upstart; then
+	exit 1
+fi
+
+check_root_id ()
+{
+	if [ "$(id -u)" != "0" ]; then
+		log_failure_msg "Docker must be run as root"; exit 1
+	fi
+}
+
+case "$1" in
+	start)
+		check_root_id || exit 1
+		log_begin_msg "Starting Docker"
+		mount | grep cgroup >/dev/null || mount -t cgroup none /sys/fs/cgroup 2>/dev/null
+		start-stop-daemon --start --background $NO_CLOSE \
+			--exec "$DOCKER" \
+			--pidfile "$DOCKER_PIDFILE" \
+			-- -d -p "$DOCKER_PIDFILE" \
+			$DOCKER_OPTS
+		log_end_msg $?
+		;;
+
+	stop)
+		check_root_id || exit 1
+		log_begin_msg "Stopping Docker"
+		start-stop-daemon --stop \
+			--pidfile "$DOCKER_PIDFILE"
+		log_end_msg $?
+		;;
+
+	restart)
+		check_root_id || exit 1
+		docker_pid=`cat "$DOCKER_PIDFILE" 2>/dev/null`
+		[ -n "$docker_pid" ] \
+			&& ps -p $docker_pid > /dev/null 2>&1 \
+			&& $0 stop
+		$0 start
+		;;
+
+	force-reload)
+		check_root_id || exit 1
+		$0 restart
+		;;
+
+	status)
+		status_of_proc -p "$DOCKER_PIDFILE" "$DOCKER" docker
+		;;
+
+	*)
+		echo "Usage: $0 {start|stop|restart|status}"
+		exit 1
+		;;
+esac
+
+exit 0
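
A hedged sketch of wiring the script into a Debian-style sysvinit host:

    cp contrib/init/sysvinit/docker /etc/init.d/docker
    chmod +x /etc/init.d/docker
    update-rc.d docker defaults
    service docker start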

+ 2 - 2
packaging/ubuntu/docker.upstart → contrib/init/upstart/docker.conf

@@ -1,4 +1,4 @@
-description     "Run docker"
+description "Docker daemon"
 
 
 start on filesystem and started lxc-net
 stop on runlevel [!2345]
@@ -6,5 +6,5 @@ stop on runlevel [!2345]
 respawn
 
 script
-    /usr/bin/docker -d
+	/usr/bin/docker -d
 end script

+ 0 - 61
contrib/install.sh

@@ -1,61 +0,0 @@
-#!/bin/sh
-# This script is meant for quick & easy install via 'curl URL-OF-SCRIPT | sh'
-# Original version by Jeff Lindsay <progrium@gmail.com>
-# Revamped by Jerome Petazzoni <jerome@dotcloud.com>
-#
-# This script canonical location is https://get.docker.io/; to update it, run:
-# s3cmd put -m text/x-shellscript -P install.sh s3://get.docker.io/index
-
-echo "Ensuring basic dependencies are installed..."
-apt-get -qq update
-apt-get -qq install lxc wget
-
-echo "Looking in /proc/filesystems to see if we have AUFS support..."
-if grep -q aufs /proc/filesystems
-then
-    echo "Found."
-else
-    echo "Ahem, it looks like the current kernel does not support AUFS."
-    echo "Let's see if we can load the AUFS module with modprobe..."
-    if modprobe aufs
-    then
-        echo "Module loaded."
-    else
-        echo "Ahem, things didn't turn out as expected."
-        KPKG=linux-image-extra-$(uname -r)
-        echo "Trying to install $KPKG..."
-        if apt-get -qq install $KPKG
-        then
-            echo "Installed."
-        else
-            echo "Oops, we couldn't install the -extra kernel."
-            echo "Are you sure you are running a supported version of Ubuntu?"
-            echo "Proceeding anyway, but Docker will probably NOT WORK!"
-        fi
-    fi
-fi
-
-echo "Downloading docker binary to /usr/local/bin..."
-curl -s https://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-latest \
-    > /usr/local/bin/docker
-chmod +x /usr/local/bin/docker
-
-if [ -f /etc/init/dockerd.conf ]
-then
-  echo "Upstart script already exists."
-else
-  echo "Creating /etc/init/dockerd.conf..."
-  cat >/etc/init/dockerd.conf <<EOF
-description "Docker daemon"
-start on filesystem and started lxc-net
-stop on runlevel [!2345]
-respawn
-exec /usr/local/bin/docker -d
-EOF
-fi
-
-echo "Starting dockerd..."
-start dockerd > /dev/null
-
-echo "Done."
-echo

+ 67 - 0
contrib/mkimage-arch.sh

@@ -0,0 +1,67 @@
+#!/bin/bash
+# Generate a minimal filesystem for archlinux and load it into the local
+# docker as "archlinux"
+# requires root
+set -e
+
+PACSTRAP=$(which pacstrap)
+[ "$PACSTRAP" ] || {
+    echo "Could not find pacstrap. Run pacman -S arch-install-scripts"
+    exit 1
+}
+EXPECT=$(which expect)
+[ "$EXPECT" ] || {
+    echo "Could not find expect. Run pacman -S expect"
+    exit 1
+}
+
+ROOTFS=~/rootfs-arch-$$-$RANDOM
+mkdir $ROOTFS
+
+#packages to ignore for space savings
+PKGIGNORE=linux,jfsutils,lvm2,cryptsetup,groff,man-db,man-pages,mdadm,pciutils,pcmciautils,reiserfsprogs,s-nail,xfsprogs
+ 
+expect <<EOF
+  set timeout 60
+  set send_slow {1 1}
+  spawn pacstrap -c -d -G -i $ROOTFS base haveged --ignore $PKGIGNORE
+  expect {
+    "Install anyway?" { send n\r; exp_continue }
+    "(default=all)" { send \r; exp_continue }
+    "Proceed with installation?" { send "\r"; exp_continue }
+    "skip the above package" {send "y\r"; exp_continue }
+    "checking" { exp_continue }
+    "loading" { exp_continue }
+    "installing" { exp_continue }
+  }
+EOF
+
+arch-chroot $ROOTFS /bin/sh -c "haveged -w 1024; pacman-key --init; pkill haveged; pacman -Rs --noconfirm haveged; pacman-key --populate archlinux"
+arch-chroot $ROOTFS /bin/sh -c "ln -s /usr/share/zoneinfo/UTC /etc/localtime"
+cat > $ROOTFS/etc/locale.gen <<DELIM
+en_US.UTF-8 UTF-8
+en_US ISO-8859-1
+DELIM
+arch-chroot $ROOTFS locale-gen
+arch-chroot $ROOTFS /bin/sh -c 'echo "Server = http://mirrors.kernel.org/archlinux/\$repo/os/\$arch" > /etc/pacman.d/mirrorlist'
+
+# udev doesn't work in containers, rebuild /dev
+DEV=${ROOTFS}/dev
+mv ${DEV} ${DEV}.old
+mkdir -p ${DEV}
+mknod -m 666 ${DEV}/null c 1 3
+mknod -m 666 ${DEV}/zero c 1 5
+mknod -m 666 ${DEV}/random c 1 8
+mknod -m 666 ${DEV}/urandom c 1 9
+mkdir -m 755 ${DEV}/pts
+mkdir -m 1777 ${DEV}/shm
+mknod -m 666 ${DEV}/tty c 5 0
+mknod -m 600 ${DEV}/console c 5 1
+mknod -m 666 ${DEV}/tty0 c 4 0
+mknod -m 666 ${DEV}/full c 1 7
+mknod -m 600 ${DEV}/initctl p
+mknod -m 666 ${DEV}/ptmx c 5 2
+
+tar --numeric-owner -C $ROOTFS -c . | docker import - archlinux
+docker run -i -t archlinux echo Success.
+rm -rf $ROOTFS
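
A sketch of a typical run on an Arch host, using the packages the checks above ask for; the script requires root:

    sudo pacman -S --needed arch-install-scripts expect
    sudo ./contrib/mkimage-arch.sh
    docker run -i -t archlinux /bin/bash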

+ 1 - 1
contrib/mkimage-busybox.sh

@@ -35,5 +35,5 @@ do
     cp -a /dev/$X dev
 done
 
-tar -cf- . | docker import - busybox
+tar --numeric-owner -cf- . | docker import - busybox
 docker run -i -u root busybox /bin/echo Success.

+ 0 - 66
contrib/mkimage-debian.sh

@@ -1,66 +0,0 @@
-#!/bin/bash
-set -e
-
-# these should match the names found at http://www.debian.org/releases/
-stableSuite='wheezy'
-testingSuite='jessie'
-unstableSuite='sid'
-
-variant='minbase'
-include='iproute,iputils-ping'
-
-repo="$1"
-suite="${2:-$stableSuite}"
-mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided
-
-if [ ! "$repo" ]; then
-	echo >&2 "usage: $0 repo [suite [mirror]]"
-	echo >&2 "   ie: $0 tianon/debian squeeze"
-	exit 1
-fi
-
-target="/tmp/docker-rootfs-debian-$suite-$$-$RANDOM"
-
-cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
-returnTo="$(pwd -P)"
-
-set -x
-
-# bootstrap
-mkdir -p "$target"
-sudo debootstrap --verbose --variant="$variant" --include="$include" "$suite" "$target" "$mirror"
-
-cd "$target"
-
-# prevent init scripts from running during install/update
-echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null
-sudo chmod +x usr/sbin/policy-rc.d
-# see https://github.com/dotcloud/docker/issues/446#issuecomment-16953173
-
-# shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB)
-sudo chroot . apt-get clean
-
-# while we're at it, apt is unnecessarily slow inside containers
-#  this forces dpkg not to call sync() after package extraction and speeds up install
-echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null
-#  we don't need an apt cache in a container
-echo 'Acquire::http {No-Cache=True;};' | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
-
-# create the image (and tag $repo:$suite)
-sudo tar -c . | docker import - $repo $suite
-
-# test the image
-docker run -i -t $repo:$suite echo success
-
-if [ "$suite" = "$stableSuite" -o "$suite" = 'stable' ]; then
-	# tag latest
-	docker tag $repo:$suite $repo latest
-	
-	# tag the specific debian release version
-	ver=$(docker run $repo:$suite cat /etc/debian_version)
-	docker tag $repo:$suite $repo $ver
-fi
-
-# cleanup
-cd "$returnTo"
-sudo rm -rf "$target"

+ 233 - 0
contrib/mkimage-debootstrap.sh

@@ -0,0 +1,233 @@
+#!/bin/bash
+set -e
+
+variant='minbase'
+include='iproute,iputils-ping'
+arch='amd64' # intentionally undocumented for now
+skipDetection=
+strictDebootstrap=
+justTar=
+
+usage() {
+	echo >&2
+	
+	echo >&2 "usage: $0 [options] repo suite [mirror]"
+	
+	echo >&2
+	echo >&2 'options: (not recommended)'
+	echo >&2 "  -p set an http_proxy for debootstrap"
+	echo >&2 "  -v $variant # change default debootstrap variant"
+	echo >&2 "  -i $include # change default package includes"
+	echo >&2 "  -d # strict debootstrap (do not apply any docker-specific tweaks)"
+	echo >&2 "  -s # skip version detection and tagging (ie, precise also tagged as 12.04)"
+	echo >&2 "     # note that this will also skip adding universe and/or security/updates to sources.list"
+	echo >&2 "  -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)"
+	
+	echo >&2
+	echo >&2 "   ie: $0 username/debian squeeze"
+	echo >&2 "       $0 username/debian squeeze http://ftp.uk.debian.org/debian/"
+	
+	echo >&2
+	echo >&2 "   ie: $0 username/ubuntu precise"
+	echo >&2 "       $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/"
+	
+	echo >&2
+	echo >&2 "   ie: $0 -t precise.tar.bz2 precise"
+	echo >&2 "       $0 -t wheezy.tgz wheezy"
+	echo >&2 "       $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/"
+	
+	echo >&2
+}
+
+# these should match the names found at http://www.debian.org/releases/
+debianStable=wheezy
+debianUnstable=sid
+# this should match the name found at http://releases.ubuntu.com/
+ubuntuLatestLTS=precise
+
+while getopts v:i:a:p:dst name; do
+	case "$name" in
+		p)
+			http_proxy="$OPTARG"
+			;;
+		v)
+			variant="$OPTARG"
+			;;
+		i)
+			include="$OPTARG"
+			;;
+		a)
+			arch="$OPTARG"
+			;;
+		d)
+			strictDebootstrap=1
+			;;
+		s)
+			skipDetection=1
+			;;
+		t)
+			justTar=1
+			;;
+		?)
+			usage
+			exit 0
+			;;
+	esac
+done
+shift $(($OPTIND - 1))
+
+repo="$1"
+suite="$2"
+mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided
+
+if [ ! "$repo" ] || [ ! "$suite" ]; then
+	usage
+	exit 1
+fi
+
+# some rudimentary detection for whether we need to "sudo" our docker calls
+docker=''
+if docker version > /dev/null 2>&1; then
+	docker='docker'
+elif sudo docker version > /dev/null 2>&1; then
+	docker='sudo docker'
+elif command -v docker > /dev/null 2>&1; then
+	docker='docker'
+else
+	echo >&2 "warning: either docker isn't installed, or your current user cannot run it;"
+	echo >&2 "         this script is not likely to work as expected"
+	sleep 3
+	docker='docker' # give us a command-not-found later
+fi
+
+# make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory
+if [ "$justTar" ]; then
+	if [ ! -d "$(dirname "$repo")" ]; then
+		echo >&2 "error: $(dirname "$repo") does not exist"
+		exit 1
+	fi
+	repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")"
+fi
+
+# will be filled in later, if [ -z "$skipDetection" ]
+lsbDist=''
+
+target="/tmp/docker-rootfs-debootstrap-$suite-$$-$RANDOM"
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+returnTo="$(pwd -P)"
+
+set -x
+
+# bootstrap
+mkdir -p "$target"
+sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror"
+
+cd "$target"
+
+if [ -z "$strictDebootstrap" ]; then
+	# prevent init scripts from running during install/update
+	#  policy-rc.d (for most scripts)
+	echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null
+	sudo chmod +x usr/sbin/policy-rc.d
+	#  initctl (for some pesky upstart scripts)
+	sudo chroot . dpkg-divert --local --rename --add /sbin/initctl
+	sudo ln -sf /bin/true sbin/initctl
+	# see https://github.com/dotcloud/docker/issues/446#issuecomment-16953173
+	
+	# shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB)
+	sudo chroot . apt-get clean
+	
+	# while we're at it, apt is unnecessarily slow inside containers
+	#  this forces dpkg not to call sync() after package extraction and speeds up install
+	#    the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization
+	echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null
+	#  we want to effectively run "apt-get clean" after every install to keep images small
+	echo 'DPkg::Post-Invoke {"/bin/rm -f /var/cache/apt/archives/*.deb || true";};' | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
+	
+	# helpful undo lines for each the above tweaks (for lack of a better home to keep track of them):
+	#  rm /usr/sbin/policy-rc.d
+	#  rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl
+	#  rm /etc/dpkg/dpkg.cfg.d/02apt-speedup
+	#  rm /etc/apt/apt.conf.d/no-cache
+	
+	if [ -z "$skipDetection" ]; then
+		# see also rudimentary platform detection in hack/install.sh
+		lsbDist=''
+		if [ -r etc/lsb-release ]; then
+			lsbDist="$(. etc/lsb-release && echo "$DISTRIB_ID")"
+		fi
+		if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then
+			lsbDist='Debian'
+		fi
+		
+		case "$lsbDist" in
+			Debian)
+				# add the updates and security repositories
+				if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then
+					# ${suite}-updates only applies to non-unstable
+					sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list
+					
+					# same for security updates
+					echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null
+				fi
+				;;
+			Ubuntu)
+				# add the universe, updates, and security repositories
+				sudo sed -i "
+					s/ $suite main$/ $suite main universe/; p;
+					s/ $suite main/ ${suite}-updates main/; p;
+					s/ $suite-updates main/ ${suite}-security main/
+				" etc/apt/sources.list
+				;;
+		esac
+	fi
+fi
+
+if [ "$justTar" ]; then
+	# create the tarball file so it has the right permissions (ie, not root)
+	touch "$repo"
+	
+	# fill the tarball
+	sudo tar --numeric-owner -caf "$repo" .
+else
+	# create the image (and tag $repo:$suite)
+	sudo tar --numeric-owner -c . | $docker import - $repo $suite
+	
+	# test the image
+	$docker run -i -t $repo:$suite echo success
+	
+	if [ -z "$skipDetection" ]; then
+		case "$lsbDist" in
+			Debian)
+				if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then
+					# tag latest
+					$docker tag $repo:$suite $repo latest
+					
+					if [ -r etc/debian_version ]; then
+						# tag the specific debian release version (which is only reasonable to tag on debian stable)
+						ver=$(cat etc/debian_version)
+						$docker tag $repo:$suite $repo $ver
+					fi
+				fi
+				;;
+			Ubuntu)
+				if [ "$suite" = "$ubuntuLatestLTS" ]; then
+					# tag latest
+					$docker tag $repo:$suite $repo latest
+				fi
+				if [ -r etc/lsb-release ]; then
+					lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")"
+					if [ "$lsbRelease" ]; then
+						# tag specific Ubuntu version number, if available (12.04, etc.)
+						$docker tag $repo:$suite $repo $lsbRelease
+					fi
+				fi
+				;;
+		esac
+	fi
+fi
+
+# cleanup
+cd "$returnTo"
+sudo rm -rf "$target"

+ 1 - 1
contrib/mkimage-unittest.sh

@@ -44,6 +44,6 @@ do
 done
 
 chmod 0755 $ROOTFS # See #486
-tar -cf- . | docker import - docker-ut
+tar --numeric-owner -cf- . | docker import - docker-ut
 docker run -i -u root docker-ut /bin/echo Success.
 rm -rf $ROOTFS

+ 22 - 0
contrib/vim-syntax/LICENSE

@@ -0,0 +1,22 @@
+Copyright (c) 2013 Honza Pokorny
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 23 - 0
contrib/vim-syntax/README.md

@@ -0,0 +1,23 @@
+dockerfile.vim
+==============
+
+Syntax highlighting for Dockerfiles
+
+Installation
+------------
+
+Via pathogen, the usual way...
+
+Features
+--------
+
+The syntax highlighting includes:
+
+* The directives (e.g. `FROM`)
+* Strings
+* Comments
+
+License
+-------
+
+BSD, short and sweet
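
A minimal sketch of a pathogen-style install (the bundle directory name is an assumption):

    mkdir -p ~/.vim/bundle
    cp -r contrib/vim-syntax ~/.vim/bundle/dockerfile-vim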

+ 18 - 0
contrib/vim-syntax/doc/dockerfile.txt

@@ -0,0 +1,18 @@
+*dockerfile.txt*  Syntax highlighting for Dockerfiles
+
+Author: Honza Pokorny <http://honza.ca>
+License: BSD
+
+INSTALLATION                                                     *installation*
+
+Drop it on your Pathogen path and you're all set.
+
+FEATURES                                                             *features*
+
+The syntax highlighting includes:
+
+* The directives (e.g. FROM)
+* Strings
+* Comments
+
+ vim:tw=78:et:ft=help:norl:

+ 1 - 0
contrib/vim-syntax/ftdetect/dockerfile.vim

@@ -0,0 +1 @@
+au BufNewFile,BufRead Dockerfile set filetype=dockerfile

+ 24 - 0
contrib/vim-syntax/syntax/dockerfile.vim

@@ -0,0 +1,24 @@
+" dockerfile.vim - Syntax highlighting for Dockerfiles
+" Maintainer:   Honza Pokorny <http://honza.ca>
+" Version:      0.5
+
+
+if exists("b:current_syntax")
+    finish
+endif
+
+let b:current_syntax = "dockerfile"
+
+syntax case ignore
+
+syntax match dockerfileKeyword /\v^\s*(FROM|MAINTAINER|RUN|CMD|EXPOSE|ENV|ADD)\s/
+syntax match dockerfileKeyword /\v^\s*(ENTRYPOINT|VOLUME|USER|WORKDIR)\s/
+highlight link dockerfileKeyword Keyword
+
+syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/
+highlight link dockerfileString String
+
+syntax match dockerfileComment "\v^\s*#.*$"
+highlight link dockerfileComment Comment
+
+set commentstring=#\ %s

+ 54 - 26
docker/docker.go

@@ -4,9 +4,11 @@ import (
 	"flag"
 	"flag"
 	"fmt"
 	"fmt"
 	"github.com/dotcloud/docker"
 	"github.com/dotcloud/docker"
+	"github.com/dotcloud/docker/sysinit"
 	"github.com/dotcloud/docker/utils"
 	"github.com/dotcloud/docker/utils"
 	"io/ioutil"
 	"io/ioutil"
 	"log"
 	"log"
+	"net"
 	"os"
 	"os"
 	"os/signal"
 	"os/signal"
 	"strconv"
 	"strconv"
@@ -22,7 +24,7 @@ var (
 func main() {
 func main() {
 	if selfPath := utils.SelfPath(); selfPath == "/sbin/init" || selfPath == "/.dockerinit" {
 	if selfPath := utils.SelfPath(); selfPath == "/sbin/init" || selfPath == "/.dockerinit" {
 		// Running in init mode
 		// Running in init mode
-		docker.SysInit()
+		sysinit.SysInit()
 		return
 		return
 	}
 	}
 	// FIXME: Switch d and D ? (to be more sshd like)
 	// FIXME: Switch d and D ? (to be more sshd like)
@@ -35,9 +37,14 @@ func main() {
 	flGraphPath := flag.String("g", "/var/lib/docker", "Path to graph storage base dir.")
 	flGraphPath := flag.String("g", "/var/lib/docker", "Path to graph storage base dir.")
 	flEnableCors := flag.Bool("api-enable-cors", false, "Enable CORS requests in the remote api.")
 	flEnableCors := flag.Bool("api-enable-cors", false, "Enable CORS requests in the remote api.")
 	flDns := flag.String("dns", "", "Set custom dns servers")
 	flDns := flag.String("dns", "", "Set custom dns servers")
-	flHosts := docker.ListOpts{fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET)}
+	flHosts := utils.ListOpts{fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET)}
 	flag.Var(&flHosts, "H", "tcp://host:port to bind/connect to or unix://path/to/socket to use")
 	flag.Var(&flHosts, "H", "tcp://host:port to bind/connect to or unix://path/to/socket to use")
+	flEnableIptables := flag.Bool("iptables", true, "Disable iptables within docker")
+	flDefaultIp := flag.String("ip", "0.0.0.0", "Default ip address to use when binding a containers ports")
+	flInterContainerComm := flag.Bool("icc", true, "Enable inter-container communication")
+
 	flag.Parse()
 	flag.Parse()
+
 	if *flVersion {
 	if *flVersion {
 		showVersion()
 		showVersion()
 		return
 		return
@@ -46,13 +53,17 @@ func main() {
 		flHosts = flHosts[1:] //trick to display a nice default value in the usage
 		flHosts = flHosts[1:] //trick to display a nice default value in the usage
 	}
 	}
 	for i, flHost := range flHosts {
 	for i, flHost := range flHosts {
-		flHosts[i] = utils.ParseHost(docker.DEFAULTHTTPHOST, docker.DEFAULTHTTPPORT, flHost)
+		host, err := utils.ParseHost(docker.DEFAULTHTTPHOST, docker.DEFAULTHTTPPORT, flHost)
+		if err == nil {
+			flHosts[i] = host
+		} else {
+			log.Fatal(err)
+		}
 	}
 	}
 
 
+	bridge := docker.DefaultNetworkBridge
 	if *bridgeName != "" {
 	if *bridgeName != "" {
-		docker.NetworkBridgeIface = *bridgeName
-	} else {
-		docker.NetworkBridgeIface = docker.DefaultNetworkBridge
+		bridge = *bridgeName
 	}
 	}
 	if *flDebug {
 	if *flDebug {
 		os.Setenv("DEBUG", "1")
 		os.Setenv("DEBUG", "1")
@@ -64,14 +75,31 @@ func main() {
 			flag.Usage()
 			flag.Usage()
 			return
 			return
 		}
 		}
-		if err := daemon(*pidfile, *flGraphPath, flHosts, *flAutoRestart, *flEnableCors, *flDns); err != nil {
+		var dns []string
+		if *flDns != "" {
+			dns = []string{*flDns}
+		}
+
+		ip := net.ParseIP(*flDefaultIp)
+
+		config := &docker.DaemonConfig{
+			Pidfile:                     *pidfile,
+			GraphPath:                   *flGraphPath,
+			AutoRestart:                 *flAutoRestart,
+			EnableCors:                  *flEnableCors,
+			Dns:                         dns,
+			EnableIptables:              *flEnableIptables,
+			BridgeIface:                 bridge,
+			ProtoAddresses:              flHosts,
+			DefaultIp:                   ip,
+			InterContainerCommunication: *flInterContainerComm,
+		}
+		if err := daemon(config); err != nil {
 			log.Fatal(err)
 			log.Fatal(err)
-			os.Exit(-1)
 		}
 		}
 	} else {
 	} else {
 		if len(flHosts) > 1 {
 		if len(flHosts) > 1 {
 			log.Fatal("Please specify only one -H")
 			log.Fatal("Please specify only one -H")
-			return
 		}
 		}
 		protoAddrParts := strings.SplitN(flHosts[0], "://", 2)
 		protoAddrParts := strings.SplitN(flHosts[0], "://", 2)
 		if err := docker.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil {
 		if err := docker.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil {
@@ -79,7 +107,6 @@ func main() {
 				os.Exit(sterr.Status)
 				os.Exit(sterr.Status)
 			}
 			}
 			log.Fatal(err)
 			log.Fatal(err)
-			os.Exit(-1)
 		}
 		}
 	}
 	}
 }
 }
@@ -115,30 +142,30 @@ func removePidFile(pidfile string) {
 	}
 	}
 }
 }
 
 
-func daemon(pidfile string, flGraphPath string, protoAddrs []string, autoRestart, enableCors bool, flDns string) error {
-	if err := createPidFile(pidfile); err != nil {
+func daemon(config *docker.DaemonConfig) error {
+	if err := createPidFile(config.Pidfile); err != nil {
 		log.Fatal(err)
 		log.Fatal(err)
 	}
 	}
-	defer removePidFile(pidfile)
+	defer removePidFile(config.Pidfile)
+
+	server, err := docker.NewServer(config)
+	if err != nil {
+		return err
+	}
+	defer server.Close()
 
 
 	c := make(chan os.Signal, 1)
 	c := make(chan os.Signal, 1)
 	signal.Notify(c, os.Interrupt, os.Kill, os.Signal(syscall.SIGTERM))
 	signal.Notify(c, os.Interrupt, os.Kill, os.Signal(syscall.SIGTERM))
 	go func() {
 	go func() {
 		sig := <-c
 		sig := <-c
 		log.Printf("Received signal '%v', exiting\n", sig)
 		log.Printf("Received signal '%v', exiting\n", sig)
-		removePidFile(pidfile)
+		server.Close()
+		removePidFile(config.Pidfile)
 		os.Exit(0)
 		os.Exit(0)
 	}()
 	}()
-	var dns []string
-	if flDns != "" {
-		dns = []string{flDns}
-	}
-	server, err := docker.NewServer(flGraphPath, autoRestart, enableCors, dns)
-	if err != nil {
-		return err
-	}
-	chErrors := make(chan error, len(protoAddrs))
-	for _, protoAddr := range protoAddrs {
+
+	chErrors := make(chan error, len(config.ProtoAddresses))
+	for _, protoAddr := range config.ProtoAddresses {
 		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
 		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
 		if protoAddrParts[0] == "unix" {
 		if protoAddrParts[0] == "unix" {
 			syscall.Unlink(protoAddrParts[1])
 			syscall.Unlink(protoAddrParts[1])
@@ -147,14 +174,15 @@ func daemon(pidfile string, flGraphPath string, protoAddrs []string, autoRestart
 				log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
 				log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
 			}
 			}
 		} else {
 		} else {
+			server.Close()
+			removePidFile(config.Pidfile)
 			log.Fatal("Invalid protocol format.")
 			log.Fatal("Invalid protocol format.")
-			os.Exit(-1)
 		}
 		}
 		go func() {
 		go func() {
 			chErrors <- docker.ListenAndServe(protoAddrParts[0], protoAddrParts[1], server, true)
 			chErrors <- docker.ListenAndServe(protoAddrParts[0], protoAddrParts[1], server, true)
 		}()
 		}()
 	}
 	}
-	for i := 0; i < len(protoAddrs); i += 1 {
+	for i := 0; i < len(config.ProtoAddresses); i += 1 {
 		err := <-chErrors
 		err := <-chErrors
 		if err != nil {
 		if err != nil {
 			return err
 			return err
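
For context, a hedged example of combining the daemon options introduced above on one command line; the values are illustrative only:

    docker -d -H unix:///var/run/docker.sock \
        -ip 127.0.0.1 -iptables=true -icc=false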

+ 16 - 0
dockerinit/dockerinit.go

@@ -0,0 +1,16 @@
+package main
+
+import (
+	"github.com/dotcloud/docker/sysinit"
+)
+
+var (
+	GITCOMMIT string
+	VERSION   string
+)
+
+func main() {
+	// Running in init mode
+	sysinit.SysInit()
+	return
+}

+ 6 - 1
docs/Dockerfile

@@ -1,11 +1,16 @@
 from ubuntu:12.04
 maintainer Nick Stinemates
+#
+#    docker build -t docker:docs . && docker run -p 8000:8000 docker:docs
+#
 
 run apt-get update
 run apt-get install -y python-setuptools make
 run easy_install pip
+#from docs/requirements.txt, but here to increase cacheability
+run pip install Sphinx==1.1.3
+run pip install sphinxcontrib-httpdomain==1.1.8
 add . /docs
-run pip install -r /docs/requirements.txt
 run cd /docs; make docs
 
 expose 8000

+ 88 - 25
docs/README.md

@@ -1,38 +1,94 @@
 Docker Documentation
 Docker Documentation
 ====================
 ====================
 
 
-Documentation
--------------
-This is your definite place to contribute to the docker documentation. After each push to master the documentation
-is automatically generated and made available on [docs.docker.io](http://docs.docker.io)
-
-Each of the .rst files under sources reflects a page on the documentation. 
+Overview
+--------
 
 
-Installation
-------------
+The source for Docker documentation is here under ``sources/`` in the
+form of .rst files. These files use
+[reStructuredText](http://docutils.sourceforge.net/rst.html)
+formatting with [Sphinx](http://sphinx-doc.org/) extensions for
+structure, cross-linking and indexing.
+
+The HTML files are built and hosted on
+[readthedocs.org](https://readthedocs.org/projects/docker/), appearing
+via proxy on https://docs.docker.io. The HTML files update
+automatically after each change to the master or release branch of the
+[docker files on GitHub](https://github.com/dotcloud/docker) thanks to
+post-commit hooks. The "release" branch maps to the "latest"
+documentation and the "master" branch maps to the "master"
+documentation. 
+
+**Warning**: The "master" documentation may include features not yet
+part of any official docker release. "Master" docs should be used only
+for understanding bleeding-edge development and "latest" should be
+used for the latest official release.
+
+If you need to manually trigger a build of an existing branch, then
+you can do that through the [readthedocs
+interface](https://readthedocs.org/builds/docker/). If you would like
+to add new build targets, including new branches or tags, then you
+must contact one of the existing maintainers and get your
+readthedocs.org account added to the maintainers list, or just file an
+issue on GitHub describing the branch/tag and why it needs to be added
+to the docs, and one of the maintainers will add it for you.
+
+Getting Started
+---------------
+
+To edit and test the docs, you'll need to install the Sphinx tool and
+its dependencies. There are two main ways to install this tool:
+
+###Native Installation
 
 
-* Work in your own fork of the code, we accept pull requests.
 * Install sphinx: `pip install sphinx`
 * Install sphinx: `pip install sphinx`
-    * Mac OS X: `[sudo] pip-2.7 install sphinx`)
+    * Mac OS X: `[sudo] pip-2.7 install sphinx`
 * Install sphinx httpdomain contrib package: `pip install sphinxcontrib-httpdomain`
 * Install sphinx httpdomain contrib package: `pip install sphinxcontrib-httpdomain`
     * Mac OS X: `[sudo] pip-2.7 install sphinxcontrib-httpdomain`
     * Mac OS X: `[sudo] pip-2.7 install sphinxcontrib-httpdomain`
 * If pip is not available you can probably install it using your favorite package manager as **python-pip**
 * If pip is not available you can probably install it using your favorite package manager as **python-pip**
 
 
+###Alternative Installation: Docker Container
+
+If you're running ``docker`` on your development machine then you may
+find it easier and cleaner to use the Dockerfile. This installs Sphinx
+in a container, adds the local ``docs/`` directory and builds the HTML
+docs inside the container, even starting a simple HTTP server on port
+8000 so that you can connect and see your changes. Just run ``docker
+build .`` and run the resulting image. This is the equivalent to
+``make clean server`` since each container starts clean.
+
+In the ``docs/`` directory, run:
+    ```docker build -t docker:docs . && docker run -p 8000:8000 docker:docs```
+
 Usage
 Usage
 -----
 -----
-* Change the `.rst` files with your favorite editor to your liking.
-* Run `make docs` to clean up old files and generate new ones.
-* Your static website can now be found in the `_build` directory.
-* To preview what you have generated run `make server` and open http://localhost:8000/ in your favorite browser.
+* Follow the contribution guidelines (``../CONTRIBUTING.md``)
+* Work in your own fork of the code, we accept pull requests.
+* Change the ``.rst`` files with your favorite editor -- try to keep the
+  lines short and respect RST and Sphinx conventions. 
+* Run ``make clean docs`` to clean up old files and generate new ones,
+  or just ``make docs`` to update after small changes.
+* Your static website can now be found in the ``_build`` directory.
+* To preview what you have generated run ``make server`` and open
+  http://localhost:8000/ in your favorite browser.
+
+``make clean docs`` must complete without any warnings or errors.
 
 
 Working using GitHub's file editor
 Working using GitHub's file editor
 ----------------------------------
 ----------------------------------
-Alternatively, for small changes and typo's you might want to use GitHub's built in file editor. It allows
-you to preview your changes right online. Just be careful not to create many commits.
+
+Alternatively, for small changes and typos you might want to use
+GitHub's built in file editor. It allows you to preview your changes
+right online (though there can be some differences between GitHub
+markdown and Sphinx RST). Just be careful not to create many commits.
 
 
 Images
 Images
 ------
 ------
-When you need to add images, try to make them as small as possible (e.g. as gif).
+
+When you need to add images, try to make them as small as possible
+(e.g. as gif). Usually images should go in the same directory as the
+.rst file which references them, or in a subdirectory if one already
+exists.
 
 
 Notes
 Notes
 -----
 -----
@@ -41,7 +97,7 @@ lessc ``lessc main.less`` or watched using watch-lessc ``watch-lessc -i main.les
 
 
 Guides on using sphinx
 Guides on using sphinx
 ----------------------
 ----------------------
-* To make links to certain pages create a link target like so:
+* To make links to certain sections create a link target like so:
 
 
   ```
   ```
     .. _hello_world:
     .. _hello_world:
@@ -52,7 +108,10 @@ Guides on using sphinx
     This is.. (etc.)
     This is.. (etc.)
   ```
   ```
 
 
-  The ``_hello_world:`` will make it possible to link to this position (page and marker) from all other pages.
+  The ``_hello_world:`` will make it possible to link to this position
+  (page and section heading) from all other pages. See the [Sphinx
+  docs](http://sphinx-doc.org/markup/inline.html#role-ref) for more
+  information and examples.
 
 
 * Notes, warnings and alarms
 * Notes, warnings and alarms
 
 
@@ -68,13 +127,17 @@ Guides on using sphinx
 
 
 * Code examples
 * Code examples
 
 
-  Start without $, so it's easy to copy and paste.
+  * Start without $, so it's easy to copy and paste.
+  * Use "sudo" with docker to ensure that your command is runnable
+    even if they haven't [used the *docker*
+    group](http://docs.docker.io/en/latest/use/basics/#why-sudo).
 
 
 Manpages
 Manpages
 --------
 --------
 
 
-* To make the manpages, simply run 'make man'. Please note there is a bug in spinx 1.1.3 which makes this fail.
-Upgrade to the latest version of sphinx.
-* Then preview the manpage by running `man _build/man/docker.1`, where _build/man/docker.1 is the path to the generated
-manfile
-* The manpages are also autogenerated by our hosted readthedocs here: http://docs-docker.dotcloud.com/projects/docker/downloads/
+* To make the manpages, run ``make man``. Please note there is a bug
+  in spinx 1.1.3 which makes this fail.  Upgrade to the latest version
+  of Sphinx.
+* Then preview the manpage by running ``man _build/man/docker.1``,
+  where ``_build/man/docker.1`` is the path to the generated manfile
+

+ 21 - 16
docs/sources/api/docker_remote_api_v1.0.rst

@@ -970,28 +970,33 @@ Create a new image from a container's changes
 
 
 	**Example request**:
 	**Example request**:
 
 
-        .. sourcecode:: http
+    .. sourcecode:: http
 
 
-           POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+       POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+       Content-Type: application/json
+   
+       {
+           "Cmd": ["cat", "/world"],
+           "PortSpecs":["22"]
+       }
 
 
-        **Example response**:
+    **Example response**:
 
 
-        .. sourcecode:: http
+    .. sourcecode:: http
 
 
-           HTTP/1.1 201 OK
-	   Content-Type: application/vnd.docker.raw-stream
+       HTTP/1.1 201 OK
+       Content-Type: application/vnd.docker.raw-stream
 
 
-           {"Id":"596069db4bf5"}
+       {"Id":"596069db4bf5"}
 
 
-	:query container: source container
-	:query repo: repository
-	:query tag: tag
-	:query m: commit message
-	:query author: author (eg. "John Hannibal Smith <hannibal@a-team.com>")
-	:query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]})
-        :statuscode 201: no error
-	:statuscode 404: no such container
-        :statuscode 500: server error
+    :query container: source container
+    :query repo: repository
+    :query tag: tag
+    :query m: commit message
+    :query author: author (eg. "John Hannibal Smith <hannibal@a-team.com>")
+    :statuscode 201: no error
+    :statuscode 404: no such container
+    :statuscode 500: server error
 
 
 
 
 3. Going further
 3. Going further

+ 31 - 26
docs/sources/api/docker_remote_api_v1.1.rst

@@ -977,32 +977,37 @@ Create a new image from a container's changes
 
 
 .. http:post:: /commit
 .. http:post:: /commit
 
 
-	Create a new image from a container's changes
-
-	**Example request**:
-
-        .. sourcecode:: http
-
-           POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
-
-        **Example response**:
-
-        .. sourcecode:: http
-
-           HTTP/1.1 201 OK
-	   Content-Type: application/vnd.docker.raw-stream
-
-           {"Id":"596069db4bf5"}
-
-	:query container: source container
-	:query repo: repository
-	:query tag: tag
-	:query m: commit message
-	:query author: author (eg. "John Hannibal Smith <hannibal@a-team.com>")
-	:query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]})
-        :statuscode 201: no error
-	:statuscode 404: no such container
-        :statuscode 500: server error
+    Create a new image from a container's changes
+
+    **Example request**:
+    
+    .. sourcecode:: http
+    
+       POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+       Content-Type: application/json
+   
+       {
+           "Cmd": ["cat", "/world"],
+           "PortSpecs":["22"]
+       }
+    
+    **Example response**:
+    
+    .. sourcecode:: http
+    
+        HTTP/1.1 201 OK
+        Content-Type: application/vnd.docker.raw-stream
+
+        {"Id":"596069db4bf5"}
+
+    :query container: source container
+    :query repo: repository
+    :query tag: tag
+    :query m: commit message
+    :query author: author (eg. "John Hannibal Smith <hannibal@a-team.com>")
+    :statuscode 201: no error
+    :statuscode 404: no such container
+    :statuscode 500: server error
 
 
 
 
 3. Going further
 3. Going further

+ 22 - 17
docs/sources/api/docker_remote_api_v1.2.rst

@@ -985,32 +985,37 @@ Create a new image from a container's changes
 
 
 .. http:post:: /commit
 .. http:post:: /commit
 
 
-	Create a new image from a container's changes
+    Create a new image from a container's changes
 
 
-	**Example request**:
+    **Example request**:
 
 
-        .. sourcecode:: http
+    .. sourcecode:: http
 
 
-           POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+       POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+       Content-Type: application/json
+   
+       {
+           "Cmd": ["cat", "/world"],
+           "PortSpecs":["22"]
+       }
 
 
-        **Example response**:
+    **Example response**:
 
 
-        .. sourcecode:: http
+    .. sourcecode:: http
 
 
-           HTTP/1.1 201 OK
+       HTTP/1.1 201 OK
 	   Content-Type: application/vnd.docker.raw-stream
 	   Content-Type: application/vnd.docker.raw-stream
 
 
-           {"Id":"596069db4bf5"}
+       {"Id":"596069db4bf5"}
 
 
-	:query container: source container
-	:query repo: repository
-	:query tag: tag
-	:query m: commit message
-	:query author: author (eg. "John Hannibal Smith <hannibal@a-team.com>")
-	:query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]})
-        :statuscode 201: no error
-	:statuscode 404: no such container
-        :statuscode 500: server error
+    :query container: source container
+    :query repo: repository
+    :query tag: tag
+    :query m: commit message
+    :query author: author (eg. "John Hannibal Smith <hannibal@a-team.com>")
+    :statuscode 201: no error
+    :statuscode 404: no such container
+    :statuscode 500: server error
 
 
 
 
 3. Going further
 3. Going further

+ 22 - 17
docs/sources/api/docker_remote_api_v1.3.rst

@@ -1034,32 +1034,37 @@ Create a new image from a container's changes
 
 
 .. http:post:: /commit
 .. http:post:: /commit
 
 
-	Create a new image from a container's changes
+    Create a new image from a container's changes
 
 
-	**Example request**:
+    **Example request**:
 
 
-        .. sourcecode:: http
+    .. sourcecode:: http
 
 
-           POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+       POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+       Content-Type: application/json
+   
+       {
+           "Cmd": ["cat", "/world"],
+           "PortSpecs":["22"]
+       }
 
 
-        **Example response**:
+    **Example response**:
 
 
-        .. sourcecode:: http
+    .. sourcecode:: http
 
 
-           HTTP/1.1 201 OK
+       HTTP/1.1 201 OK
 	   Content-Type: application/vnd.docker.raw-stream
 	   Content-Type: application/vnd.docker.raw-stream
 
 
-           {"Id":"596069db4bf5"}
+       {"Id":"596069db4bf5"}
 
 
-	:query container: source container
-	:query repo: repository
-	:query tag: tag
-	:query m: commit message
-	:query author: author (eg. "John Hannibal Smith <hannibal@a-team.com>")
-	:query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]})
-        :statuscode 201: no error
-	:statuscode 404: no such container
-        :statuscode 500: server error
+    :query container: source container
+    :query repo: repository
+    :query tag: tag
+    :query m: commit message
+    :query author: author (eg. "John Hannibal Smith <hannibal@a-team.com>")
+    :statuscode 201: no error
+    :statuscode 404: no such container
+    :statuscode 500: server error
 
 
 
 
 Monitor Docker's events
 Monitor Docker's events

+ 10 - 5
docs/sources/api/docker_remote_api_v1.4.rst

@@ -1084,23 +1084,28 @@ Create a new image from a container's changes
 
 
     .. sourcecode:: http
     .. sourcecode:: http
 
 
-        POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+       POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+       Content-Type: application/json
+   
+       {
+           "Cmd": ["cat", "/world"],
+           "PortSpecs":["22"]
+       }
 
 
     **Example response**:
     **Example response**:
 
 
     .. sourcecode:: http
     .. sourcecode:: http
 
 
-        HTTP/1.1 201 OK
-	    Content-Type: application/vnd.docker.raw-stream
+       HTTP/1.1 201 OK
+	   Content-Type: application/vnd.docker.raw-stream
 
 
-        {"Id":"596069db4bf5"}
+       {"Id":"596069db4bf5"}
 
 
     :query container: source container
     :query repo: repository
     :query tag: tag
     :query m: commit message
     :query author: author (eg. "John Hannibal Smith <hannibal@a-team.com>")
-    :query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]})
     :statuscode 201: no error
     :statuscode 404: no such container
     :statuscode 500: server error

+ 23 - 18
docs/sources/api/docker_remote_api_v1.5.rst

@@ -1050,32 +1050,37 @@ Create a new image from a container's changes
 
 
 .. http:post:: /commit
 
 
-  Create a new image from a container's changes
+    Create a new image from a container's changes
 
 
-  **Example request**:
+    **Example request**:
 
 
-  .. sourcecode:: http
+    .. sourcecode:: http
 
 
-    POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+       POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+       Content-Type: application/json
+   
+       {
+           "Cmd": ["cat", "/world"],
+           "PortSpecs":["22"]
+       }
 
 
-  **Example response**:
+    **Example response**:
 
 
-  .. sourcecode:: http
+    .. sourcecode:: http
 
 
-    HTTP/1.1 201 OK
-    Content-Type: application/vnd.docker.raw-stream
+       HTTP/1.1 201 OK
+	   Content-Type: application/vnd.docker.raw-stream
 
 
-    {"Id":"596069db4bf5"}
+       {"Id":"596069db4bf5"}
 
 
-  :query container: source container
-  :query repo: repository
-  :query tag: tag
-  :query m: commit message
-  :query author: author (eg. "John Hannibal Smith <hannibal@a-team.com>")
-  :query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]})
-  :statuscode 201: no error
-  :statuscode 404: no such container
-  :statuscode 500: server error
+    :query container: source container
+    :query repo: repository
+    :query tag: tag
+    :query m: commit message
+    :query author: author (eg. "John Hannibal Smith <hannibal@a-team.com>")
+    :statuscode 201: no error
+    :statuscode 404: no such container
+    :statuscode 500: server error
 
 
 Monitor Docker's events
 ***********************

+ 16 - 6
docs/sources/api/docker_remote_api_v1.6.rst

@@ -13,9 +13,12 @@ Docker Remote API v1.6
 1. Brief introduction
 =====================
 
 
-- The Remote API is replacing rcli
-- Default port in the docker daemon is 4243
-- The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport stdout stdin and stderr
+- The Remote API has replaced rcli
+- The daemon listens on ``unix:///var/run/docker.sock``, but you can
+  :ref:`bind_docker`.
+- The API tends to be REST, but for some complex commands, like
+  ``attach`` or ``pull``, the HTTP connection is hijacked to transport
+  ``stdout``, ``stdin`` and ``stderr``.
 
 
 2. Endpoints
 ============
@@ -148,6 +151,7 @@ Create a container
 	   }
 	
 	
 	:jsonparam config: the container's configuration
+ 	:query name: container name to use
 	:statuscode 201: no error
 	:statuscode 404: no such container
 	:statuscode 406: impossible to attach (container not running)
@@ -442,7 +446,8 @@ Kill a container
 	.. sourcecode:: http
 
 
 	   HTTP/1.1 204 OK
-	   	
+
+	:query signal: Signal to send to the container (integer). When not set, SIGKILL is assumed and the call waits for the container to exit.
 	:statuscode 204: no error
 	:statuscode 404: no such container
 	:statuscode 500: server error
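
As a rough sketch, sending a specific signal could look like the request below. It assumes the daemon is reachable over TCP on port 4243 and uses the usual ``/containers/(id)/kill`` path; the container ID is illustrative:

.. code-block:: bash

    # Send signal 15 (SIGTERM) instead of the default SIGKILL
    curl -X POST "http://127.0.0.1:4243/containers/4386fb97867d/kill?signal=15"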
@@ -1131,7 +1136,13 @@ Create a new image from a container's changes
 
 
     .. sourcecode:: http
 
 
-        POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+       POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+       Content-Type: application/json
+       
+       {
+           "Cmd": ["cat", "/world"],
+           "PortSpecs":["22"]
+       }
 
 
     **Example response**:
 
 
@@ -1147,7 +1158,6 @@ Create a new image from a container's changes
     :query tag: tag
     :query m: commit message
     :query author: author (eg. "John Hannibal Smith <hannibal@a-team.com>")
-    :query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]})
     :statuscode 201: no error
     :statuscode 404: no such container
     :statuscode 500: server error
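
For reference, a minimal ``curl`` sketch of this call, assuming the daemon is bound to TCP port 4243 and reusing the illustrative container ID from the example above:

.. code-block:: bash

    curl -X POST \
         -H "Content-Type: application/json" \
         -d '{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}' \
         "http://127.0.0.1:4243/commit?container=44c004db4b17&m=message&repo=myrepo"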

+ 1 - 1
docs/sources/api/index.rst

@@ -14,5 +14,5 @@ Your programs and scripts can access Docker's functionality via these interfaces
   registry_api
   index_api
   docker_remote_api
-
+  remote_api_client_libraries
 
 

+ 709 - 38
docs/sources/commandline/cli.rst

@@ -4,11 +4,8 @@
 
 
 .. _cli:
 
 
-Overview
-======================
-
-Docker Usage
-~~~~~~~~~~~~~~~~~~
+Command Line Help
+-----------------
 
 
 To list available commands, either run ``docker`` with no parameters or execute
 ``docker help``::
@@ -21,71 +18,745 @@ To list available commands, either run ``docker`` with no parameters or execute
 
 
     ...
 
 
+.. _cli_attach:
+
+``attach``
+----------
+
+::
+
+    Usage: docker attach CONTAINER
+
+    Attach to a running container.
+
+      -nostdin=false: Do not attach stdin
+      -sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
+
+You can detach from the container again (and leave it running) with
+``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of
+the Docker client when it quits.
+
+To stop a container, use ``docker stop``.
+
+To kill a container, use ``docker kill``.
+
+.. _cli_attach_examples:
+ 
+Examples:
+~~~~~~~~~
+
+.. code-block:: bash
+
+     $ ID=$(sudo docker run -d ubuntu /usr/bin/top -b)
+     $ sudo docker attach $ID
+     top - 02:05:52 up  3:05,  0 users,  load average: 0.01, 0.02, 0.05
+     Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+     Cpu(s):  0.1%us,  0.2%sy,  0.0%ni, 99.7%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+     Mem:    373572k total,   355560k used,    18012k free,    27872k buffers
+     Swap:   786428k total,        0k used,   786428k free,   221740k cached
+
+     PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND            
+      1 root      20   0 17200 1116  912 R    0  0.3   0:00.03 top                
+
+      top - 02:05:55 up  3:05,  0 users,  load average: 0.01, 0.02, 0.05
+      Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+      Cpu(s):  0.0%us,  0.2%sy,  0.0%ni, 99.8%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+      Mem:    373572k total,   355244k used,    18328k free,    27872k buffers
+      Swap:   786428k total,        0k used,   786428k free,   221776k cached
+
+        PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND            
+	    1 root      20   0 17208 1144  932 R    0  0.3   0:00.03 top                
+
+
+      top - 02:05:58 up  3:06,  0 users,  load average: 0.01, 0.02, 0.05
+      Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+      Cpu(s):  0.2%us,  0.3%sy,  0.0%ni, 99.5%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+      Mem:    373572k total,   355780k used,    17792k free,    27880k buffers
+      Swap:   786428k total,        0k used,   786428k free,   221776k cached
+
+      PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND            
+           1 root      20   0 17208 1144  932 R    0  0.3   0:00.03 top                
+     ^C$ 
+     $ sudo docker stop $ID
+
+.. _cli_build:
+
+``build``
+---------
+
+::
+
+    Usage: docker build [OPTIONS] PATH | URL | -
+    Build a new container image from the source code at PATH
+      -t="": Repository name (and optionally a tag) to be applied to the resulting image in case of success.
+      -q=false: Suppress verbose build output.
+      -no-cache: Do not use the cache when building the image.
+      -rm: Remove intermediate containers after a successful build
+    When a single Dockerfile is given as URL, then no context is set. When a git repository is set as URL, the repository is used as context
+
+.. _cli_build_examples:
+
+Examples:
+~~~~~~~~~
+
+.. code-block:: bash
+
+    sudo docker build .
+
+This will read the ``Dockerfile`` from the current directory. It will
+also send any other files and directories found in the current
+directory to the ``docker`` daemon.
+
+The contents of this directory would be used by ``ADD`` commands found
+within the ``Dockerfile``.  This will send a lot of data to the
+``docker`` daemon if the current directory contains a lot of data.  If
+the absolute path is provided instead of ``.`` then only the files and
+directories required by the ADD commands from the ``Dockerfile`` will be
+added to the context and transferred to the ``docker`` daemon.
+
+.. code-block:: bash
+
+   sudo docker build -t vieux/apache:2.0 .
+
+This will build like the previous example, but it will then tag the
+resulting image. The repository name will be ``vieux/apache`` and the
+tag will be ``2.0``
+
+
+.. code-block:: bash
+
+    sudo docker build - < Dockerfile
+
+This will read a ``Dockerfile`` from *stdin* without context. Due to
+the lack of a context, no contents of any local directory will be sent
+to the ``docker`` daemon.  ``ADD`` doesn't work when running in this
+mode because the absence of the context provides no source files to
+copy to the container.
+
+
+.. code-block:: bash
+
+    sudo docker build github.com/creack/docker-firefox
+
+This will clone the Github repository and use it as context. The
+``Dockerfile`` at the root of the repository is used as
+``Dockerfile``.  Note that you can specify an arbitrary git repository
+by using the ``git://`` scheme.
+
+
+.. _cli_commit:
+
+``commit``
+----------
+
+::
+
+    Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY [TAG]]
+
+    Create a new image from a container's changes
+
+      -m="": Commit message
+      -author="": Author (eg. "John Hannibal Smith <hannibal@a-team.com>")
+      -run="": Configuration to be applied when the image is launched with `docker run`. 
+               (ex: '{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
+
+Full ``-run`` example (multi-line is ok within a single quote ``'``):
+
+::
+
+  $ sudo docker commit -run='
+  {
+      "Entrypoint" : null,
+      "Privileged" : false,
+      "User" : "",
+      "VolumesFrom" : "",
+      "Cmd" : ["cat", "-e", "/etc/resolv.conf"],
+      "Dns" : ["8.8.8.8", "8.8.4.4"],
+      "MemorySwap" : 0,
+      "AttachStdin" : false,
+      "AttachStderr" : false,
+      "CpuShares" : 0,
+      "OpenStdin" : false,
+      "Volumes" : null,
+      "Hostname" : "122612f45831",
+      "PortSpecs" : ["22", "80", "443"],
+      "Image" : "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+      "Tty" : false,
+      "Env" : [
+         "HOME=/",
+         "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+      ],
+      "StdinOnce" : false,
+      "Domainname" : "",
+      "WorkingDir" : "/",
+      "NetworkDisabled" : false,
+      "Memory" : 0,
+      "AttachStdout" : false
+  }' $CONTAINER_ID
+
+.. _cli_cp:
+
+``cp``
+------
+
+::
+
+    Usage: docker cp CONTAINER:RESOURCE HOSTPATH
+
+    Copy files/folders from the containers filesystem to the host
+    path.  Paths are relative to the root of the filesystem.
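
For example (the container ID and paths are illustrative):

.. code-block:: bash

    # Copy /etc/hosts out of a hypothetical container
    sudo docker cp 7bb0e258aefe:/etc/hosts /tmp/container-hosts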
+
+.. _cli_diff:
+
+``diff``
+--------
+
+::
+
+    Usage: docker diff CONTAINER [OPTIONS]
+
+    Inspect changes on a container's filesystem
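
As an illustrative sketch, the output is a list of changed paths, typically prefixed with ``A`` (added), ``C`` (changed) or ``D`` (deleted); the ID and paths below are made up:

.. code-block:: bash

    $ sudo docker diff 4386fb97867d
    C /etc
    A /etc/nginx
    A /etc/nginx/nginx.conf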
+
+.. _cli_events:
+
+``events``
+----------
+
+::
+
+    Usage: docker events
+
+    Get real time events from the server
+
+.. _cli_events_example:
+
+Examples
+~~~~~~~~
+
+You'll need two shells for this example.
+
+Shell 1: Listening for events
+.............................
+
+.. code-block:: bash
+    
+    $ sudo docker events
+
+Shell 2: Start and Stop a Container
+...................................
+
+.. code-block:: bash
+
+    $ sudo docker start 4386fb97867d
+    $ sudo docker stop 4386fb97867d
+
+Shell 1: (Again .. now showing events)
+......................................
+
+.. code-block:: bash
+
+    [2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start
+    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
+    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
+
+
+.. _cli_export:
+
+``export``
+----------
+
+::
+
+    Usage: docker export CONTAINER
+
+    Export the contents of a filesystem as a tar archive
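
For example (container ID illustrative), the archive is written to *stdout*, so redirect it to a file:

.. code-block:: bash

    sudo docker export 4386fb97867d > container-rootfs.tar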
+
+.. _cli_history:
+
+``history``
+-----------
+
+::
+
+    Usage: docker history [OPTIONS] IMAGE
+
+    Show the history of an image
+
+      -notrunc=false: Don't truncate output
+      -q=false: only show numeric IDs
+
+.. _cli_images:
+
+``images``
+----------
+
+::
+
+    Usage: docker images [OPTIONS] [NAME]
+
+    List images
+
+      -a=false: show all images
+      -q=false: only show numeric IDs
+      -viz=false: output in graphviz format
+
+Displaying images visually
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    sudo docker images -viz | dot -Tpng -o docker.png
+
+.. image:: docker_images.gif
+   :alt: Example inheritance graph of Docker images.
+
+.. _cli_import:
+
+``import``
+----------
+
+::
+
+    Usage: docker import URL|- [REPOSITORY [TAG]]
+
+    Create a new filesystem image from the contents of a tarball
+
+At this time, the URL must start with ``http`` and point to a single
+file archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) containing a
+root filesystem. If you would like to import from a local directory or
+archive, you can use the ``-`` parameter to take the data from
+standard in.
+
+Examples
+~~~~~~~~
+
+Import from a remote location
+.............................
+
+``$ sudo docker import http://example.com/exampleimage.tgz exampleimagerepo``
+
+Import from a local file
+........................
+
+Import to docker via pipe and standard in
+
+``$ cat exampleimage.tgz | sudo docker import - exampleimagelocal``
+
+Import from a local directory
+.............................
+
+``$ sudo tar -c . | docker import - exampleimagedir``
+
+Note the ``sudo`` in this example -- you must preserve the ownership
+of the files (especially root ownership) when archiving with tar. If
+you are not root (or using sudo) when you tar, the ownership might not
+be preserved.
+
+.. _cli_info:
+
+``info``
+--------
+
+::
+
+    Usage: docker info
+
+    Display system-wide information.
+
+.. _cli_insert:
+
+``insert``
+----------
+
+::
+
+    Usage: docker insert IMAGE URL PATH
+
+    Insert a file from URL in the IMAGE at PATH
+
+Examples
+~~~~~~~~
+
+Insert file from github
+.......................
+
+.. code-block:: bash
+
+    $ sudo docker insert 8283e18b24bc https://raw.github.com/metalivedev/django/master/postinstall /tmp/postinstall.sh
+
+.. _cli_inspect:
+
+``inspect``
+-----------
+
+::
+
+    Usage: docker inspect [OPTIONS] CONTAINER
+
+    Return low-level information on a container
+
+.. _cli_kill:
+
+``kill``
+--------
+
+::
+
+    Usage: docker kill CONTAINER [CONTAINER...]
+
+    Kill a running container (Send SIGKILL)
+    
+The main process inside the container will be sent SIGKILL.
+
+.. _cli_login:
+
+``login``
+---------
+
+::
+
+    Usage: docker login [OPTIONS] [SERVER]
+
+    Register or Login to the docker registry server
+
+    -e="": email
+    -p="": password
+    -u="": username
+
+    If you want to login to a private registry you can
+    specify this by adding the server name.
+
+    example:
+    docker login localhost:8080
+
+
+.. _cli_logs:
+
+``logs``
+--------
+
+::
+
+    Usage: docker logs [OPTIONS] CONTAINER
+
+    Fetch the logs of a container
+
+
+.. _cli_port:
+
+``port``
+--------
+
+::
+
+    Usage: docker port [OPTIONS] CONTAINER PRIVATE_PORT
+
+    Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
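
For example, to look up the host port mapped to a container's port ``80`` (container ID illustrative):

.. code-block:: bash

    sudo docker port 4386fb97867d 80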
+
+
+.. _cli_ps:
+
+``ps``
+------
+
+::
+
+    Usage: docker ps [OPTIONS]
+
+    List containers
+
+      -a=false: Show all containers. Only running containers are shown by default.
+      -notrunc=false: Don't truncate output
+      -q=false: Only display numeric IDs
+
+.. _cli_pull:
+
+``pull``
+--------
+
+::
+
+    Usage: docker pull NAME
+
+    Pull an image or a repository from the registry
+
+
+.. _cli_push:
+
+``push``
+--------
+
+::
+
+    Usage: docker push NAME
+
+    Push an image or a repository to the registry
+
+
+.. _cli_restart:
+
+``restart``
+-----------
+
+::
+
+    Usage: docker restart [OPTIONS] NAME
+
+    Restart a running container
+
+.. _cli_rm:
+
+``rm``
+------
+
+::
+
+    Usage: docker rm [OPTIONS] CONTAINER
+
+    Remove one or more containers
+        -link="": Remove the link instead of the actual container
+ 
+
+Examples:
+~~~~~~~~~
+
+.. code-block:: bash
+
+    $ docker rm /redis
+    /redis
+
+
+This will remove the container referenced under the link ``/redis``.
+
+
+.. code-block:: bash
+
+    $ docker rm -link /webapp/redis
+    /webapp/redis
+
+
+This will remove the underlying link between the ``/webapp`` and ``/redis``
+containers, removing all network communication between them.
+
+.. _cli_rmi:
+
+``rmi``
+-------
+
+::
+
+    Usage: docker rmi IMAGE [IMAGE...]
+
+    Remove one or more images
+
+.. _cli_run:
+
+``run``
+-------
+
+::
+
+    Usage: docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...]
+
+    Run a command in a new container
+
+      -a=map[]: Attach to stdin, stdout or stderr
+      -c=0: CPU shares (relative weight)
+      -cidfile="": Write the container ID to the file
+      -d=false: Detached mode: Run container in the background, print new container id
+      -e=[]: Set environment variables
+      -h="": Container host name
+      -i=false: Keep stdin open even if not attached
+      -privileged=false: Give extended privileges to this container
+      -m=0: Memory limit (in bytes)
+      -n=true: Enable networking for this container
+      -p=[]: Map a network port to the container
+      -rm=false: Automatically remove the container when it exits (incompatible with -d)
+      -t=false: Allocate a pseudo-tty
+      -u="": Username or UID
+      -dns=[]: Set custom dns servers for the container
+      -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "container-dir" is missing, then docker creates a new volume.
+      -volumes-from="": Mount all volumes from the given container
+      -entrypoint="": Overwrite the default entrypoint set by the image
+      -w="": Working directory inside the container
+      -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
+      -sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
+      -expose=[]: Expose a port from the container without publishing it to your host
+      -link="": Add link to another container (name:alias)
+      -name="": Assign the specified name to the container. If no name is specified, docker will generate a random name
+
+Examples
+--------
+
+.. code-block:: bash
+
+    sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
+
+This will create a container and print "test" to the console. The
+``cidfile`` flag makes docker attempt to create a new file and write the
+container ID to it. If the file exists already, docker will return an
+error. Docker will close this file when docker run exits.
+
+.. code-block:: bash
+
+   docker run mount -t tmpfs none /var/spool/squid
+
+This will *not* work, because by default, most potentially dangerous
+kernel capabilities are dropped, including ``cap_sys_admin`` (which is
+required to mount filesystems). However, the ``-privileged`` flag will
+allow it to run:
+
+.. code-block:: bash
+
+   docker run -privileged mount -t tmpfs none /var/spool/squid
+
+The ``-privileged`` flag gives *all* capabilities to the container,
+and it also lifts all the limitations enforced by the ``device``
+cgroup controller. In other words, the container can then do almost
+everything that the host can do. This flag exists to allow special
+use-cases, like running Docker within Docker.
+
+.. code-block:: bash
+
+   docker  run -w /path/to/dir/ -i -t  ubuntu pwd
+
+The ``-w`` flag runs the command inside the given directory, here
+``/path/to/dir/``. If the path does not exist, it is created inside the
+container.
+
+.. code-block:: bash
+
+   docker  run  -v `pwd`:`pwd` -w `pwd` -i -t  ubuntu pwd
+
+The ``-v`` flag mounts the current working directory into the container.
+The ``-w`` flag then runs the command inside that directory, by changing
+into the path returned by ``pwd``. So this combination executes the
+command in the container, but against the host's current working
+directory.
+
+.. code-block:: bash
+
+    docker run -p 127.0.0.1::80 ubuntu bash
+
+Here the ``-p`` flag binds a container port to a specific interface of
+the host machine. In this example, port ``80`` of the container will be
+bound to a dynamically allocated port on ``127.0.0.1`` of the host.
+
+.. code-block:: bash
+
+    docker run -p 127.0.0.1:80:80 ubuntu bash
+
+This will bind port ``80`` of the container to port ``80`` on 127.0.0.1 of your
+host machine.
+
+.. code-block:: bash
+
+    docker run -expose 80 ubuntu bash
+
+This will expose port ``80`` of the container for use within a link
+without publishing the port to the host system's interfaces.  
+
+.. code-block:: bash
+
+    docker run -name console -t -i ubuntu bash
+
+This will create and run a new container named ``console``.
+
+.. code-block:: bash
+
+    docker run -link /redis:redis -name console ubuntu bash
+
+The ``-link`` flag will link the container named ``/redis`` into the 
+newly created container with the alias ``redis``.  The new container
+can access the network and environment of the redis container via
+environment variables.  The ``-name`` flag will assign the name ``console`` 
+to the newly created container.
+
+.. _cli_search:
+
+``search``
+----------
+
+::
+
+    Usage: docker search TERM
+
+    Searches for the TERM parameter on the Docker index and prints out
+    a list of repositories that match.
 
 
+.. _cli_start:
 
 
-Available Commands
-~~~~~~~~~~~~~~~~~~
+``start``
+---------
 
 
-.. include:: command/attach.rst
+::
 
 
-.. include:: command/build.rst
+    Usage: docker start [OPTIONS] NAME
 
 
-.. include:: command/commit.rst
+    Start a stopped container
 
 
-.. include:: command/cp.rst
+      -a=false: Attach container's stdout/stderr and forward all signals to the process
+      -i=false: Attach container's stdin
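
For example, a sketch that restarts a stopped container and attaches to its output (container ID illustrative):

.. code-block:: bash

    sudo docker start -a 4386fb97867d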
 
 
-.. include:: command/diff.rst
+.. _cli_stop:
 
 
-.. include:: command/events.rst
+``stop``
+--------
 
 
-.. include:: command/export.rst
+::
 
 
-.. include:: command/history.rst
+    Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...]
 
 
-.. include:: command/images.rst
+    Stop a running container (Send SIGTERM, and then SIGKILL after grace period)
 
 
-.. include:: command/import.rst
+      -t=10: Number of seconds to wait for the container to stop before killing it.
+      
+The main process inside the container will receive SIGTERM, and after a grace period, SIGKILL.
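
For example, to give a container more time to shut down cleanly (container ID illustrative):

.. code-block:: bash

    # Wait up to 30 seconds before the daemon sends SIGKILL
    sudo docker stop -t 30 4386fb97867d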
 
 
-.. include:: command/info.rst
+.. _cli_tag:
 
 
-.. include:: command/insert.rst
+``tag``
+-------
 
 
-.. include:: command/inspect.rst
+::
 
 
-.. include:: command/kill.rst
+    Usage: docker tag [OPTIONS] IMAGE REPOSITORY [TAG]
 
 
-.. include:: command/login.rst
+    Tag an image into a repository
 
 
-.. include:: command/logs.rst
+      -f=false: Force
 
 
-.. include:: command/port.rst
+.. _cli_top:
 
 
-.. include:: command/ps.rst
+``top``
+-------
 
 
-.. include:: command/pull.rst
+::
 
 
-.. include:: command/push.rst
+    Usage: docker top CONTAINER
 
 
-.. include:: command/restart.rst
+    Lookup the running processes of a container
 
 
-.. include:: command/rm.rst
+.. _cli_version:
 
 
-.. include:: command/rmi.rst
+``version``
+-----------
 
 
-.. include:: command/run.rst
+Show the version of the Docker client and daemon, and the latest released version.
 
 
-.. include:: command/search.rst
 
 
-.. include:: command/start.rst
+.. _cli_wait:
 
 
-.. include:: command/stop.rst
+``wait``
+--------
 
 
-.. include:: command/tag.rst
+::
 
 
-.. include:: command/top.rst
+    Usage: docker wait [OPTIONS] NAME
 
 
-.. include:: command/version.rst
+    Block until a container stops, then print its exit code.
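
A small sketch of how ``wait`` is typically combined with ``run -d``; the exit code ``3`` is just what the illustrative command returns:

.. code-block:: bash

    ID=$(sudo docker run -d ubuntu /bin/sh -c "exit 3")
    sudo docker wait $ID    # prints 3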
 
 
-.. include:: command/wait.rst
 
 
 
 

+ 0 - 59
docs/sources/commandline/command/attach.rst

@@ -1,59 +0,0 @@
-:title: Attach Command
-:description: Attach to a running container
-:keywords: attach, container, docker, documentation
-
-===========================================
-``attach`` -- Attach to a running container
-===========================================
-
-::
-
-    Usage: docker attach CONTAINER
-
-    Attach to a running container.
-
-You can detach from the container again (and leave it running) with
-``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of
-the Docker client when it quits.
-
-To stop a container, use ``docker stop``
-
-To kill the container, use ``docker kill``
- 
-Examples:
----------
-
-.. code-block:: bash
-
-     $ ID=$(sudo docker run -d ubuntu /usr/bin/top -b)
-     $ sudo docker attach $ID
-     top - 02:05:52 up  3:05,  0 users,  load average: 0.01, 0.02, 0.05
-     Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
-     Cpu(s):  0.1%us,  0.2%sy,  0.0%ni, 99.7%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
-     Mem:    373572k total,   355560k used,    18012k free,    27872k buffers
-     Swap:   786428k total,        0k used,   786428k free,   221740k cached
-
-     PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND            
-      1 root      20   0 17200 1116  912 R    0  0.3   0:00.03 top                
-
-      top - 02:05:55 up  3:05,  0 users,  load average: 0.01, 0.02, 0.05
-      Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
-      Cpu(s):  0.0%us,  0.2%sy,  0.0%ni, 99.8%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
-      Mem:    373572k total,   355244k used,    18328k free,    27872k buffers
-      Swap:   786428k total,        0k used,   786428k free,   221776k cached
-
-        PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND            
-	    1 root      20   0 17208 1144  932 R    0  0.3   0:00.03 top                
-
-
-      top - 02:05:58 up  3:06,  0 users,  load average: 0.01, 0.02, 0.05
-      Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
-      Cpu(s):  0.2%us,  0.3%sy,  0.0%ni, 99.5%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
-      Mem:    373572k total,   355780k used,    17792k free,    27880k buffers
-      Swap:   786428k total,        0k used,   786428k free,   221776k cached
-
-      PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND            
-           1 root      20   0 17208 1144  932 R    0  0.3   0:00.03 top                
-     ^C$ 
-     $ sudo docker stop $ID
-

+ 0 - 65
docs/sources/commandline/command/build.rst

@@ -1,65 +0,0 @@
-:title: Build Command
-:description: Build a new image from the Dockerfile passed via stdin
-:keywords: build, docker, container, documentation
-
-================================================
-``build`` -- Build a container from a Dockerfile
-================================================
-
-::
-
-    Usage: docker build [OPTIONS] PATH | URL | -
-    Build a new container image from the source code at PATH
-      -t="": Repository name (and optionally a tag) to be applied to the resulting image in case of success.
-      -q=false: Suppress verbose build output.
-      -no-cache: Do not use the cache when building the image.
-      -rm: Remove intermediate containers after a successful build
-    When a single Dockerfile is given as URL, then no context is set. When a git repository is set as URL, the repository is used as context
-
-
-Examples
---------
-
-.. code-block:: bash
-
-    sudo docker build .
-
-This will read the ``Dockerfile`` from the current directory. It will
-also send any other files and directories found in the current
-directory to the ``docker`` daemon.
-
-The contents of this directory would be used by ``ADD`` commands found
-within the ``Dockerfile``.  This will send a lot of data to the
-``docker`` daemon if the current directory contains a lot of data.  If
-the absolute path is provided instead of ``.`` then only the files and
-directories required by the ADD commands from the ``Dockerfile`` will be
-added to the context and transferred to the ``docker`` daemon.
-
-.. code-block:: bash
-
-   sudo docker build -t vieux/apache:2.0 .
-
-This will build like the previous example, but it will then tag the
-resulting image. The repository name will be ``vieux/apache`` and the
-tag will be ``2.0``
-
-
-.. code-block:: bash
-
-    sudo docker build - < Dockerfile
-
-This will read a ``Dockerfile`` from *stdin* without context. Due to
-the lack of a context, no contents of any local directory will be sent
-to the ``docker`` daemon.  ``ADD`` doesn't work when running in this
-mode because the absence of the context provides no source files to
-copy to the container.
-
-
-.. code-block:: bash
-
-    sudo docker build github.com/creack/docker-firefox
-
-This will clone the Github repository and use it as context. The
-``Dockerfile`` at the root of the repository is used as
-``Dockerfile``.  Note that you can specify an arbitrary git repository
-by using the ``git://`` schema.

+ 0 - 52
docs/sources/commandline/command/commit.rst

@@ -1,52 +0,0 @@
-:title: Commit Command
-:description: Create a new image from a container's changes
-:keywords: commit, docker, container, documentation
-
-===========================================================
-``commit`` -- Create a new image from a container's changes
-===========================================================
-
-::
-
-    Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY [TAG]]
-
-    Create a new image from a container's changes
-
-      -m="": Commit message
-      -author="": Author (eg. "John Hannibal Smith <hannibal@a-team.com>"
-      -run="": Configuration to be applied when the image is launched with `docker run`. 
-               (ex: '{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
-
-Full -run example (multiline is ok within a single quote ``'``)
-
-::
-
-  $ sudo docker commit -run='
-  {
-      "Entrypoint" : null,
-      "Privileged" : false,
-      "User" : "",
-      "VolumesFrom" : "",
-      "Cmd" : ["cat", "-e", "/etc/resolv.conf"],
-      "Dns" : ["8.8.8.8", "8.8.4.4"],
-      "MemorySwap" : 0,
-      "AttachStdin" : false,
-      "AttachStderr" : false,
-      "CpuShares" : 0,
-      "OpenStdin" : false,
-      "Volumes" : null,
-      "Hostname" : "122612f45831",
-      "PortSpecs" : ["22", "80", "443"],
-      "Image" : "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
-      "Tty" : false,
-      "Env" : [
-         "HOME=/",
-         "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-      ],
-      "StdinOnce" : false,
-      "Domainname" : "",
-      "WorkingDir" : "/",
-      "NetworkDisabled" : false,
-      "Memory" : 0,
-      "AttachStdout" : false
-  }' $CONTAINER_ID

+ 0 - 14
docs/sources/commandline/command/cp.rst

@@ -1,14 +0,0 @@
-:title: Cp Command
-:description: Copy files/folders from the containers filesystem to the host path
-:keywords: cp, docker, container, documentation, copy
-
-============================================================================
-``cp`` -- Copy files/folders from the containers filesystem to the host path
-============================================================================
-
-::
-
-    Usage: docker cp CONTAINER:RESOURCE HOSTPATH
-
-    Copy files/folders from the containers filesystem to the host
-    path.  Paths are relative to the root of the filesystem.

+ 0 - 13
docs/sources/commandline/command/diff.rst

@@ -1,13 +0,0 @@
-:title: Diff Command
-:description: Inspect changes on a container's filesystem
-:keywords: diff, docker, container, documentation
-
-=======================================================
-``diff`` -- Inspect changes on a container's filesystem
-=======================================================
-
-::
-
-    Usage: docker diff CONTAINER [OPTIONS]
-
-    Inspect changes on a container's filesystem

+ 0 - 34
docs/sources/commandline/command/events.rst

@@ -1,34 +0,0 @@
-:title: Events Command
-:description: Get real time events from the server
-:keywords: events, docker, documentation
-
-=================================================================
-``events`` -- Get real time events from the server
-=================================================================
-
-::
-
-    Usage: docker events
-
-    Get real time events from the server
-
-Examples
---------
-
-Starting and stopping a container
-.................................
-
-.. code-block:: bash
-
-    $ sudo docker start 4386fb97867d
-    $ sudo docker stop 4386fb97867d
-
-In another shell
-
-.. code-block:: bash
-    
-    $ sudo docker events
-    [2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start
-    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
-    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
-

+ 0 - 13
docs/sources/commandline/command/export.rst

@@ -1,13 +0,0 @@
-:title: Export Command
-:description: Export the contents of a filesystem as a tar archive
-:keywords: export, docker, container, documentation
-
-=================================================================
-``export`` -- Stream the contents of a container as a tar archive
-=================================================================
-
-::
-
-    Usage: docker export CONTAINER
-
-    Export the contents of a filesystem as a tar archive

+ 0 - 13
docs/sources/commandline/command/history.rst

@@ -1,13 +0,0 @@
-:title: History Command
-:description: Show the history of an image
-:keywords: history, docker, container, documentation
-
-===========================================
-``history`` -- Show the history of an image
-===========================================
-
-::
-
-    Usage: docker history [OPTIONS] IMAGE
-
-    Show the history of an image

+ 0 - 26
docs/sources/commandline/command/images.rst

@@ -1,26 +0,0 @@
-:title: Images Command
-:description: List images
-:keywords: images, docker, container, documentation
-
-=========================
-``images`` -- List images
-=========================
-
-::
-
-    Usage: docker images [OPTIONS] [NAME]
-
-    List images
-
-      -a=false: show all images
-      -q=false: only show numeric IDs
-      -viz=false: output in graphviz format
-
-Displaying images visually
---------------------------
-
-::
-
-    sudo docker images -viz | dot -Tpng -o docker.png
-
-.. image:: https://docs.docker.io/en/latest/_static/docker_images.gif

+ 0 - 44
docs/sources/commandline/command/import.rst

@@ -1,44 +0,0 @@
-:title: Import Command
-:description: Create a new filesystem image from the contents of a tarball
-:keywords: import, tarball, docker, url, documentation
-
-==========================================================================
-``import`` -- Create a new filesystem image from the contents of a tarball
-==========================================================================
-
-::
-
-    Usage: docker import URL|- [REPOSITORY [TAG]]
-
-    Create a new filesystem image from the contents of a tarball
-
-At this time, the URL must start with ``http`` and point to a single
-file archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) containing a
-root filesystem. If you would like to import from a local directory or
-archive, you can use the ``-`` parameter to take the data from
-standard in.
-
-Examples
---------
-
-Import from a remote location
-.............................
-
-``$ sudo docker import http://example.com/exampleimage.tgz exampleimagerepo``
-
-Import from a local file
-........................
-
-Import to docker via pipe and standard in
-
-``$ cat exampleimage.tgz | sudo docker import - exampleimagelocal``
-
-Import from a local directory
-.............................
-
-``$ sudo tar -c . | docker import - exampleimagedir``
-
-Note the ``sudo`` in this example -- you must preserve the ownership
-of the files (especially root ownership) during the archiving with
-tar. If you are not root (or sudo) when you tar, then the ownerships
-might not get preserved.

+ 0 - 13
docs/sources/commandline/command/info.rst

@@ -1,13 +0,0 @@
-:title: Info Command
-:description: Display system-wide information.
-:keywords: info, docker, information, documentation
-
-===========================================
-``info`` -- Display system-wide information
-===========================================
-
-::
-
-    Usage: docker info
-
-    Display system-wide information.

+ 0 - 23
docs/sources/commandline/command/insert.rst

@@ -1,23 +0,0 @@
-:title: Insert Command
-:description: Insert a file in an image
-:keywords: insert, image, docker, documentation
-
-==========================================================================
-``insert`` -- Insert a file in an image
-==========================================================================
-
-::
-
-    Usage: docker insert IMAGE URL PATH
-
-    Insert a file from URL in the IMAGE at PATH
-
-Examples
---------
-
-Insert file from github
-.......................
-
-.. code-block:: bash
-
-    $ sudo docker insert 8283e18b24bc https://raw.github.com/metalivedev/django/master/postinstall /tmp/postinstall.sh

+ 0 - 13
docs/sources/commandline/command/inspect.rst

@@ -1,13 +0,0 @@
-:title: Inspect Command
-:description: Return low-level information on a container
-:keywords: inspect, container, docker, documentation
-
-==========================================================
-``inspect`` -- Return low-level information on a container
-==========================================================
-
-::
-
-    Usage: docker inspect [OPTIONS] CONTAINER
-
-    Return low-level information on a container

+ 0 - 13
docs/sources/commandline/command/kill.rst

@@ -1,13 +0,0 @@
-:title: Kill Command
-:description: Kill a running container
-:keywords: kill, container, docker, documentation
-
-====================================
-``kill`` -- Kill a running container
-====================================
-
-::
-
-    Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...]
-
-    Kill a running container

+ 0 - 24
docs/sources/commandline/command/login.rst

@@ -1,24 +0,0 @@
-:title: Login Command
-:description: Register or Login to the docker registry server
-:keywords: login, docker, documentation
-
-============================================================
-``login`` -- Register or Login to the docker registry server
-============================================================
-
-::
-
-    Usage: docker login [OPTIONS] [SERVER]
-
-    Register or Login to the docker registry server
-
-    -e="": email
-    -p="": password
-    -u="": username
-
-    If you want to login to a private registry you can
-    specify this by adding the server name.
-
-    example:
-    docker login localhost:8080
-

+ 0 - 13
docs/sources/commandline/command/logs.rst

@@ -1,13 +0,0 @@
-:title: Logs Command
-:description: Fetch the logs of a container
-:keywords: logs, container, docker, documentation
-
-=========================================
-``logs`` -- Fetch the logs of a container
-=========================================
-
-::
-
-    Usage: docker logs [OPTIONS] CONTAINER
-
-    Fetch the logs of a container

+ 0 - 13
docs/sources/commandline/command/port.rst

@@ -1,13 +0,0 @@
-:title: Port Command
-:description: Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
-:keywords: port, docker, container, documentation
-
-=========================================================================
-``port`` -- Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
-=========================================================================
-
-::
-
-    Usage: docker port [OPTIONS] CONTAINER PRIVATE_PORT
-
-    Lookup the public-facing port which is NAT-ed to PRIVATE_PORT

+ 0 - 17
docs/sources/commandline/command/ps.rst

@@ -1,17 +0,0 @@
-:title: Ps Command
-:description: List containers
-:keywords: ps, docker, documentation, container
-
-=========================
-``ps`` -- List containers
-=========================
-
-::
-
-    Usage: docker ps [OPTIONS]
-
-    List containers
-
-      -a=false: Show all containers. Only running containers are shown by default.
-      -notrunc=false: Don't truncate output
-      -q=false: Only display numeric IDs

+ 0 - 13
docs/sources/commandline/command/pull.rst

@@ -1,13 +0,0 @@
-:title: Pull Command
-:description: Pull an image or a repository from the registry
-:keywords: pull, image, repo, repository, documentation, docker
-
-=========================================================================
-``pull`` -- Pull an image or a repository from the docker registry server
-=========================================================================
-
-::
-
-    Usage: docker pull NAME
-
-    Pull an image or a repository from the registry

+ 0 - 13
docs/sources/commandline/command/push.rst

@@ -1,13 +0,0 @@
-:title: Push Command
-:description: Push an image or a repository to the registry
-:keywords: push, docker, image, repository, documentation, repo
-
-=======================================================================
-``push`` -- Push an image or a repository to the docker registry server
-=======================================================================
-
-::
-
-    Usage: docker push NAME
-
-    Push an image or a repository to the registry

+ 0 - 13
docs/sources/commandline/command/restart.rst

@@ -1,13 +0,0 @@
-:title: Restart Command
-:description: Restart a running container
-:keywords: restart, container, docker, documentation
-
-==========================================
-``restart`` -- Restart a running container
-==========================================
-
-::
-
-    Usage: docker restart [OPTIONS] NAME
-
-    Restart a running container

+ 0 - 13
docs/sources/commandline/command/rm.rst

@@ -1,13 +0,0 @@
-:title: Rm Command
-:description: Remove a container
-:keywords: remove, container, docker, documentation, rm
-
-============================
-``rm`` -- Remove a container
-============================
-
-::
-
-    Usage: docker rm [OPTIONS] CONTAINER
-
-    Remove one or more containers

+ 0 - 13
docs/sources/commandline/command/rmi.rst

@@ -1,13 +0,0 @@
-:title: Rmi Command
-:description: Remove an image
-:keywords: rmi, remove, image, docker, documentation
-
-==========================
-``rmi`` -- Remove an image
-==========================
-
-::
-
-    Usage: docker rmi IMAGE [IMAGE...]
-
-    Remove one or more images

+ 0 - 85
docs/sources/commandline/command/run.rst

@@ -1,85 +0,0 @@
-:title: Run Command
-:description: Run a command in a new container
-:keywords: run, container, docker, documentation 
-
-===========================================
-``run`` -- Run a command in a new container
-===========================================
-
-::
-
-    Usage: docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...]
-
-    Run a command in a new container
-
-      -a=map[]: Attach to stdin, stdout or stderr.
-      -c=0: CPU shares (relative weight)
-      -cidfile="": Write the container ID to the file
-      -d=false: Detached mode: Run container in the background, print new container id
-      -e=[]: Set environment variables
-      -h="": Container host name
-      -i=false: Keep stdin open even if not attached
-      -privileged=false: Give extended privileges to this container
-      -m=0: Memory limit (in bytes)
-      -n=true: Enable networking for this container
-      -p=[]: Map a network port to the container
-      -rm=false: Automatically remove the container when it exits (incompatible with -d)
-      -t=false: Allocate a pseudo-tty
-      -u="": Username or UID
-      -dns=[]: Set custom dns servers for the container
-      -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "container-dir" is missing, then docker creates a new volume.
-      -volumes-from="": Mount all volumes from the given container.
-      -entrypoint="": Overwrite the default entrypoint set by the image.
-      -w="": Working directory inside the container
-      -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
-
-Examples
---------
-
-.. code-block:: bash
-
-    sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
-
-This will create a container and print "test" to the console. The
-``cidfile`` flag makes docker attempt to create a new file and write the
-container ID to it. If the file exists already, docker will return an
-error. Docker will close this file when docker run exits.
-
-.. code-block:: bash
-
-   docker run mount -t tmpfs none /var/spool/squid
-
-This will *not* work, because by default, most potentially dangerous
-kernel capabilities are dropped; including ``cap_sys_admin`` (which is
-required to mount filesystems). However, the ``-privileged`` flag will
-allow it to run:
-
-.. code-block:: bash
-
-   docker run -privileged mount -t tmpfs none /var/spool/squid
-
-The ``-privileged`` flag gives *all* capabilities to the container,
-and it also lifts all the limitations enforced by the ``device``
-cgroup controller. In other words, the container can then do almost
-everything that the host can do. This flag exists to allow special
-use-cases, like running Docker within Docker.
-
-.. code-block:: bash
-
-   docker  run -w /path/to/dir/ -i -t  ubuntu pwd
-
-The ``-w`` lets the command being executed inside directory given, 
-here /path/to/dir/. If the path does not exists it is created inside the 
-container.
-
-.. code-block:: bash
-
-   docker  run  -v `pwd`:`pwd` -w `pwd` -i -t  ubuntu pwd
-
-The ``-v`` flag mounts the current working directory into the container. 
-The ``-w`` lets the command being executed inside the current 
-working directory, by changing into the directory to the value
-returned by ``pwd``. So this combination executes the command
-using the container, but inside the current working directory.
-
-

+ 0 - 14
docs/sources/commandline/command/search.rst

@@ -1,14 +0,0 @@
-:title: Search Command
-:description: Searches for the TERM parameter on the Docker index and prints out a list of repositories that match.
-:keywords: search, docker, image, documentation 
-
-===================================================================
-``search`` -- Search for an image in the docker index
-===================================================================
-
-::
-
-    Usage: docker search TERM
-
-    Searches for the TERM parameter on the Docker index and prints out
-    a list of repositories that match.

+ 0 - 13
docs/sources/commandline/command/start.rst

@@ -1,13 +0,0 @@
-:title: Start Command
-:description: Start a stopped container
-:keywords: start, docker, container, documentation
-
-======================================
-``start`` -- Start a stopped container
-======================================
-
-::
-
-    Usage: docker start [OPTIONS] NAME
-
-    Start a stopped container

+ 0 - 15
docs/sources/commandline/command/stop.rst

@@ -1,15 +0,0 @@
-:title: Stop Command
-:description: Stop a running container
-:keywords: stop, container, docker, documentation
-
-====================================
-``stop`` -- Stop a running container
-====================================
-
-::
-
-    Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...]
-
-    Stop a running container
-
-      -t=10: Number of seconds to wait for the container to stop before killing it.

+ 0 - 15
docs/sources/commandline/command/tag.rst

@@ -1,15 +0,0 @@
-:title: Tag Command
-:description: Tag an image into a repository
-:keywords: tag, docker, image, repository, documentation, repo
-
-=========================================
-``tag`` -- Tag an image into a repository
-=========================================
-
-::
-
-    Usage: docker tag [OPTIONS] IMAGE REPOSITORY [TAG]
-
-    Tag an image into a repository
-
-      -f=false: Force

+ 0 - 13
docs/sources/commandline/command/top.rst

@@ -1,13 +0,0 @@
-:title: Top Command
-:description: Lookup the running processes of a container
-:keywords: top, docker, container, documentation
-
-=======================================================
-``top`` -- Lookup the running processes of a container
-=======================================================
-
-::
-
-    Usage: docker top CONTAINER
-
-    Lookup the running processes of a container

+ 0 - 7
docs/sources/commandline/command/version.rst

@@ -1,7 +0,0 @@
-:title: Version Command
-:description: 
-:keywords: version, docker, documentation
-
-==================================================
-``version`` -- Show the docker version information
-==================================================

+ 0 - 13
docs/sources/commandline/command/wait.rst

@@ -1,13 +0,0 @@
-:title: Wait Command
-:description: Block until a container stops, then print its exit code.
-:keywords: wait, docker, container, documentation
-
-===================================================================
-``wait`` -- Block until a container stops, then print its exit code
-===================================================================
-
-::
-
-    Usage: docker wait [OPTIONS] NAME
-
-    Block until a container stops, then print its exit code.

+ 0 - 0
docs/sources/static_files/docker_images.gif → docs/sources/commandline/docker_images.gif


+ 2 - 33
docs/sources/commandline/index.rst

@@ -1,6 +1,6 @@
 :title: Commands
-:description: -- todo: change me
-:keywords: todo, commands, command line, help, docker, documentation
+:description: docker command line interface
+:keywords: commands, command line, help, docker
 
 
 
 
 Commands
@@ -12,34 +12,3 @@ Contents:
   :maxdepth: 1
 
 
   cli
-  attach  <command/attach>
-  build   <command/build>
-  commit  <command/commit>
-  cp      <command/cp>
-  diff    <command/diff>
-  events  <command/events>
-  export  <command/export>
-  history <command/history>
-  images  <command/images>
-  import  <command/import>
-  info    <command/info>
-  insert  <command/insert>
-  inspect <command/inspect>
-  kill    <command/kill>
-  login   <command/login>
-  logs    <command/logs>
-  port    <command/port>
-  ps      <command/ps>
-  pull    <command/pull>
-  push    <command/push>
-  restart <command/restart>
-  rm      <command/rm>
-  rmi     <command/rmi>
-  run     <command/run>
-  search  <command/search>
-  start   <command/start>
-  stop    <command/stop>
-  tag     <command/tag>
-  top     <command/top>
-  version <command/version>
-  wait    <command/wait>

Some files in this diff are not shown because too many files have changed.