Browse Source

Merge branch master into bump_v1.4.0

Docker-DCO-1.1-Signed-off-by: Jessica Frazelle <jess@docker.com> (github: jfrazelle)
Jessica Frazelle 10 năm trước
mục cha
commit
debf60b466
100 tập tin đã thay đổi với 3608 bổ sung và 1141 xóa
  1. 1 0
      .gitignore
  2. 29 1
      .mailmap
  3. 0 39
      .travis.yml
  4. 178 57
      AUTHORS
  5. 1 1
      CHANGELOG.md
  6. 23 10
      CONTRIBUTING.md
  7. 5 3
      Dockerfile
  8. 25 6
      Makefile
  9. 8 5
      README.md
  10. 1 1
      VERSION
  11. 1 0
      api/MAINTAINERS
  12. 35 2
      api/client/cli.go
  13. 206 61
      api/client/commands.go
  14. 1 1
      api/client/hijack.go
  15. 32 24
      api/client/utils.go
  16. 27 2
      api/common.go
  17. 171 111
      api/server/server.go
  18. 36 12
      builder/dispatchers.go
  19. 2 1
      builder/evaluator.go
  20. 55 39
      builder/internals.go
  21. 7 5
      builder/job.go
  22. 131 8
      builder/parser/line_parsers.go
  23. 6 4
      builder/parser/parser.go
  24. 1 1
      builder/parser/testfiles-negative/env_no_value/Dockerfile
  25. 0 1
      builder/parser/testfiles/docker/Dockerfile
  26. 0 1
      builder/parser/testfiles/docker/result
  27. 15 0
      builder/parser/testfiles/env/Dockerfile
  28. 10 0
      builder/parser/testfiles/env/result
  29. 7 3
      contrib/check-config.sh
  30. 2 0
      contrib/completion/MAINTAINERS
  31. 171 48
      contrib/completion/bash/docker
  32. 4 4
      contrib/completion/fish/docker.fish
  33. 84 72
      contrib/completion/zsh/_docker
  34. 0 2
      contrib/desktop-integration/chromium/Dockerfile
  35. 0 2
      contrib/desktop-integration/gparted/Dockerfile
  36. 8 4
      contrib/docker-device-tool/device_tool.go
  37. 0 2
      contrib/host-integration/Dockerfile.dev
  38. 1 0
      contrib/init/systemd/MAINTAINERS
  39. 2 0
      contrib/init/upstart/MAINTAINERS
  40. 1 1
      contrib/mkimage-arch.sh
  41. 1 1
      contrib/mkimage-yum.sh
  42. 4 1
      contrib/mkimage/debootstrap
  43. 1 0
      daemon/MAINTAINERS
  44. 4 27
      daemon/attach.go
  45. 5 2
      daemon/config.go
  46. 69 12
      daemon/container.go
  47. 37 6
      daemon/create.go
  48. 21 10
      daemon/daemon.go
  49. 1 1
      daemon/daemon_aufs.go
  50. 7 0
      daemon/daemon_overlay.go
  51. 1 1
      daemon/delete.go
  52. 25 11
      daemon/exec.go
  53. 17 1
      daemon/execdriver/driver.go
  54. 1 0
      daemon/execdriver/lxc/MAINTAINERS
  55. 17 18
      daemon/execdriver/lxc/driver.go
  56. 1 61
      daemon/execdriver/lxc/init.go
  57. 0 55
      daemon/execdriver/lxc/lxc_init_linux.go
  58. 1 7
      daemon/execdriver/lxc/lxc_init_unsupported.go
  59. 66 5
      daemon/execdriver/lxc/lxc_template.go
  60. 160 0
      daemon/execdriver/lxc/lxc_template_unit_test.go
  61. 26 0
      daemon/execdriver/native/create.go
  62. 65 29
      daemon/execdriver/native/driver.go
  63. 1 7
      daemon/execdriver/native/init.go
  64. 2 8
      daemon/execdriver/native/utils.go
  65. 36 30
      daemon/graphdriver/aufs/aufs.go
  66. 40 11
      daemon/graphdriver/aufs/aufs_test.go
  67. 1 1
      daemon/graphdriver/aufs/mount.go
  68. 9 2
      daemon/graphdriver/btrfs/btrfs.go
  69. 24 0
      daemon/graphdriver/btrfs/version.go
  70. 13 0
      daemon/graphdriver/btrfs/version_none.go
  71. 13 0
      daemon/graphdriver/btrfs/version_test.go
  72. 1 0
      daemon/graphdriver/devmapper/MAINTAINERS
  73. 19 0
      daemon/graphdriver/devmapper/README.md
  74. 523 116
      daemon/graphdriver/devmapper/deviceset.go
  75. 5 1
      daemon/graphdriver/devmapper/devmapper_test.go
  76. 4 3
      daemon/graphdriver/devmapper/driver.go
  77. 2 16
      daemon/graphdriver/driver.go
  78. 3 1
      daemon/graphdriver/fsdiff.go
  79. 42 1
      daemon/graphdriver/graphtest/graphtest.go
  80. 157 0
      daemon/graphdriver/overlay/copy.go
  81. 370 0
      daemon/graphdriver/overlay/overlay.go
  82. 28 0
      daemon/graphdriver/overlay/overlay_test.go
  83. 3 0
      daemon/image_delete.go
  84. 15 1
      daemon/info.go
  85. 18 0
      daemon/inspect.go
  86. 12 7
      daemon/list.go
  87. 17 4
      daemon/logs.go
  88. 11 8
      daemon/monitor.go
  89. 30 38
      daemon/networkdriver/bridge/driver.go
  90. 55 39
      daemon/networkdriver/ipallocator/allocator.go
  91. 253 11
      daemon/networkdriver/ipallocator/allocator_test.go
  92. 0 15
      daemon/networkdriver/network_test.go
  93. 5 8
      daemon/networkdriver/portallocator/portallocator.go
  94. 29 0
      daemon/networkdriver/portallocator/portallocator_test.go
  95. 1 1
      daemon/networkdriver/portmapper/mapper.go
  96. 7 2
      daemon/networkdriver/portmapper/proxy.go
  97. 13 18
      daemon/networkdriver/utils.go
  98. 19 6
      daemon/state.go
  99. 3 1
      daemon/state_test.go
  100. 7 3
      daemon/utils.go

+ 1 - 0
.gitignore

@@ -4,6 +4,7 @@
 .vagrant*
 bin
 docker/docker
+*.exe
 .*.swp
 a.out
 *.orig

+ 29 - 1
.mailmap

@@ -1,8 +1,10 @@
-# Generate AUTHORS: hack/generate-authors.sh
+# Generate AUTHORS: project/generate-authors.sh
 
 # Tip for finding duplicates (besides scanning the output of AUTHORS for name
 # duplicates that aren't also email duplicates): scan the output of:
 #   git log --format='%aE - %aN' | sort -uf
+#
+# For explanation on this file format: man git-shortlog
 
 <charles.hooper@dotcloud.com> <chooper@plumata.com>
 <daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
@@ -29,6 +31,7 @@ Andy Smith <github@anarkystic.com>
 <victor.vieux@docker.com> <dev@vvieux.com>
 <victor.vieux@docker.com> <victor@docker.com>
 <victor.vieux@docker.com> <vieux@docker.com>
+<victor.vieux@docker.com> <victorvieux@gmail.com>
 <dominik@honnef.co> <dominikh@fork-bomb.org>
 <ehanchrow@ine.com> <eric.hanchrow@gmail.com>
 Walter Stanish <walter@pratyeka.org>
@@ -54,6 +57,7 @@ Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
 <proppy@google.com> <proppy@aminche.com>
 <michael@docker.com> <michael@crosbymichael.com>
 <michael@docker.com> <crosby.michael@gmail.com>
+<michael@docker.com> <crosbymichael@gmail.com>
 <github@developersupport.net> <github@metaliveblog.com> 
 <brandon@ifup.org> <brandon@ifup.co>
 <dano@spotify.com> <daniel.norberg@gmail.com>
@@ -63,10 +67,13 @@ Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
 <sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
 <solomon@docker.com> <solomon.hykes@dotcloud.com>
 <solomon@docker.com> <solomon@dotcloud.com>
+<solomon@docker.com> <s@docker.com>
 Sven Dowideit <SvenDowideit@home.org.au>
 Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@fosiki.com>
 Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@docker.com>
 Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
+Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
+Sven Dowideit <SvenDowideit@home.org.au> <sven@t440s.home.gateway>
 unclejack <unclejacksons@gmail.com> <unclejack@users.noreply.github.com>
 <alexl@redhat.com> <alexander.larsson@gmail.com>
 Alexandr Morozov <lk4d4math@gmail.com>
@@ -97,3 +104,24 @@ Matthew Heon <mheon@redhat.com> <mheon@mheonlaptop.redhat.com>
 <andrew.weiss@outlook.com> <andrew.weiss@microsoft.com>
 Francisco Carriedo <fcarriedo@gmail.com>
 <julienbordellier@gmail.com> <git@julienbordellier.com>
+<ahmetb@microsoft.com> <ahmetalpbalkan@gmail.com>
+<lk4d4@docker.com> <lk4d4math@gmail.com>
+<arnaud.porterie@docker.com> <icecrime@gmail.com>
+<baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>
+Brian Goff <cpuguy83@gmail.com>
+<cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
+<ewindisch@docker.com> <eric@windisch.us>
+<frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
+Hollie Teal <hollie@docker.com>
+<hollie@docker.com> <hollie.teal@docker.com>
+<hollie@docker.com> <hollietealok@users.noreply.github.com>
+<huu@prismskylabs.com> <whoshuu@gmail.com>
+Jessica Frazelle <jess@docker.com> Jessie Frazelle <jfrazelle@users.noreply.github.com>
+<jess@docker.com> <jfrazelle@users.noreply.github.com>
+<konrad.wilhelm.kleine@gmail.com> <kwk@users.noreply.github.com>
+<tintypemolly@gmail.com> <tintypemolly@Ohui-MacBook-Pro.local>
+<estesp@linux.vnet.ibm.com> <estesp@gmail.com>
+<github@gone.nl> <thaJeztah@users.noreply.github.com>
+Thomas LEVEIL <thomasleveil@gmail.com> Thomas LÉVEIL <thomasleveil@users.noreply.github.com>
+<oi@truffles.me.uk> <timruffles@googlemail.com>
+<Vincent.Bernat@exoscale.ch> <bernat@luffy.cx>

+ 0 - 39
.travis.yml

@@ -1,39 +0,0 @@
-# Note: right now we don't use go-specific features of travis.
-# Later we might automate "go test" etc. (or do it inside a docker container...?)
-
-language: go
-
-go:
-# This should match the version in the Dockerfile.
-  - 1.3.1
-# Test against older versions too, just for a little extra retrocompat.
-  - 1.2
-
-# Let us have pretty experimental Docker-based Travis workers.
-# (These spin up much faster than the VM-based ones.)
-sudo: false
-
-# Disable the normal go build.
-install:
-  - export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper' # btrfs and devicemapper fail to compile thanks to a couple missing headers (which we can't install thanks to "sudo: false")
-  - export AUTO_GOPATH=1
-# some of Docker's unit tests don't work inside Travis (yet!), so we purge those test files for now
-  - rm -f daemon/graphdriver/btrfs/*_test.go # fails to compile (missing header)
-  - rm -f daemon/graphdriver/devmapper/*_test.go # fails to compile (missing header)
-  - rm -f daemon/execdriver/lxc/*_test.go # fails to run (missing "lxc-start")
-  - rm -f daemon/graphdriver/aufs/*_test.go # fails to run ("backing file system is unsupported for this graph driver")
-  - rm -f daemon/graphdriver/vfs/*_test.go # fails to run (not root, which these tests assume "/var/tmp/... no owned by uid 0")
-  - rm -f daemon/networkdriver/bridge/*_test.go # fails to run ("Failed to initialize network driver")
-  - rm -f graph/*_test.go # fails to run ("mkdir /tmp/docker-test.../vfs/dir/foo/etc/postgres: permission denied")
-  - rm -f pkg/mount/*_test.go # fails to run ("permission denied")
-
-before_script:
-  - env | sort
-
-script:
-  - hack/make.sh validate-dco
-  - hack/make.sh validate-gofmt
-  - DOCKER_CLIENTONLY=1 ./hack/make.sh dynbinary
-  - ./hack/make.sh dynbinary dyntest-unit
-
-# vim:set sw=2 ts=2:

+ 178 - 57
AUTHORS

@@ -1,69 +1,87 @@
 # This file lists all individuals having contributed content to the repository.
-# For how it is generated, see `hack/generate-authors.sh`.
+# For how it is generated, see `project/generate-authors.sh`.
 
 Aanand Prasad <aanand.prasad@gmail.com>
 Aaron Feng <aaron.feng@gmail.com>
 Aaron Huslage <huslage@gmail.com>
 Abel Muiño <amuino@gmail.com>
+Abhinav Ajgaonkar <abhinav316@gmail.com>
+Abin Shahab <ashahab@altiscale.com>
 Adam Miller <admiller@redhat.com>
 Adam Singer <financeCoding@gmail.com>
 Aditya <aditya@netroy.in>
 Adrian Mouat <adrian.mouat@gmail.com>
 Adrien Folie <folie.adrien@gmail.com>
+Ahmet Alp Balkan <ahmetb@microsoft.com>
 AJ Bowen <aj@gandi.net>
-Al Tobey <al@ooyala.com>
 alambike <alambike@gmail.com>
+Alan Thompson <cloojure@gmail.com>
+Albert Callarisa <shark234@gmail.com>
 Albert Zhang <zhgwenming@gmail.com>
 Aleksa Sarai <cyphar@cyphar.com>
-Alex Gaynor <alex.gaynor@gmail.com>
-Alex Warhawk <ax.warhawk@gmail.com>
 Alexander Larsson <alexl@redhat.com>
 Alexander Shopov <ash@kambanaria.org>
-Alexandr Morozov <lk4d4math@gmail.com>
+Alexandr Morozov <lk4d4@docker.com>
 Alexey Kotlyarov <alexey@infoxchange.net.au>
 Alexey Shamrin <shamrin@gmail.com>
+Alex Gaynor <alex.gaynor@gmail.com>
 Alexis THOMAS <fr.alexisthomas@gmail.com>
+Alex Warhawk <ax.warhawk@gmail.com>
 almoehi <almoehi@users.noreply.github.com>
+Al Tobey <al@ooyala.com>
+Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
 amangoel <amangoel@gmail.com>
+Amit Bakshi <ambakshi@gmail.com>
 AnandkumarPatel <anandkumarpatel@gmail.com>
-Andre Dublin <81dublin@gmail.com>
+Anand Patil <anand.prabhakar.patil@gmail.com>
 Andrea Luzzardi <aluzzardi@gmail.com>
-Andrea Turli <andrea.turli@gmail.com>
+Andreas Köhler <andi5.py@gmx.net>
 Andreas Savvides <andreas@editd.com>
 Andreas Tiefenthaler <at@an-ti.eu>
+Andrea Turli <andrea.turli@gmail.com>
+Andre Dublin <81dublin@gmail.com>
 Andrew Duckworth <grillopress@gmail.com>
 Andrew France <andrew@avito.co.uk>
 Andrew Macgregor <andrew.macgregor@agworld.com.au>
 Andrew Munsell <andrew@wizardapps.net>
+Andrews Medina <andrewsmedina@gmail.com>
 Andrew Weiss <andrew.weiss@outlook.com>
 Andrew Williams <williams.andrew@gmail.com>
-Andrews Medina <andrewsmedina@gmail.com>
+Andrey Petrov <andrey.petrov@shazow.net>
+Andrey Stolbovsky <andrey.stolbovsky@gmail.com>
 Andy Chambers <anchambers@paypal.com>
 andy diller <dillera@gmail.com>
 Andy Goldstein <agoldste@redhat.com>
 Andy Kipp <andy@rstudio.com>
 Andy Rothfusz <github@developersupport.net>
 Andy Smith <github@anarkystic.com>
+Andy Wilson <wilson.andrew.j+github@gmail.com>
 Anthony Bishopric <git@anthonybishopric.com>
 Anton Löfgren <anton.lofgren@gmail.com>
 Anton Nikitin <anton.k.nikitin@gmail.com>
 Antony Messerli <amesserl@rackspace.com>
 apocas <petermdias@gmail.com>
-Arnaud Porterie <icecrime@gmail.com>
+ArikaChen <eaglesora@gmail.com>
+Arnaud Porterie <arnaud.porterie@docker.com>
+Arthur Gautier <baloo@gandi.net>
 Asbjørn Enge <asbjorn@hanafjedle.net>
+averagehuman <averagehuman@users.noreply.github.com>
+Avi Miller <avi.miller@oracle.com>
 Barnaby Gray <barnaby@pickle.me.uk>
 Barry Allard <barry.allard@gmail.com>
 Bartłomiej Piotrowski <b@bpiotrowski.pl>
 bdevloed <boris.de.vloed@gmail.com>
 Ben Firshman <ben@firshman.co.uk>
+Benjamin Atkin <ben@benatkin.com>
+Benoit Chesneau <bchesneau@gmail.com>
 Ben Sargent <ben@brokendigits.com>
 Ben Toews <mastahyeti@gmail.com>
 Ben Wiklund <ben@daisyowl.com>
-Benjamin Atkin <ben@benatkin.com>
-Benoit Chesneau <bchesneau@gmail.com>
 Bernerd Schaefer <bj.schaefer@gmail.com>
+Bert Goethals <bert@bertg.be>
 Bhiraj Butala <abhiraj.butala@gmail.com>
 bin liu <liubin0329@users.noreply.github.com>
+Blake Geno <blakegeno@gmail.com>
 Bouke Haarsma <bouke@webatoom.nl>
 Boyd Hemphill <boyd@feedmagnet.com>
 Brandon Liu <bdon@bdon.org>
@@ -80,10 +98,13 @@ Brian Shumate <brian@couchbase.com>
 Brice Jaglin <bjaglin@teads.tv>
 Briehan Lombaard <briehan.lombaard@gmail.com>
 Bruno Bigras <bigras.bruno@gmail.com>
+Bruno Binet <bruno.binet@gmail.com>
 Bruno Renié <brutasse@gmail.com>
 Bryan Bess <squarejaw@bsbess.com>
 Bryan Matsuo <bryan.matsuo@gmail.com>
 Bryan Murphy <bmurphy1976@gmail.com>
+Burke Libbey <burke@libbey.me>
+Byung Kang <byung.kang.ctr@amrdec.army.mil>
 Caleb Spare <cespare@gmail.com>
 Calen Pennington <cale@edx.org>
 Cameron Boehmer <cameron.boehmer@gmail.com>
@@ -95,56 +116,68 @@ Charlie Lewis <charliel@lab41.org>
 Chewey <prosto-chewey@users.noreply.github.com>
 Chia-liang Kao <clkao@clkao.org>
 Chris Alfonso <calfonso@redhat.com>
+Chris Armstrong <chris@opdemand.com>
+chrismckinnel <chris.mckinnel@tangentlabs.co.uk>
 Chris Snow <chsnow123@gmail.com>
 Chris St. Pierre <chris.a.st.pierre@gmail.com>
-chrismckinnel <chris.mckinnel@tangentlabs.co.uk>
 Christian Berendt <berendt@b1-systems.de>
 ChristoperBiscardi <biscarch@sketcht.com>
-Christophe Troestler <christophe.Troestler@umons.ac.be>
 Christopher Currie <codemonkey+github@gmail.com>
 Christopher Rigor <crigor@gmail.com>
+Christophe Troestler <christophe.Troestler@umons.ac.be>
 Ciro S. Costa <ciro.costa@usp.br>
 Clayton Coleman <ccoleman@redhat.com>
 Colin Dunklau <colin.dunklau@gmail.com>
 Colin Rice <colin@daedrum.net>
 Colin Walters <walters@verbum.org>
 Cory Forsyth <cory.forsyth@gmail.com>
-cpuguy83 <cpuguy83@gmail.com>
 cressie176 <github@stephen-cresswell.net>
 Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
 Daan van Berkel <daan.v.berkel.1980@gmail.com>
+Daehyeok.Mun <daehyeok@gmail.com>
 Dafydd Crosby <dtcrsby@gmail.com>
 Dan Buch <d.buch@modcloth.com>
+Dan Cotora <dan@bluevision.ro>
+Dan Griffin <dgriffin@peer1.com>
 Dan Hirsch <thequux@upstandinghackers.com>
-Dan Keder <dan.keder@gmail.com>
-Dan McPherson <dmcphers@redhat.com>
-Dan Stine <sw@stinemail.com>
-Dan Walsh <dwalsh@redhat.com>
-Dan Williams <me@deedubs.com>
+Daniel, Dao Quang Minh <dqminh89@gmail.com>
 Daniel Exner <dex@dragonslave.de>
+Daniel Farrell <dfarrell@redhat.com>
 Daniel Garcia <daniel@danielgarcia.info>
 Daniel Gasienica <daniel@gasienica.ch>
+Daniel Menet <membership@sontags.ch>
 Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
 Daniel Norberg <dano@spotify.com>
 Daniel Nordberg <dnordberg@gmail.com>
 Daniel Robinson <gottagetmac@gmail.com>
 Daniel Von Fange <daniel@leancoder.com>
 Daniel YC Lin <dlin.tw@gmail.com>
-Daniel, Dao Quang Minh <dqminh89@gmail.com>
+Dan Keder <dan.keder@gmail.com>
+Dan McPherson <dmcphers@redhat.com>
 Danny Berger <dpb587@gmail.com>
 Danny Yates <danny@codeaholics.org>
+Dan Stine <sw@stinemail.com>
+Dan Walsh <dwalsh@redhat.com>
+Dan Williams <me@deedubs.com>
 Darren Coxall <darren@darrencoxall.com>
 Darren Shepherd <darren.s.shepherd@gmail.com>
 David Anderson <dave@natulte.net>
 David Calavera <david.calavera@gmail.com>
 David Corking <dmc-source@dcorking.com>
+Davide Ceretti <davide.ceretti@hogarthww.com>
 David Gageot <david@gageot.net>
+David Gebler <davidgebler@gmail.com>
 David Mcanulty <github@hellspark.com>
+David Pelaez <pelaez89@gmail.com>
 David Röthlisberger <david@rothlis.net>
 David Sissitka <me@dsissitka.com>
+Dawn Chen <dawnchen@google.com>
+decadent <decadent@users.noreply.github.com>
 Deni Bertovic <deni@kset.org>
 Derek <crq@kernel.org>
+Derek McGowan <derek@mcgstyle.net>
 Deric Crago <deric.crago@gmail.com>
+Deshi Xiao <dxiao@redhat.com>
 Dinesh Subhraveti <dineshs@altiscale.com>
 Djibril Koné <kone.djibril@gmail.com>
 dkumor <daniel@dkumor.com>
@@ -154,11 +187,13 @@ Dominik Honnef <dominik@honnef.co>
 Don Spaulding <donspauldingii@gmail.com>
 Doug Davis <dug@us.ibm.com>
 doug tangren <d.tangren@gmail.com>
-Dr Nic Williams <drnicwilliams@gmail.com>
+dragon788 <dragon788@users.noreply.github.com>
 Dražen Lučanin <kermit666@gmail.com>
+Dr Nic Williams <drnicwilliams@gmail.com>
 Dustin Sallings <dustin@spy.net>
 Edmund Wagner <edmund-wagner@web.de>
 Eiichi Tsukata <devel@etsukata.com>
+Eike Herzbach <eike@herzbach.net>
 Eivind Uggedal <eivind@uggedal.com>
 Elias Probst <mail@eliasprobst.eu>
 Emil Hernvall <emil@quench.at>
@@ -166,17 +201,19 @@ Emily Rose <emily@contactvibe.com>
 Eric Hanchrow <ehanchrow@ine.com>
 Eric Lee <thenorthsecedes@gmail.com>
 Eric Myhre <hash@exultant.us>
-Eric Windisch <eric@windisch.us>
+Eric Paris <eparis@redhat.com>
 Eric Windisch <ewindisch@docker.com>
 Erik Hollensbe <github@hollensbe.org>
 Erik Inge Bolsø <knan@redpill-linpro.com>
+Erik Kristensen <erik@erikkristensen.com>
 Erno Hopearuoho <erno.hopearuoho@gmail.com>
+Eugene Yakubovich <eugene.yakubovich@coreos.com>
 eugenkrizo <eugen.krizo@gmail.com>
+evanderkoogh <info@erronis.nl>
 Evan Hazlett <ejhazlett@gmail.com>
 Evan Krall <krall@yelp.com>
 Evan Phoenix <evan@fallingsnow.net>
 Evan Wies <evan@neomantra.net>
-evanderkoogh <info@erronis.nl>
 Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
 ezbercih <cem.ezberci@gmail.com>
 Fabio Falci <fabiofalci@gmail.com>
@@ -186,49 +223,60 @@ Faiz Khan <faizkhan00@gmail.com>
 Fareed Dudhia <fareeddudhia@googlemail.com>
 Felix Rabe <felix@rabe.io>
 Fernando <fermayo@gmail.com>
+Filipe Brandenburger <filbranden@google.com>
 Flavio Castelli <fcastelli@suse.com>
 FLGMwt <ryan.stelly@live.com>
 Francisco Carriedo <fcarriedo@gmail.com>
 Francisco Souza <f@souza.cc>
 Frank Macreery <frank@macreery.com>
-Fred Lifton <fred.lifton@docker.com>
+Frank Rosquin <frank.rosquin+github@gmail.com>
 Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
 Frederik Loeffert <frederik@zitrusmedia.de>
+Fred Lifton <fred.lifton@docker.com>
 Freek Kalter <freek@kalteronline.org>
 Gabe Rosenhouse <gabe@missionst.com>
 Gabor Nagy <mail@aigeruth.hu>
 Gabriel Monroy <gabriel@opdemand.com>
 Galen Sampson <galen.sampson@gmail.com>
 Gareth Rushgrove <gareth@morethanseven.net>
+gautam, prasanna <prasannagautam@gmail.com>
 Geoffrey Bachelet <grosfrais@gmail.com>
+George Xie <georgexsh@gmail.com>
 Gereon Frey <gereon.frey@dynport.de>
 German DZ <germ@ndz.com.ar>
 Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
 Giuseppe Mazzotta <gdm85@users.noreply.github.com>
 Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org>
+Gleb M Borisov <borisov.gleb@gmail.com>
 Glyn Normington <gnormington@gopivotal.com>
 Goffert van Gool <goffert@phusion.nl>
+golubbe <ben.golub@dotcloud.com>
 Graydon Hoare <graydon@pobox.com>
 Greg Thornton <xdissent@me.com>
 grunny <mwgrunny@gmail.com>
 Guilherme Salgado <gsalgado@gmail.com>
+Guillaume Dufour <gdufour.prestataire@voyages-sncf.com>
 Guillaume J. Charmes <guillaume.charmes@docker.com>
 Gurjeet Singh <gurjeet@singh.im>
 Guruprasad <lgp171188@gmail.com>
+Hans Rødtang <hansrodtang@gmail.com>
 Harald Albers <github@albersweb.de>
 Harley Laue <losinggeneration@gmail.com>
 Hector Castro <hectcastro@gmail.com>
 Henning Sprang <henning.sprang@gmail.com>
 Hobofan <goisser94@gmail.com>
-Hollie Teal <hollie.teal@docker.com>
-Hollie Teal <hollietealok@users.noreply.github.com>
-hollietealok <hollie@docker.com>
+Hollie Teal <hollie@docker.com>
+Huayi Zhang <irachex@gmail.com>
+Hugo Duncan <hugo@hugoduncan.org>
 Hunter Blanks <hunter@twilio.com>
+Hu Tao <hutao@cn.fujitsu.com>
+Huu Nguyen <huu@prismskylabs.com>
 hyeongkyu.lee <hyeongkyu.lee@navercorp.com>
 Ian Babrou <ibobrik@gmail.com>
 Ian Bull <irbull@gmail.com>
 Ian Main <imain@redhat.com>
 Ian Truslove <ian.truslove@gmail.com>
+Igor Dolzhikov <bluesriverz@gmail.com>
 ILYA Khlopotov <ilya.khlopotov@gmail.com>
 inglesp <peter.inglesby@gmail.com>
 Isaac Dupree <antispam@idupree.com>
@@ -236,8 +284,8 @@ Isabel Jimenez <contact.isabeljimenez@gmail.com>
 Isao Jonas <isao.jonas@gmail.com>
 Ivan Fraixedes <ifcdev@gmail.com>
 Jack Danger Canty <jackdanger@squareup.com>
-Jake Moshenko <jake@devtable.com>
 jakedt <jake@devtable.com>
+Jake Moshenko <jake@devtable.com>
 James Allen <jamesallen0108@gmail.com>
 James Carr <james.r.carr@gmail.com>
 James DeFelice <james.defelice@ishisystems.com>
@@ -245,6 +293,7 @@ James Harrison Fisher <jameshfisher@gmail.com>
 James Kyle <james@jameskyle.org>
 James Mills <prologic@shortcircuit.net.au>
 James Turnbull <james@lovedthanlost.net>
+Jan Keromnes <janx@linux.com>
 Jan Pazdziora <jpazdziora@redhat.com>
 Jan Toebes <jan@toebes.info>
 Jaroslaw Zabiello <hipertracker@gmail.com>
@@ -256,31 +305,35 @@ Jason McVetta <jason.mcvetta@gmail.com>
 Jason Plum <jplum@devonit.com>
 Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
 Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
+Jean-Paul Calderone <exarkun@twistedmatrix.com>
 Jeff Lindsay <progrium@gmail.com>
-Jeff Welch <whatthejeff@gmail.com>
 Jeffrey Bolle <jeffreybolle@gmail.com>
+Jeff Welch <whatthejeff@gmail.com>
 Jeremy Grosser <jeremy@synack.me>
+Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
 Jesse Dubay <jesse@thefortytwo.net>
+Jessica Frazelle <jess@docker.com>
 Jezeniel Zapanta <jpzapanta22@gmail.com>
 Jilles Oldenbeuving <ojilles@gmail.com>
 Jim Alateras <jima@comware.com.au>
-Jim Perrin <jperrin@centos.org>
 Jimmy Cuadra <jimmy@jimmycuadra.com>
+Jim Perrin <jperrin@centos.org>
 Jiří Župka <jzupka@redhat.com>
 Joe Beda <joe.github@bedafamily.com>
+Joe Ferguson <joe@infosiftr.com>
+Joel Handwell <joelhandwell@gmail.com>
 Joe Shaw <joe@joeshaw.org>
 Joe Van Dyk <joe@tanga.com>
-Joel Handwell <joelhandwell@gmail.com>
 Joffrey F <joffrey@docker.com>
 Johan Euphrosine <proppy@google.com>
-Johan Rydberg <johan.rydberg@gmail.com>
 Johannes 'fish' Ziemke <github@freigeist.org>
+Johan Rydberg <johan.rydberg@gmail.com>
 John Costa <john.costa@gmail.com>
 John Feminella <jxf@jxf.me>
 John Gardiner Myers <jgmyers@proofpoint.com>
+John Gossman <johngos@microsoft.com>
 John OBrien III <jobrieniii@yahoo.com>
 John Warwick <jwarwick@gmail.com>
-Jon Wedaman <jweede@gmail.com>
 Jonas Pfenniger <jonas@pfenniger.name>
 Jonathan Boulle <jonathanboulle@gmail.com>
 Jonathan Camp <jonathan@irondojo.com>
@@ -288,22 +341,25 @@ Jonathan McCrohan <jmccrohan@gmail.com>
 Jonathan Mueller <j.mueller@apoveda.ch>
 Jonathan Pares <jonathanpa@users.noreply.github.com>
 Jonathan Rudenberg <jonathan@titanous.com>
+Jon Wedaman <jweede@gmail.com>
 Joost Cassee <joost@cassee.net>
 Jordan Arentsen <blissdev@gmail.com>
 Jordan Sissel <jls@semicomplete.com>
 Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
 Joseph Hager <ajhager@gmail.com>
-Josh <jokajak@gmail.com>
 Josh Hawn <josh.hawn@docker.com>
+Josh <jokajak@gmail.com>
 Josh Poimboeuf <jpoimboe@redhat.com>
+Josiah Kiehl <jkiehl@riotgames.com>
 JP <jpellerin@leapfrogonline.com>
+Julian Taylor <jtaylor.debian@googlemail.com>
 Julien Barbier <write0@gmail.com>
 Julien Bordellier <julienbordellier@gmail.com>
 Julien Dubois <julien.dubois@gmail.com>
 Justin Force <justin.force@gmail.com>
 Justin Plock <jplock@users.noreply.github.com>
 Justin Simonelis <justin.p.simonelis@gmail.com>
-Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
+Jyrki Puttonen <jyrkiput@gmail.com>
 Karan Lyons <karan@karanlyons.com>
 Karl Grzeszczak <karlgrz@gmail.com>
 Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
@@ -311,57 +367,68 @@ Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
 Keli Hu <dev@keli.hu>
 Ken Cochrane <kencochrane@gmail.com>
 Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
-Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
 Kevin Clark <kevin.clark@gmail.com>
 Kevin J. Lynagh <kevin@keminglabs.com>
 Kevin Menard <kevin@nirvdrum.com>
+Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
 Kevin Wallace <kevin@pentabarf.net>
 Keyvan Fatehi <keyvanfatehi@gmail.com>
 kies <lleelm@gmail.com>
-Kim BKC Carlbacker <kim.carlbacker@gmail.com>
 kim0 <email.ahmedkamal@googlemail.com>
+Kim BKC Carlbacker <kim.carlbacker@gmail.com>
 Kimbro Staken <kstaken@kstaken.com>
 Kiran Gangadharan <kiran.daredevil@gmail.com>
 knappe <tyler.knappe@gmail.com>
 Kohei Tsuruta <coheyxyz@gmail.com>
+Konrad Kleine <konrad.wilhelm.kleine@gmail.com>
 Konstantin Pelykh <kpelykh@zettaset.com>
+krrg <krrgithub@gmail.com>
 Kyle Conroy <kyle.j.conroy@gmail.com>
 kyu <leehk1227@gmail.com>
 Lachlan Coote <lcoote@vmware.com>
+Lajos Papp <lajos.papp@sequenceiq.com>
+Lakshan Perera <lakshan@laktek.com>
 lalyos <lalyos@yahoo.com>
 Lance Chen <cyen0312@gmail.com>
 Lars R. Damerow <lars@pixar.com>
 Laurie Voss <github@seldo.com>
 leeplay <hyeongkyu.lee@navercorp.com>
+Lei Jitang <leijitang@huawei.com>
 Len Weincier <len@cloudafrica.net>
+Leszek Kowalski <github@leszekkowalski.pl>
 Levi Gross <levi@levigross.com>
 Lewis Peckover <lew+github@lew.io>
 Liang-Chi Hsieh <viirya@gmail.com>
+limsy <seongyeol37@gmail.com>
 Lokesh Mandvekar <lsm5@fedoraproject.org>
 Louis Opter <kalessin@kalessin.fr>
 lukaspustina <lukas.pustina@centerdevice.com>
 lukemarsden <luke@digital-crocus.com>
+Madhu Venugopal <madhu@socketplane.io>
 Mahesh Tiyyagura <tmahesh@gmail.com>
+Malte Janduda <mail@janduda.net>
 Manfred Zabarauskas <manfredas@zabarauskas.com>
 Manuel Meurer <manuel@krautcomputing.com>
 Manuel Woelker <github@manuel.woelker.org>
 Marc Abramowitz <marc@marc-abramowitz.com>
 Marc Kuo <kuomarc2@gmail.com>
-Marc Tamsky <mtamsky@gmail.com>
 Marco Hennings <marco.hennings@freiheit.com>
+Marc Tamsky <mtamsky@gmail.com>
 Marcus Farkas <toothlessgear@finitebox.com>
-Marcus Ramberg <marcus@nordaaker.com>
 marcuslinke <marcus.linke@gmx.de>
+Marcus Ramberg <marcus@nordaaker.com>
 Marek Goldmann <marek.goldmann@gmail.com>
 Marius Voila <marius.voila@gmail.com>
 Mark Allen <mrallen1@yahoo.com>
 Mark McGranaghan <mmcgrana@gmail.com>
 Marko Mikulicic <mmikulicic@gmail.com>
+Marko Tibold <marko@tibold.nl>
 Markus Fix <lispmeister@gmail.com>
 Martijn van Oosterhout <kleptog@svana.org>
 Martin Redmond <martin@tinychat.com>
 Mason Malone <mason.malone@gmail.com>
 Mateusz Sulima <sulima.mateusz@gmail.com>
+Mathias Monnerville <mathias@monnerville.com>
 Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
 Matt Apperson <me@mattapperson.com>
 Matt Bachmann <bachmann.matt@gmail.com>
@@ -372,17 +439,24 @@ Matthias Klumpp <matthias@tenstral.net>
 Matthias Kühnle <git.nivoc@neverbox.com>
 mattymo <raytrac3r@gmail.com>
 mattyw <mattyw@me.com>
-Max Shytikov <mshytikov@gmail.com>
-Maxim Treskin <zerthurd@gmail.com>
 Maxime Petazzoni <max@signalfuse.com>
+Maxim Treskin <zerthurd@gmail.com>
+Max Shytikov <mshytikov@gmail.com>
+Médi-Rémi Hashim <medimatrix@users.noreply.github.com>
 meejah <meejah@meejah.ca>
+Mengdi Gao <usrgdd@gmail.com>
+Mert Yazıcıoğlu <merty@users.noreply.github.com>
 Michael Brown <michael@netdirect.ca>
 Michael Crosby <michael@docker.com>
 Michael Gorsuch <gorsuch@github.com>
+Michael Hudson-Doyle <michael.hudson@linaro.org>
 Michael Neale <michael.neale@gmail.com>
+Michaël Pailloncy <mpapo.dev@gmail.com>
 Michael Prokop <github@michael-prokop.at>
+Michael Scharf <github@scharf.gr>
 Michael Stapelberg <michael+gh@stapelberg.de>
-Michaël Pailloncy <mpapo.dev@gmail.com>
+Michael Thies <michaelthies78@gmail.com>
+Michal Jemala <michal.jemala@gmail.com>
 Michiel@unhosted <michiel@unhosted.org>
 Miguel Angel Fernández <elmendalerenda@gmail.com>
 Mike Chelen <michael.chelen@gmail.com>
@@ -395,32 +469,40 @@ Mohit Soni <mosoni@ebay.com>
 Morgante Pell <morgante.pell@morgante.net>
 Morten Siebuhr <sbhr@sbhr.dk>
 Mrunal Patel <mrunalp@gmail.com>
+mschurenko <matt.schurenko@gmail.com>
+Mustafa Akın <mustafa91@gmail.com>
 Nan Monnand Deng <monnand@gmail.com>
 Naoki Orii <norii@cs.cmu.edu>
 Nate Jones <nate@endot.org>
+Nathan Hsieh <hsieh.nathan@gmail.com>
 Nathan Kleyn <nathan@nathankleyn.com>
 Nathan LeClaire <nathan.leclaire@docker.com>
 Nelson Chen <crazysim@gmail.com>
 Niall O'Higgins <niallo@unworkable.org>
+Nicholas E. Rabenau <nerab@gmx.at>
 Nick Payne <nick@kurai.co.uk>
 Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
 Nick Stinemates <nick@stinemates.org>
+Nicolas De loof <nicolas.deloof@gmail.com>
 Nicolas Dudebout <nicolas.dudebout@gatech.edu>
+Nicolas Goy <kuon@goyman.com>
 Nicolas Kaiser <nikai@nikai.net>
 NikolaMandic <mn080202@gmail.com>
 noducks <onemannoducks@gmail.com>
 Nolan Darilek <nolan@thewordnerd.info>
-O.S. Tezer <ostezer@gmail.com>
+nzwsch <hi@nzwsch.com>
 OddBloke <daniel@daniel-watkins.co.uk>
 odk- <github@odkurzacz.org>
 Oguz Bilgic <fisyonet@gmail.com>
+Oh Jinkyun <tintypemolly@gmail.com>
 Ole Reifschneider <mail@ole-reifschneider.de>
 Olivier Gambier <dmp42@users.noreply.github.com>
+O.S. Tezer <ostezer@gmail.com>
 pandrew <letters@paulnotcom.se>
 Pascal Borreli <pascal@borreli.com>
+Pascal Hartig <phartig@rdrei.net>
 Patrick Hemmer <patrick.hemmer@gmail.com>
 pattichen <craftsbear@gmail.com>
-Paul <paul9869@gmail.com>
 Paul Annesley <paul@annesley.cc>
 Paul Bowsher <pbowsher@globalpersonals.co.uk>
 Paul Hammond <paul@paulhammond.org>
@@ -428,25 +510,39 @@ Paul Jimenez <pj@place.org>
 Paul Lietar <paul@lietar.net>
 Paul Morie <pmorie@gmail.com>
 Paul Nasrat <pnasrat@gmail.com>
+Paul <paul9869@gmail.com>
 Paul Weaver <pauweave@cisco.com>
+Pavlos Ratis <dastergon@gentoo.org>
 Peter Bourgon <peter@bourgon.org>
 Peter Braden <peterbraden@peterbraden.co.uk>
+Peter Ericson <pdericson@gmail.com>
+Peter Salvatore <peter@psftw.com>
 Peter Waller <p@pwaller.net>
-Phil <underscorephil@gmail.com>
-Phil Spitler <pspitler@gmail.com>
+Phil Estes <estesp@linux.vnet.ibm.com>
+Philipp Weissensteiner <mail@philippweissensteiner.com>
 Phillip Alexander <git@phillipalexander.io>
+Phil Spitler <pspitler@gmail.com>
+Phil <underscorephil@gmail.com>
 Piergiuliano Bossi <pgbossi@gmail.com>
 Pierre-Alain RIVIERE <pariviere@ippon.fr>
+Pierre <py@poujade.org>
 Piotr Bogdan <ppbogdan@gmail.com>
+pixelistik <pixelistik@users.noreply.github.com>
+Prasanna Gautam <prasannagautam@gmail.com>
+Przemek Hejman <przemyslaw.hejman@gmail.com>
 pysqz <randomq@126.com>
+Qiang Huang <h.huangqiang@huawei.com>
 Quentin Brossard <qbrossard@gmail.com>
 r0n22 <cameron.regan@gmail.com>
 Rafal Jeczalik <rjeczalik@gmail.com>
+Rafe Colton <rafael.colton@gmail.com>
 Rajat Pandit <rp@rajatpandit.com>
 Rajdeep Dua <dua_rajdeep@yahoo.com>
 Ralph Bean <rbean@redhat.com>
 Ramkumar Ramachandra <artagnon@gmail.com>
 Ramon van Alteren <ramon@vanalteren.nl>
+Recursive Madman <recursive.madman@gmx.de>
+Remi Rampin <remirampin@gmail.com>
 Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
 rgstephens <greg@udon.org>
 Rhys Hiltner <rhys@twitch.tv>
@@ -455,6 +551,7 @@ Richo Healey <richo@psych0tik.net>
 Rick Bradley <rick@users.noreply.github.com>
 Rick van de Loo <rickvandeloo@gmail.com>
 Robert Bachmann <rb@robertbachmann.at>
+Robert Bittle <guywithnose@gmail.com>
 Robert Obryk <robryk@gmail.com>
 Roberto G. Hashioka <roberto.hashioka@docker.com>
 Robin Speekenbrink <robin@kingsquare.nl>
@@ -470,25 +567,30 @@ Rovanion Luckey <rovanion.luckey@gmail.com>
 Rudolph Gottesheim <r.gottesheim@loot.at>
 Ryan Anderson <anderson.ryanc@gmail.com>
 Ryan Aslett <github@mixologic.com>
+Ryan Detzel <ryan.detzel@gmail.com>
 Ryan Fowler <rwfowler@gmail.com>
 Ryan O'Donnell <odonnellryanc@gmail.com>
 Ryan Seto <ryanseto@yak.net>
 Ryan Thomas <rthomas@atlassian.com>
-s-ko <aleks@s-ko.net>
 Sam Alba <sam.alba@gmail.com>
 Sam Bailey <cyprix@cyprix.com.au>
 Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
 Sam Reis <sreis@atlassian.com>
 Sam Rijs <srijs@airpost.net>
 Samuel Andaya <samuel@andaya.net>
+Samuel PHAN <samuel-phan@users.noreply.github.com>
 satoru <satorulogic@gmail.com>
 Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
 Scott Bessler <scottbessler@gmail.com>
 Scott Collier <emailscottcollier@gmail.com>
+Scott Johnston <scott@docker.com>
+Scott Walls <sawalls@umich.edu>
 Sean Cronin <seancron@gmail.com>
 Sean P. Kane <skane@newrelic.com>
 Sebastiaan van Stijn <github@gone.nl>
-Sebastiaan van Stijn <thaJeztah@users.noreply.github.com>
+Sébastien Luttringer <seblu@seblu.net>
+Sébastien <sebastien@yoozio.com>
+Sébastien Stormacq <sebsto@users.noreply.github.com>
 Senthil Kumar Selvaraj <senthil.thecoder@gmail.com>
 SeongJae Park <sj38.park@gmail.com>
 Shane Canon <scanon@lbl.gov>
@@ -496,28 +598,30 @@ shaunol <shaunol@gmail.com>
 Shawn Landden <shawn@churchofgit.com>
 Shawn Siefkas <shawn.siefkas@meredith.com>
 Shih-Yuan Lee <fourdollars@gmail.com>
+shuai-z <zs.broccoli@gmail.com>
 Silas Sewell <silas@sewell.org>
 Simon Taranto <simon.taranto@gmail.com>
 Sindhu S <sindhus@live.in>
 Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
+s-ko <aleks@s-ko.net>
 Solomon Hykes <solomon@docker.com>
 Song Gao <song@gao.io>
 Soulou <leo@unbekandt.eu>
 soulshake <amy@gandi.net>
 Sridatta Thatipamala <sthatipamala@gmail.com>
 Sridhar Ratnakumar <sridharr@activestate.com>
+Srini Brahmaroutu <sbrahma@us.ibm.com>
 Steeve Morin <steeve.morin@gmail.com>
 Stefan Praszalowicz <stefan@greplin.com>
 Stephen Crosby <stevecrozz@gmail.com>
 Steven Burgess <steven.a.burgess@hotmail.com>
+Steven Merrill <steven.merrill@gmail.com>
 sudosurootdev <sudosurootdev@gmail.com>
-Sven Dowideit <svendowideit@home.org.au>
+Sven Dowideit <SvenDowideit@home.org.au>
 Sylvain Bellemare <sylvain.bellemare@ezeep.com>
-Sébastien <sebastien@yoozio.com>
-Sébastien Luttringer <seblu@seblu.net>
-Sébastien Stormacq <sebsto@users.noreply.github.com>
 tang0th <tang0th@gmx.com>
 Tatsuki Sugiura <sugi@nemui.org>
+Ted M. Young <tedyoung@gmail.com>
 Tehmasp Chaudhri <tehmasp@gmail.com>
 Thatcher Peskens <thatcher@docker.com>
 Thermionix <bond711@gmail.com>
@@ -526,25 +630,32 @@ Thomas Bikeev <thomas.bikeev@mac.com>
 Thomas Frössman <thomasf@jossystem.se>
 Thomas Hansen <thomas.hansen@gmail.com>
 Thomas LEVEIL <thomasleveil@gmail.com>
+Thomas Orozco <thomas@orozco.fr>
 Thomas Schroeter <thomas@cliqz.com>
 Tianon Gravi <admwiggin@gmail.com>
 Tibor Vass <teabee89@gmail.com>
 Tim Bosse <taim@bosboot.org>
+Tim Hockin <thockin@google.com>
+Timothy Hobbs <timothyhobbs@seznam.cz>
 Tim Ruffles <oi@truffles.me.uk>
-Tim Ruffles <timruffles@googlemail.com>
+Tim Smith <timbot@google.com>
 Tim Terhorst <mynamewastaken+git@gmail.com>
-Timothy Hobbs <timothyhobbs@seznam.cz>
 tjmehta <tj@init.me>
+tjwebb123 <tjwebb123@users.noreply.github.com>
+tobe <tobegit3hub@gmail.com>
 Tobias Bieniek <Tobias.Bieniek@gmx.de>
 Tobias Gesellchen <tobias@gesellix.de>
 Tobias Schmidt <ts@soundcloud.com>
 Tobias Schwab <tobias.schwab@dynport.de>
 Todd Lunter <tlunter@gmail.com>
+Tomasz Lipinski <tlipinski@users.noreply.github.com>
 Tom Fotherby <tom+github@peopleperhour.com>
 Tom Hulihan <hulihan.tom159@gmail.com>
 Tom Maaswinkel <tom.maaswinkel@12wiki.eu>
 Tommaso Visconti <tommaso.visconti@gmail.com>
+Tonis Tiigi <tonistiigi@gmail.com>
 Tony Daws <tony@daws.ca>
+Torstein Husebø <torstein@huseboe.net>
 tpng <benny.tpng@gmail.com>
 Travis Cline <travis.cline@gmail.com>
 Trent Ogren <tedwardo2@gmail.com>
@@ -560,33 +671,43 @@ Victor Vieux <victor.vieux@docker.com>
 Viktor Vojnovski <viktor.vojnovski@amadeus.com>
 Vincent Batts <vbatts@redhat.com>
 Vincent Bernat <bernat@luffy.cx>
+Vincent Bernat <Vincent.Bernat@exoscale.ch>
+Vincent Giersch <vincent.giersch@ovh.net>
 Vincent Mayers <vincent.mayers@inbloom.org>
 Vincent Woo <me@vincentwoo.com>
 Vinod Kulkarni <vinod.kulkarni@gmail.com>
+Vishal Doshi <vishal.doshi@gmail.com>
 Vishnu Kannan <vishnuk@google.com>
 Vitor Monteiro <vmrmonteiro@gmail.com>
 Vivek Agarwal <me@vivek.im>
+Vivek Dasgupta <vdasgupt@redhat.com>
+Vivek Goyal <vgoyal@redhat.com>
 Vladimir Bulyga <xx@ccxx.cc>
 Vladimir Kirillov <proger@wilab.org.ua>
 Vladimir Rutsky <altsysrq@gmail.com>
+Vojtech Vitek (V-Teq) <vvitek@redhat.com>
 waitingkuo <waitingkuo0527@gmail.com>
 Walter Leibbrandt <github@wrl.co.za>
 Walter Stanish <walter@pratyeka.org>
+Ward Vandewege <ward@jhvc.com>
 WarheadsSE <max@warheads.net>
 Wes Morgan <cap10morgan@gmail.com>
 Will Dietz <w@wdtz.org>
-Will Rouesnel <w.rouesnel@gmail.com>
-Will Weaver <monkey@buildingbananas.com>
 William Delanoue <william.delanoue@gmail.com>
 William Henry <whenry@redhat.com>
 William Riancho <wr.wllm@gmail.com>
 William Thurston <thurstw@amazon.com>
+Will Rouesnel <w.rouesnel@gmail.com>
+Will Weaver <monkey@buildingbananas.com>
 wyc <wayne@neverfear.org>
 Xiuming Chen <cc@cxm.cc>
+xuzhaokui <cynicholas@gmail.com>
 Yang Bai <hamo.by@gmail.com>
 Yasunori Mahata <nori@mahata.net>
+Yohei Ueda <yohei@jp.ibm.com>
 Yurii Rashkovskii <yrashk@gmail.com>
 Zac Dover <zdover@redhat.com>
+Zach Borboa <zachborboa@gmail.com>
 Zain Memon <zain@inzain.net>
 Zaiste! <oh@zaiste.net>
 Zane DeGraffenried <zane.deg@gmail.com>
@@ -594,4 +715,4 @@ Zilin Du <zilin.du@gmail.com>
 zimbatm <zimbatm@zimbatm.com>
 Zoltan Tombol <zoltan.tombol@gmail.com>
 zqh <zqhxuyuan@gmail.com>
-Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
+尹吉峰 <jifeng.yin@gmail.com>

+ 1 - 1
CHANGELOG.md

@@ -46,7 +46,7 @@
 #### Builder
 - Fix escaping `$` for environment variables
 - Fix issue with lowercase `onbuild` Dockerfile instruction
-- Restrict envrionment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER`
+- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER`
 
 ## 1.3.0 (2014-10-14)
 

+ 23 - 10
CONTRIBUTING.md

@@ -6,27 +6,36 @@ feels wrong or incomplete.
 
 ## Topics
 
-* [Security Reports](#security-reports)
+* [Reporting Security Issues](#reporting-security-issues)
 * [Design and Cleanup Proposals](#design-and-cleanup-proposals)
 * [Reporting Issues](#reporting-issues)
 * [Build Environment](#build-environment)
 * [Contribution Guidelines](#contribution-guidelines)
 * [Community Guidelines](#docker-community-guidelines)
 
-## Security Reports
+## Reporting Security Issues
 
-Please **DO NOT** file an issue for security related issues. Please send your
-reports to [security@docker.com](mailto:security@docker.com) instead.
+The Docker maintainers take security very seriously. If you discover a security issue,
+please bring it to their attention right away!
+
+Please send your report privately to [security@docker.com](mailto:security@docker.com),
+please **DO NOT** file a public issue.
+
+Security reports are greatly appreciated and we will publicly thank you for it. We also
+like to send gifts - if you're into Docker swag make sure to let us know :)
+We currently do not offer a paid security bounty program, but are not ruling it out in
+the future.
 
 ## Design and Cleanup Proposals
 
 When considering a design proposal, we are looking for:
 
 * A description of the problem this design proposal solves
-* An issue -- not a pull request -- that describes what you will take action on
+* A pull request, not an issue, that modifies the documentation describing
+  the feature you are proposing, adding new documentation if necessary.
   * Please prefix your issue with `Proposal:` in the title
-* Please review [the existing Proposals](https://github.com/dotcloud/docker/issues?direction=asc&labels=Proposal&page=1&sort=created&state=open)
-  before reporting a new issue. You can always pair with someone if you both
+* Please review [the existing Proposals](https://github.com/docker/docker/pulls?q=is%3Aopen+is%3Apr+label%3AProposal)
+  before reporting a new one. You can always pair with someone if you both
   have the same idea.
 
 When considering a cleanup task, we are looking for:
@@ -39,6 +48,10 @@ When considering a cleanup task, we are looking for:
 
 ## Reporting Issues
 
+A great way to contribute to the project is to send a detailed report when you
+encounter an issue. We always appreciate a well-written, thorough bug report,
+and will thank you for it!
+
 When reporting [issues](https://github.com/docker/docker/issues) on
 GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc).
 Please include:
@@ -62,7 +75,7 @@ docs](http://docs.docker.com/contributing/devenvironment/).
 ### Pull requests are always welcome
 
 We are always thrilled to receive pull requests, and do our best to
-process them as fast as possible. Not sure if that typo is worth a pull
+process them as quickly as possible. Not sure if that typo is worth a pull
 request? Do it! We will appreciate it.
 
 If your pull request is not accepted on the first try, don't be
@@ -159,7 +172,7 @@ component affected. For example, if a change affects `docs/` and `registry/`, it
 needs an absolute majority from the maintainers of `docs/` AND, separately, an
 absolute majority of the maintainers of `registry/`.
 
-For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)
+For more details see [MAINTAINERS.md](project/MAINTAINERS.md)
 
 ### Sign your work
 
@@ -310,7 +323,7 @@ do need a fair way to deal with people who are making our community suck.
   will be addressed immediately and are not subject to 3 strikes or
   forgiveness.
 
-* Contact james@docker.com to report abuse or appeal violations. In the case of
+* Contact abuse@docker.com to report abuse or appeal violations. In the case of
   appeals, we know that mistakes happen, and we'll work with you to come up with
   a fair solution if there has been a misunderstanding.
 

+ 5 - 3
Dockerfile

@@ -23,7 +23,6 @@
 # the case. Therefore, you don't have to disable it anymore.
 #
 
-docker-version	0.6.1
 FROM	ubuntu:14.04
 MAINTAINER	Tianon Gravi <admwiggin@gmail.com> (@tianon)
 
@@ -69,7 +68,10 @@ RUN	cd /usr/local/go/src && ./make.bash --no-clean 2>&1
 ENV	DOCKER_CROSSPLATFORMS	\
 	linux/386 linux/arm \
 	darwin/amd64 darwin/386 \
-	freebsd/amd64 freebsd/386 freebsd/arm
+	freebsd/amd64 freebsd/386 freebsd/arm 
+#	windows is experimental for now
+#	windows/amd64 windows/386
+
 # (set an explicit GOARM of 5 for maximum compatibility)
 ENV	GOARM	5
 RUN	cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
@@ -104,7 +106,7 @@ RUN useradd --create-home --gid docker unprivilegeduser
 
 VOLUME	/var/lib/docker
 WORKDIR	/go/src/github.com/docker/docker
-ENV	DOCKER_BUILDTAGS	apparmor selinux
+ENV	DOCKER_BUILDTAGS	apparmor selinux btrfs_noversion
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT	["hack/dind"]

+ 25 - 6
Makefile

@@ -1,20 +1,39 @@
 .PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate
 
+# env vars passed through directly to Docker's build scripts
+# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily
+# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these
+DOCKER_ENVS := \
+	-e BUILDFLAGS \
+	-e DOCKER_CLIENTONLY \
+	-e DOCKER_EXECDRIVER \
+	-e DOCKER_GRAPHDRIVER \
+	-e TESTDIRS \
+	-e TESTFLAGS \
+	-e TIMEOUT
+# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
+
 # to allow `make BINDDIR=. shell` or `make BINDDIR= test`
 # (default to no bind mount if DOCKER_HOST is set)
 BINDDIR := $(if $(DOCKER_HOST),,bundles)
+DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)")
+
+# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs)
+DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR))
+
 # to allow `make DOCSPORT=9000 docs`
 DOCSPORT := 8000
 
 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
 DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
 DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
-DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)")
 
-DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TIMEOUT -e BUILDFLAGS -e TESTFLAGS -e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
-# to allow `make DOCSDIR=docs docs-shell`
-DOCKER_RUN_DOCS := docker run --rm -it $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) -e AWS_S3_BUCKET
+DOCKER_RUN_DOCKER := docker run --rm -it --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
+
+DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET
+
+# for some docs workarounds (see below in "docs-build" target)
+GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
 
 default: binary
 
@@ -34,7 +53,7 @@ docs-shell: docs-build
 	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
 
 docs-release: docs-build
-	$(DOCKER_RUN_DOCS) -e BUILD_ROOT "$(DOCKER_DOCS_IMAGE)" ./release.sh
+	$(DOCKER_RUN_DOCS) -e OPTIONS -e BUILD_ROOT "$(DOCKER_DOCS_IMAGE)" ./release.sh
 
 test: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli

+ 8 - 5
README.md

@@ -178,13 +178,14 @@ Contributing to Docker
 ======================
 
 [![GoDoc](https://godoc.org/github.com/docker/docker?status.png)](https://godoc.org/github.com/docker/docker)
-[![Travis](https://travis-ci.org/docker/docker.svg?branch=master)](https://travis-ci.org/docker/docker)
+[![Build Status](https://ci.dockerproject.com/github.com/docker/docker/status.svg?branch=master)](https://ci.dockerproject.com/github.com/docker/docker)
 
 Want to hack on Docker? Awesome! There are instructions to get you
-started [here](CONTRIBUTING.md).
+started [here](CONTRIBUTING.md). If you'd like to contribute to the
+documentation, please take a look at this [README.md](https://github.com/docker/docker/blob/master/docs/README.md).
 
-They are probably not perfect, please let us know if anything feels
-wrong or incomplete.
+These instructions are probably not perfect, please let us know if anything
+feels wrong or incomplete.
 
 ### Legal
 
@@ -201,5 +202,7 @@ For more information, please see http://www.bis.doc.gov
 
 Licensing
 =========
-Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text.
+Docker is licensed under the Apache License, Version 2.0. See
+[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
+license text.
 

+ 1 - 1
VERSION

@@ -1 +1 @@
-1.3.3
+1.3.3-dev

+ 1 - 0
api/MAINTAINERS

@@ -1 +1,2 @@
 Victor Vieux <vieux@docker.com> (@vieux)
+Jessie Frazelle <jess@docker.com> (@jfrazelle)

+ 35 - 2
api/client/cli.go

@@ -3,12 +3,16 @@ package client
 import (
 	"crypto/tls"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
+	"net"
+	"net/http"
 	"os"
 	"reflect"
 	"strings"
 	"text/template"
+	"time"
 
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/term"
@@ -34,6 +38,7 @@ type DockerCli struct {
 	isTerminalIn bool
 	// isTerminalOut describes if client's STDOUT is a TTY
 	isTerminalOut bool
+	transport     *http.Transport
 }
 
 var funcMap = template.FuncMap{
@@ -71,11 +76,11 @@ func (cli *DockerCli) Cmd(args ...string) error {
 		method, exists := cli.getMethod(args[0])
 		if !exists {
 			fmt.Println("Error: Command not found:", args[0])
-			return cli.CmdHelp(args[1:]...)
+			return cli.CmdHelp()
 		}
 		return method(args[1:]...)
 	}
-	return cli.CmdHelp(args...)
+	return cli.CmdHelp()
 }
 
 func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet {
@@ -100,6 +105,16 @@ func (cli *DockerCli) LoadConfigFile() (err error) {
 	return err
 }
 
+func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error {
+	// In order to attach to a container tty, input stream for the client must
+	// be a tty itself: redirecting or piping the client standard input is
+	// incompatible with `docker run -t`, `docker exec -t` or `docker attach`.
+	if ttyMode && attachStdin && !cli.isTerminalIn {
+		return errors.New("cannot enable tty mode on non tty input")
+	}
+	return nil
+}
+
 func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, proto, addr string, tlsConfig *tls.Config) *DockerCli {
 	var (
 		inFd          uintptr
@@ -131,6 +146,23 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey,
 		err = out
 	}
 
+	// The transport is created here for reuse during the client session
+	tr := &http.Transport{
+		TLSClientConfig: tlsConfig,
+	}
+
+	// Why 32? See issue 8035
+	timeout := 32 * time.Second
+	if proto == "unix" {
+		// no need in compressing for local communications
+		tr.DisableCompression = true
+		tr.Dial = func(_, _ string) (net.Conn, error) {
+			return net.DialTimeout(proto, addr, timeout)
+		}
+	} else {
+		tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
+	}
+
 	return &DockerCli{
 		proto:         proto,
 		addr:          addr,
@@ -144,5 +176,6 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey,
 		isTerminalOut: isTerminalOut,
 		tlsConfig:     tlsConfig,
 		scheme:        scheme,
+		transport:     tr,
 	}
 }

+ 206 - 61
api/client/commands.go

@@ -5,6 +5,7 @@ import (
 	"bytes"
 	"encoding/base64"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -17,11 +18,11 @@ import (
 	"runtime"
 	"strconv"
 	"strings"
-	"syscall"
 	"text/tabwriter"
 	"text/template"
 	"time"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/dockerversion"
 	"github.com/docker/docker/engine"
@@ -29,7 +30,6 @@ import (
 	"github.com/docker/docker/nat"
 	"github.com/docker/docker/opts"
 	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/log"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers/filters"
@@ -38,6 +38,7 @@ import (
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/docker/pkg/timeutils"
 	"github.com/docker/docker/pkg/units"
+	"github.com/docker/docker/pkg/urlutil"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
@@ -47,6 +48,10 @@ const (
 	tarHeaderSize = 512
 )
 
+var (
+	acceptedImageFilterTags = map[string]struct{}{"dangling": {}}
+)
+
 func (cli *DockerCli) CmdHelp(args ...string) error {
 	if len(args) > 1 {
 		method, exists := cli.getMethod(args[:2]...)
@@ -77,6 +82,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 	noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
 	rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
 	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers, even after unsuccessful builds")
+	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -110,13 +116,13 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 		} else {
 			context = ioutil.NopCloser(buf)
 		}
-	} else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) {
+	} else if urlutil.IsURL(cmd.Arg(0)) && (!urlutil.IsGitURL(cmd.Arg(0)) || !hasGit) {
 		isRemote = true
 	} else {
 		root := cmd.Arg(0)
-		if utils.IsGIT(root) {
+		if urlutil.IsGitURL(root) {
 			remoteURL := cmd.Arg(0)
-			if !strings.HasPrefix(remoteURL, "git://") && !strings.HasPrefix(remoteURL, "git@") && !utils.IsURL(remoteURL) {
+			if !urlutil.IsGitTransport(remoteURL) {
 				remoteURL = "https://" + remoteURL
 			}
 
@@ -143,6 +149,11 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 			return fmt.Errorf("Error reading .dockerignore: '%s'", err)
 		}
 		for _, pattern := range strings.Split(string(ignore), "\n") {
+			pattern = strings.TrimSpace(pattern)
+			if pattern == "" {
+				continue
+			}
+			pattern = filepath.Clean(pattern)
 			ok, err := filepath.Match(pattern, "Dockerfile")
 			if err != nil {
 				return fmt.Errorf("Bad .dockerignore pattern: '%s', error: %s", pattern, err)
@@ -169,7 +180,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 	// FIXME: ProgressReader shouldn't be this annoying to use
 	if context != nil {
 		sf := utils.NewStreamFormatter(false)
-		body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Sending build context to Docker daemon")
+		body = utils.ProgressReader(context, 0, cli.out, sf, true, "", "Sending build context to Docker daemon")
 	}
 	// Send the build context
 	v := &url.Values{}
@@ -208,6 +219,9 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 		v.Set("forcerm", "1")
 	}
 
+	if *pull {
+		v.Set("pull", "1")
+	}
 	cli.LoadConfigFile()
 
 	headers := http.Header(make(map[string][]string))
@@ -284,7 +298,10 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
 	// the password or email from the config file, so prompt them
 	if username != authconfig.Username {
 		if password == "" {
-			oldState, _ := term.SaveState(cli.inFd)
+			oldState, err := term.SaveState(cli.inFd)
+			if err != nil {
+				return err
+			}
 			fmt.Fprintf(cli.out, "Password: ")
 			term.DisableEcho(cli.inFd, oldState)
 
@@ -467,33 +484,69 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 	}
 	out.Close()
 
-	fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers"))
-	fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images"))
-	fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver"))
-	var driverStatus [][2]string
-	if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
-		return err
+	if remoteInfo.Exists("Containers") {
+		fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers"))
+	}
+	if remoteInfo.Exists("Images") {
+		fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images"))
+	}
+	if remoteInfo.Exists("Driver") {
+		fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver"))
+	}
+	if remoteInfo.Exists("DriverStatus") {
+		var driverStatus [][2]string
+		if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
+			return err
+		}
+		for _, pair := range driverStatus {
+			fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1])
+		}
+	}
+	if remoteInfo.Exists("ExecutionDriver") {
+		fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver"))
+	}
+	if remoteInfo.Exists("KernelVersion") {
+		fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
+	}
+	if remoteInfo.Exists("OperatingSystem") {
+		fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem"))
 	}
-	for _, pair := range driverStatus {
-		fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1])
+	if remoteInfo.Exists("NCPU") {
+		fmt.Fprintf(cli.out, "CPUs: %d\n", remoteInfo.GetInt("NCPU"))
+	}
+	if remoteInfo.Exists("MemTotal") {
+		fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(remoteInfo.GetInt64("MemTotal"))))
+	}
+	if remoteInfo.Exists("Name") {
+		fmt.Fprintf(cli.out, "Name: %s\n", remoteInfo.Get("Name"))
+	}
+	if remoteInfo.Exists("ID") {
+		fmt.Fprintf(cli.out, "ID: %s\n", remoteInfo.Get("ID"))
 	}
-	fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver"))
-	fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
-	fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem"))
 
 	if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" {
-		fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug"))
+		if remoteInfo.Exists("Debug") {
+			fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug"))
+		}
 		fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "")
-		fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd"))
-		fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines"))
-		fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener"))
-
+		if remoteInfo.Exists("NFd") {
+			fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd"))
+		}
+		if remoteInfo.Exists("NGoroutines") {
+			fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines"))
+		}
+		if remoteInfo.Exists("NEventsListener") {
+			fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener"))
+		}
 		if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" {
 			fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1)
 		}
 		if initPath := remoteInfo.Get("InitPath"); initPath != "" {
 			fmt.Fprintf(cli.out, "Init Path: %s\n", initPath)
 		}
+		if root := remoteInfo.Get("DockerRootDir"); root != "" {
+			fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", root)
+		}
 	}
 
 	if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
@@ -504,15 +557,22 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 			fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress"))
 		}
 	}
-	if !remoteInfo.GetBool("MemoryLimit") {
+	if remoteInfo.Exists("MemoryLimit") && !remoteInfo.GetBool("MemoryLimit") {
 		fmt.Fprintf(cli.err, "WARNING: No memory limit support\n")
 	}
-	if !remoteInfo.GetBool("SwapLimit") {
+	if remoteInfo.Exists("SwapLimit") && !remoteInfo.GetBool("SwapLimit") {
 		fmt.Fprintf(cli.err, "WARNING: No swap limit support\n")
 	}
-	if !remoteInfo.GetBool("IPv4Forwarding") {
+	if remoteInfo.Exists("IPv4Forwarding") && !remoteInfo.GetBool("IPv4Forwarding") {
 		fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n")
 	}
+	if remoteInfo.Exists("Labels") {
+		fmt.Fprintln(cli.out, "Labels:")
+		for _, attribute := range remoteInfo.GetList("Labels") {
+			fmt.Fprintf(cli.out, " %s\n", attribute)
+		}
+	}
+
 	return nil
 }
 
@@ -575,7 +635,7 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
 	signal.CatchAll(sigc)
 	go func() {
 		for s := range sigc {
-			if s == syscall.SIGCHLD {
+			if s == signal.SIGCHLD {
 				continue
 			}
 			var sig string
@@ -586,7 +646,7 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
 				}
 			}
 			if sig == "" {
-				log.Errorf("Unsupported signal: %d. Discarding.", s)
+				log.Errorf("Unsupported signal: %v. Discarding.", s)
 			}
 			if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil {
 				log.Debugf("Error sending signal: %s", err)
@@ -614,18 +674,20 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 		return nil
 	}
 
+	hijacked := make(chan io.Closer)
+
 	if *attach || *openStdin {
 		if cmd.NArg() > 1 {
 			return fmt.Errorf("You cannot start and attach multiple containers at once.")
 		}
 
-		steam, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)
+		stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)
 		if err != nil {
 			return err
 		}
 
 		env := engine.Env{}
-		if err := env.Decode(steam); err != nil {
+		if err := env.Decode(stream); err != nil {
 			return err
 		}
 		config := env.GetSubEnv("Config")
@@ -650,8 +712,24 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 		v.Set("stderr", "1")
 
 		cErr = promise.Go(func() error {
-			return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil, nil)
+			return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, hijacked, nil)
 		})
+	} else {
+		close(hijacked)
+	}
+
+	// Acknowledge the hijack before starting
+	select {
+	case closer := <-hijacked:
+		// Make sure that the hijack gets closed when returning (results
+		// in closing the hijack chan and freeing server's goroutines)
+		if closer != nil {
+			defer closer.Close()
+		}
+	case err := <-cErr:
+		if err != nil {
+			return err
+		}
 	}
 
 	var encounteredError error
@@ -681,7 +759,16 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 				log.Errorf("Error monitoring TTY size: %s", err)
 			}
 		}
-		return <-cErr
+		if attchErr := <-cErr; attchErr != nil {
+			return attchErr
+		}
+		_, status, err := getExitCode(cli, cmd.Arg(0))
+		if err != nil {
+			return err
+		}
+		if status != 0 {
+			return &utils.StatusError{StatusCode: status}
+		}
 	}
 	return nil
 }
@@ -798,7 +885,7 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
 		// Remove trailing ','
 		indented.Truncate(indented.Len() - 1)
 	}
-	indented.WriteByte(']')
+	indented.WriteString("]\n")
 
 	if tmpl == nil {
 		if _, err := io.Copy(cli.out, indented); err != nil {
@@ -857,13 +944,13 @@ func (cli *DockerCli) CmdPort(args ...string) error {
 		return nil
 	}
 
-	steam, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)
+	stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)
 	if err != nil {
 		return err
 	}
 
 	env := engine.Env{}
-	if err := env.Decode(steam); err != nil {
+	if err := env.Decode(stream); err != nil {
 		return err
 	}
 	ports := nat.PortMap{}
@@ -1195,7 +1282,7 @@ func (cli *DockerCli) CmdPull(args ...string) error {
 	)
 	taglessRemote, tag := parsers.ParseRepositoryTag(remote)
 	if tag == "" && !*allTags {
-		newRemote = taglessRemote + ":latest"
+		newRemote = taglessRemote + ":" + graph.DEFAULTTAG
 	}
 	if tag != "" && *allTags {
 		return fmt.Errorf("tag can't be used with --all-tags/-a")
@@ -1244,7 +1331,7 @@ func (cli *DockerCli) CmdPull(args ...string) error {
 }
 
 func (cli *DockerCli) CmdImages(args ...string) error {
-	cmd := cli.Subcmd("images", "[NAME]", "List images")
+	cmd := cli.Subcmd("images", "[REPOSITORY]", "List images")
 	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
 	all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate image layers)")
 	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
@@ -1274,6 +1361,12 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 		}
 	}
 
+	for name := range imageFilterArgs {
+		if _, ok := acceptedImageFilterTags[name]; !ok {
+			return fmt.Errorf("Invalid filter '%s'", name)
+		}
+	}
+
 	matchName := cmd.Arg(0)
 	// FIXME: --viz and --tree are deprecated. Remove them in a future version.
 	if *flViz || *flTree {
@@ -1483,7 +1576,7 @@ func (cli *DockerCli) CmdPs(args ...string) error {
 
 		cmd      = cli.Subcmd("ps", "", "List containers")
 		quiet    = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
-		size     = cmd.Bool([]string{"s", "-size"}, false, "Display sizes")
+		size     = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes")
 		all      = cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.")
 		noTrunc  = cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
 		nLatest  = cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.")
@@ -1692,6 +1785,10 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
 	cmd := cli.Subcmd("events", "", "Get real time events from the server")
 	since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp")
 	until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp")
+
+	flFilter := opts.NewListOpts(nil)
+	cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 'event=stop')")
+
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -1701,9 +1798,20 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
 		return nil
 	}
 	var (
-		v   = url.Values{}
-		loc = time.FixedZone(time.Now().Zone())
+		v               = url.Values{}
+		loc             = time.FixedZone(time.Now().Zone())
+		eventFilterArgs = filters.Args{}
 	)
+
+	// Consolidate all filter flags, and sanity check them early.
+	// They'll get process in the daemon/server.
+	for _, f := range flFilter.GetAll() {
+		var err error
+		eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs)
+		if err != nil {
+			return err
+		}
+	}
 	var setTime = func(key, value string) {
 		format := timeutils.RFC3339NanoFixed
 		if len(value) < len(format) {
@@ -1721,6 +1829,13 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
 	if *until != "" {
 		setTime("until", *until)
 	}
+	if len(eventFilterArgs) > 0 {
+		filterJson, err := filters.ToParam(eventFilterArgs)
+		if err != nil {
+			return err
+		}
+		v.Set("filters", filterJson)
+	}
 	if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil {
 		return err
 	}
@@ -1797,13 +1912,13 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
 	}
 	name := cmd.Arg(0)
 
-	steam, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false)
+	stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false)
 	if err != nil {
 		return err
 	}
 
 	env := engine.Env{}
-	if err := env.Decode(steam); err != nil {
+	if err := env.Decode(stream); err != nil {
 		return err
 	}
 
@@ -1827,7 +1942,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
 	var (
 		cmd     = cli.Subcmd("attach", "CONTAINER", "Attach to a running container")
 		noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN")
-		proxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.")
+		proxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.")
 	)
 
 	if err := cmd.Parse(args); err != nil {
@@ -1859,6 +1974,10 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
 		tty    = config.GetBool("Tty")
 	)
 
+	if err := cli.CheckTtyInput(!*noStdin, tty); err != nil {
+		return err
+	}
+
 	if tty && cli.isTerminalOut {
 		if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
 			log.Debugf("Error monitoring TTY size: %s", err)
@@ -1994,7 +2113,7 @@ func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error {
 	repos, tag := parsers.ParseRepositoryTag(image)
 	// pull only the image tagged 'latest' if no tag was specified
 	if tag == "" {
-		tag = "latest"
+		tag = graph.DEFAULTTAG
 	}
 	v.Set("fromImage", repos)
 	v.Set("tag", tag)
@@ -2083,7 +2202,11 @@ func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runc
 	stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, false)
 	//if image not found try to pull it
 	if statusCode == 404 {
-		fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image)
+		repo, tag := parsers.ParseRepositoryTag(config.Image)
+		if tag == "" {
+			tag = graph.DEFAULTTAG
+		}
+		fmt.Fprintf(cli.err, "Unable to find image '%s:%s' locally\n", repo, tag)
 
 		// we don't want to write to stdout anything apart from container.ID
 		if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil {
@@ -2124,7 +2247,7 @@ func (cli *DockerCli) CmdCreate(args ...string) error {
 		flName = cmd.String([]string{"-name"}, "", "Assign a name to the container")
 	)
 
-	config, hostConfig, cmd, err := runconfig.Parse(cmd, args, nil)
+	config, hostConfig, cmd, err := runconfig.Parse(cmd, args)
 	if err != nil {
 		return err
 	}
@@ -2151,7 +2274,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 	var (
 		flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)")
 		flDetach     = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run the container in the background and print the new container ID")
-		flSigProxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.")
+		flSigProxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.")
 		flName       = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container")
 		flAttach     *opts.ListOpts
 
@@ -2160,7 +2283,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		ErrConflictDetachAutoRemove           = fmt.Errorf("Conflicting options: --rm and -d")
 	)
 
-	config, hostConfig, cmd, err := runconfig.Parse(cmd, args, nil)
+	config, hostConfig, cmd, err := runconfig.Parse(cmd, args)
 	if err != nil {
 		return err
 	}
@@ -2169,7 +2292,11 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		return nil
 	}
 
-	if *flDetach {
+	if !*flDetach {
+		if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil {
+			return err
+		}
+	} else {
 		if fl := cmd.Lookup("attach"); fl != nil {
 			flAttach = fl.Value.(*opts.ListOpts)
 			if flAttach.Len() != 0 {
@@ -2186,7 +2313,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		config.StdinOnce = false
 	}
 
-	// Disable flSigProxy in case on TTY
+	// Disable flSigProxy when in TTY mode
 	sigProxy := *flSigProxy
 	if config.Tty {
 		sigProxy = false
@@ -2208,7 +2335,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 	)
 
 	if !config.AttachStdout && !config.AttachStderr {
-		// Make this asynchrone in order to let the client write to stdin before having to read the ID
+		// Make this asynchronous to allow the client to write to stdin before having to read the ID
 		waitDisplayId = make(chan struct{})
 		go func() {
 			defer close(waitDisplayId)
@@ -2220,7 +2347,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		return ErrConflictRestartPolicyAndAutoRemove
 	}
 
-	// We need to instanciate the chan because the select needs it. It can
+	// We need to instantiate the chan because the select needs it. It can
 	// be closed but can't be uninitialized.
 	hijacked := make(chan io.Closer)
 
@@ -2267,8 +2394,8 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 	// Acknowledge the hijack before starting
 	select {
 	case closer := <-hijacked:
-		// Make sure that hijack gets closed when returning. (result
-		// in closing hijack chan and freeing server's goroutines.
+		// Make sure that the hijack gets closed when returning (results
+		// in closing the hijack chan and freeing server's goroutines)
 		if closer != nil {
 			defer closer.Close()
 		}
@@ -2280,7 +2407,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 	}
 
 	//start the container
-	if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", hostConfig, false)); err != nil {
+	if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", nil, false)); err != nil {
 		return err
 	}
 
@@ -2320,15 +2447,15 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 			return err
 		}
 	} else {
+		// No Autoremove: Simply retrieve the exit code
 		if !config.Tty {
-			// In non-tty mode, we can't dettach, so we know we need to wait.
+			// In non-TTY mode, we can't detach, so we must wait for container exit
 			if status, err = waitForExit(cli, runResult.Get("Id")); err != nil {
 				return err
 			}
 		} else {
-			// In TTY mode, there is a race. If the process dies too slowly, the state can be update after the getExitCode call
-			// and result in a wrong exit code.
-			// No Autoremove: Simply retrieve the exit code
+			// In TTY mode, there is a race: if the process dies too slowly, the state could
+			// be updated after the getExitCode call and result in the wrong exit code being reported
 			if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil {
 				return err
 			}
@@ -2402,7 +2529,10 @@ func (cli *DockerCli) CmdSave(args ...string) error {
 		if err != nil {
 			return err
 		}
+	} else if cli.isTerminalOut {
+		return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.")
 	}
+
 	if len(cmd.Args()) == 1 {
 		image := cmd.Arg(0)
 		if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil {
@@ -2450,7 +2580,7 @@ func (cli *DockerCli) CmdLoad(args ...string) error {
 }
 
 func (cli *DockerCli) CmdExec(args ...string) error {
-	cmd := cli.Subcmd("exec", "CONTAINER COMMAND [ARG...]", "Run a command in an existing container")
+	cmd := cli.Subcmd("exec", "CONTAINER COMMAND [ARG...]", "Run a command in a running container")
 
 	execConfig, err := runconfig.ParseExec(cmd, args)
 	if err != nil {
@@ -2478,10 +2608,16 @@ func (cli *DockerCli) CmdExec(args ...string) error {
 		return nil
 	}
 
-	if execConfig.Detach {
+	if !execConfig.Detach {
+		if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil {
+			return err
+		}
+	} else {
 		if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execConfig, false)); err != nil {
 			return err
 		}
+		// For now don't print this - wait for when we support exec wait()
+		// fmt.Fprintf(cli.out, "%s\n", execID)
 		return nil
 	}
 
@@ -2544,5 +2680,14 @@ func (cli *DockerCli) CmdExec(args ...string) error {
 		return err
 	}
 
+	var status int
+	if _, status, err = getExecExitCode(cli, execID); err != nil {
+		return err
+	}
+
+	if status != 0 {
+		return &utils.StatusError{StatusCode: status}
+	}
+
 	return nil
 }

+ 1 - 1
api/client/hijack.go

@@ -13,9 +13,9 @@ import (
 	"strings"
 	"time"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/dockerversion"
-	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/docker/docker/pkg/term"

+ 32 - 24
api/client/utils.go

@@ -8,20 +8,18 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"net"
 	"net/http"
 	"net/url"
 	"os"
 	gosignal "os/signal"
 	"strconv"
 	"strings"
-	"syscall"
-	"time"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/dockerversion"
 	"github.com/docker/docker/engine"
-	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/docker/registry"
@@ -33,22 +31,7 @@ var (
 )
 
 func (cli *DockerCli) HTTPClient() *http.Client {
-	tr := &http.Transport{
-		TLSClientConfig: cli.tlsConfig,
-		Dial: func(network, addr string) (net.Conn, error) {
-			// Why 32? See issue 8035
-			return net.DialTimeout(cli.proto, cli.addr, 32*time.Second)
-		},
-	}
-	if cli.proto == "unix" {
-		// XXX workaround for net/http Transport which caches connections, but is
-		// intended for tcp connections, not unix sockets.
-		tr.DisableKeepAlives = true
-
-		// no need in compressing for local communications
-		tr.DisableCompression = true
-	}
-	return &http.Client{Transport: tr}
+	return &http.Client{Transport: cli.transport}
 }
 
 func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) {
@@ -113,7 +96,12 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b
 		if strings.Contains(err.Error(), "connection refused") {
 			return nil, -1, ErrConnectionRefused
 		}
-		return nil, -1, err
+
+		if cli.tlsConfig == nil {
+			return nil, -1, fmt.Errorf("%v. Are you trying to connect to a TLS-enabled daemon without TLS?", err)
+		}
+		return nil, -1, fmt.Errorf("An error occurred trying to connect: %v", err)
+
 	}
 
 	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
@@ -228,7 +216,7 @@ func waitForExit(cli *DockerCli, containerId string) (int, error) {
 // getExitCode perform an inspect on the container. It returns
 // the running state and the exit code.
 func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
-	steam, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil, false)
+	stream, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil, false)
 	if err != nil {
 		// If we can't connect, then the daemon probably died.
 		if err != ErrConnectionRefused {
@@ -238,7 +226,7 @@ func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
 	}
 
 	var result engine.Env
-	if err := result.Decode(steam); err != nil {
+	if err := result.Decode(stream); err != nil {
 		return false, -1, err
 	}
 
@@ -246,11 +234,31 @@ func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
 	return state.GetBool("Running"), state.GetInt("ExitCode"), nil
 }
 
+// getExecExitCode perform an inspect on the exec command. It returns
+// the running state and the exit code.
+func getExecExitCode(cli *DockerCli, execId string) (bool, int, error) {
+	stream, _, err := cli.call("GET", "/exec/"+execId+"/json", nil, false)
+	if err != nil {
+		// If we can't connect, then the daemon probably died.
+		if err != ErrConnectionRefused {
+			return false, -1, err
+		}
+		return false, -1, nil
+	}
+
+	var result engine.Env
+	if err := result.Decode(stream); err != nil {
+		return false, -1, err
+	}
+
+	return result.GetBool("Running"), result.GetInt("ExitCode"), nil
+}
+
 func (cli *DockerCli) monitorTtySize(id string, isExec bool) error {
 	cli.resizeTty(id, isExec)
 
 	sigchan := make(chan os.Signal, 1)
-	gosignal.Notify(sigchan, syscall.SIGWINCH)
+	gosignal.Notify(sigchan, signal.SIGWINCH)
 	go func() {
 		for _ = range sigchan {
 			cli.resizeTty(id, isExec)

+ 27 - 2
api/common.go

@@ -3,16 +3,19 @@ package api
 import (
 	"fmt"
 	"mime"
+	"os"
+	"path"
 	"strings"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/engine"
-	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/version"
+	"github.com/docker/libtrust"
 )
 
 const (
-	APIVERSION        version.Version = "1.15"
+	APIVERSION        version.Version = "1.16"
 	DEFAULTHTTPHOST                   = "127.0.0.1"
 	DEFAULTUNIXSOCKET                 = "/var/run/docker.sock"
 )
@@ -47,3 +50,25 @@ func MatchesContentType(contentType, expectedType string) bool {
 	}
 	return err == nil && mimetype == expectedType
 }
+
+// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
+// otherwise generates a new one
+func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
+	err := os.MkdirAll(path.Dir(trustKeyPath), 0700)
+	if err != nil {
+		return nil, err
+	}
+	trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
+	if err == libtrust.ErrKeyFileDoesNotExist {
+		trustKey, err = libtrust.GenerateECP256PrivateKey()
+		if err != nil {
+			return nil, fmt.Errorf("Error generating key: %s", err)
+		}
+		if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil {
+			return nil, fmt.Errorf("Error saving key file: %s", err)
+		}
+	} else if err != nil {
+		return nil, fmt.Errorf("Error loading key file: %s", err)
+	}
+	return trustKey, nil
+}

+ 171 - 111
api/server/server.go

@@ -3,8 +3,7 @@ package server
 import (
 	"bufio"
 	"bytes"
-	"crypto/tls"
-	"crypto/x509"
+
 	"encoding/base64"
 	"encoding/json"
 	"expvar"
@@ -19,14 +18,17 @@ import (
 	"strings"
 	"syscall"
 
+	"crypto/tls"
+	"crypto/x509"
+
 	"code.google.com/p/go.net/websocket"
 	"github.com/docker/libcontainer/user"
 	"github.com/gorilla/mux"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/pkg/listenbuffer"
-	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/docker/docker/pkg/systemd"
@@ -39,6 +41,18 @@ var (
 	activationLock chan struct{}
 )
 
+type HttpServer struct {
+	srv *http.Server
+	l   net.Listener
+}
+
+func (s *HttpServer) Serve() error {
+	return s.srv.Serve(s.l)
+}
+func (s *HttpServer) Close() error {
+	return s.l.Close()
+}
+
 type HttpApiFunc func(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error
 
 func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
@@ -51,6 +65,18 @@ func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
 	return conn, conn, nil
 }
 
+func closeStreams(streams ...interface{}) {
+	for _, stream := range streams {
+		if tcpc, ok := stream.(interface {
+			CloseWrite() error
+		}); ok {
+			tcpc.CloseWrite()
+		} else if closer, ok := stream.(io.Closer); ok {
+			closer.Close()
+		}
+	}
+}
+
 // Check to make sure request's Content-Type is application/json
 func checkForJson(r *http.Request) error {
 	ct := r.Header.Get("Content-Type")
@@ -92,17 +118,18 @@ func httpError(w http.ResponseWriter, err error) {
 	// FIXME: this is brittle and should not be necessary.
 	// If we need to differentiate between different possible error types, we should
 	// create appropriate error types with clearly defined meaning.
-	if strings.Contains(err.Error(), "No such") {
+	errStr := strings.ToLower(err.Error())
+	if strings.Contains(errStr, "no such") {
 		statusCode = http.StatusNotFound
-	} else if strings.Contains(err.Error(), "Bad parameter") {
+	} else if strings.Contains(errStr, "bad parameter") {
 		statusCode = http.StatusBadRequest
-	} else if strings.Contains(err.Error(), "Conflict") {
+	} else if strings.Contains(errStr, "conflict") {
 		statusCode = http.StatusConflict
-	} else if strings.Contains(err.Error(), "Impossible") {
+	} else if strings.Contains(errStr, "impossible") {
 		statusCode = http.StatusNotAcceptable
-	} else if strings.Contains(err.Error(), "Wrong login/password") {
+	} else if strings.Contains(errStr, "wrong login/password") {
 		statusCode = http.StatusUnauthorized
-	} else if strings.Contains(err.Error(), "hasn't been activated") {
+	} else if strings.Contains(errStr, "hasn't been activated") {
 		statusCode = http.StatusForbidden
 	}
 
@@ -300,6 +327,7 @@ func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWrite
 	streamJSON(job, w, true)
 	job.Setenv("since", r.Form.Get("since"))
 	job.Setenv("until", r.Form.Get("until"))
+	job.Setenv("filters", r.Form.Get("filters"))
 	return job.Run()
 }
 
@@ -855,20 +883,7 @@ func postContainersAttach(eng *engine.Engine, version version.Version, w http.Re
 	if err != nil {
 		return err
 	}
-	defer func() {
-		if tcpc, ok := inStream.(*net.TCPConn); ok {
-			tcpc.CloseWrite()
-		} else {
-			inStream.Close()
-		}
-	}()
-	defer func() {
-		if tcpc, ok := outStream.(*net.TCPConn); ok {
-			tcpc.CloseWrite()
-		} else if closer, ok := outStream.(io.Closer); ok {
-			closer.Close()
-		}
-	}()
+	defer closeStreams(inStream, outStream)
 
 	var errStream io.Writer
 
@@ -941,6 +956,15 @@ func getContainersByName(eng *engine.Engine, version version.Version, w http.Res
 	return job.Run()
 }
 
+func getExecByID(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if vars == nil {
+		return fmt.Errorf("Missing parameter 'id'")
+	}
+	var job = eng.Job("execInspect", vars["id"])
+	streamJSON(job, w, false)
+	return job.Run()
+}
+
 func getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
@@ -1001,6 +1025,9 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite
 	} else {
 		job.Setenv("rm", r.FormValue("rm"))
 	}
+	if r.FormValue("pull") == "1" && version.GreaterThanOrEqualTo("1.16") {
+		job.Setenv("pull", "1")
+	}
 	job.Stdin.Add(r.Body)
 	job.Setenv("remote", r.FormValue("remote"))
 	job.Setenv("t", r.FormValue("t"))
@@ -1050,7 +1077,7 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp
 	w.Header().Set("Content-Type", "application/x-tar")
 	if err := job.Run(); err != nil {
 		log.Errorf("%s", err.Error())
-		if strings.Contains(err.Error(), "No such container") {
+		if strings.Contains(strings.ToLower(err.Error()), "no such container") {
 			w.WriteHeader(http.StatusNotFound)
 		} else if strings.Contains(err.Error(), "no such file or directory") {
 			return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"])
@@ -1106,21 +1133,7 @@ func postContainerExecStart(eng *engine.Engine, version version.Version, w http.
 		if err != nil {
 			return err
 		}
-
-		defer func() {
-			if tcpc, ok := inStream.(*net.TCPConn); ok {
-				tcpc.CloseWrite()
-			} else {
-				inStream.Close()
-			}
-		}()
-		defer func() {
-			if tcpc, ok := outStream.(*net.TCPConn); ok {
-				tcpc.CloseWrite()
-			} else if closer, ok := outStream.(io.Closer); ok {
-				closer.Close()
-			}
-		}()
+		defer closeStreams(inStream, outStream)
 
 		var errStream io.Writer
 
@@ -1166,7 +1179,7 @@ func optionsHandler(eng *engine.Engine, version version.Version, w http.Response
 }
 func writeCorsHeaders(w http.ResponseWriter, r *http.Request) {
 	w.Header().Add("Access-Control-Allow-Origin", "*")
-	w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
+	w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
 	w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS")
 }
 
@@ -1231,6 +1244,7 @@ func AttachProfiler(router *mux.Router) {
 	router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
 	router.HandleFunc("/debug/pprof/profile", pprof.Profile)
 	router.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
+	router.HandleFunc("/debug/pprof/block", pprof.Handler("block").ServeHTTP)
 	router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP)
 	router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP)
 	router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP)
@@ -1262,6 +1276,7 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st
 			"/containers/{name:.*}/top":       getContainersTop,
 			"/containers/{name:.*}/logs":      getContainersLogs,
 			"/containers/{name:.*}/attach/ws": wsContainersAttach,
+			"/exec/{id:.*}/json":              getExecByID,
 		},
 		"POST": {
 			"/auth":                         postAuth,
@@ -1333,9 +1348,14 @@ func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.Respons
 	return nil
 }
 
-// ServeFD creates an http.Server and sets it up to serve given a socket activated
+// serveFd creates an http.Server and sets it up to serve given a socket activated
 // argument.
-func ServeFd(addr string, handle http.Handler) error {
+func serveFd(addr string, job *engine.Job) error {
+	r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
+	if err != nil {
+		return err
+	}
+
 	ls, e := systemd.ListenFD(addr)
 	if e != nil {
 		return e
@@ -1353,7 +1373,7 @@ func ServeFd(addr string, handle http.Handler) error {
 	for i := range ls {
 		listener := ls[i]
 		go func() {
-			httpSrv := http.Server{Handler: handle}
+			httpSrv := http.Server{Handler: r}
 			chErrors <- httpSrv.Serve(listener)
 		}()
 	}
@@ -1369,7 +1389,11 @@ func ServeFd(addr string, handle http.Handler) error {
 }
 
 func lookupGidByName(nameOrGid string) (int, error) {
-	groups, err := user.ParseGroupFilter(func(g *user.Group) bool {
+	groupFile, err := user.GetGroupFile()
+	if err != nil {
+		return -1, err
+	}
+	groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool {
 		return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid
 	})
 	if err != nil {
@@ -1381,6 +1405,41 @@ func lookupGidByName(nameOrGid string) (int, error) {
 	return -1, fmt.Errorf("Group %s not found", nameOrGid)
 }
 
+func setupTls(cert, key, ca string, l net.Listener) (net.Listener, error) {
+	tlsCert, err := tls.LoadX509KeyPair(cert, key)
+	if err != nil {
+		return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?",
+			cert, key, err)
+	}
+	tlsConfig := &tls.Config{
+		NextProtos:   []string{"http/1.1"},
+		Certificates: []tls.Certificate{tlsCert},
+		// Avoid fallback on insecure SSL protocols
+		MinVersion: tls.VersionTLS10,
+	}
+
+	if ca != "" {
+		certPool := x509.NewCertPool()
+		file, err := ioutil.ReadFile(ca)
+		if err != nil {
+			return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
+		}
+		certPool.AppendCertsFromPEM(file)
+		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+		tlsConfig.ClientCAs = certPool
+	}
+
+	return tls.NewListener(l, tlsConfig), nil
+}
+
+func newListener(proto, addr string, bufferRequests bool) (net.Listener, error) {
+	if bufferRequests {
+		return listenbuffer.NewListenBuffer(proto, addr, activationLock)
+	}
+
+	return net.Listen(proto, addr)
+}
+
 func changeGroup(addr string, nameOrGid string) error {
 	gid, err := lookupGidByName(nameOrGid)
 	if err != nil {
@@ -1391,99 +1450,95 @@ func changeGroup(addr string, nameOrGid string) error {
 	return os.Chown(addr, 0, gid)
 }
 
-// ListenAndServe sets up the required http.Server and gets it listening for
-// each addr passed in and does protocol specific checking.
-func ListenAndServe(proto, addr string, job *engine.Job) error {
-	var l net.Listener
+func setSocketGroup(addr, group string) error {
+	if group == "" {
+		return nil
+	}
+
+	if err := changeGroup(addr, group); err != nil {
+		if group != "docker" {
+			return err
+		}
+		log.Debugf("Warning: could not chgrp %s to docker: %v", addr, err)
+	}
+
+	return nil
+}
+
+func setupUnixHttp(addr string, job *engine.Job) (*HttpServer, error) {
 	r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
 	if err != nil {
-		return err
+		return nil, err
 	}
 
-	if proto == "fd" {
-		return ServeFd(addr, r)
+	if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) {
+		return nil, err
 	}
+	mask := syscall.Umask(0777)
+	defer syscall.Umask(mask)
 
-	if proto == "unix" {
-		if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) {
-			return err
-		}
+	l, err := newListener("unix", addr, job.GetenvBool("BufferRequests"))
+	if err != nil {
+		return nil, err
 	}
 
-	var oldmask int
-	if proto == "unix" {
-		oldmask = syscall.Umask(0777)
+	if err := setSocketGroup(addr, job.Getenv("SocketGroup")); err != nil {
+		return nil, err
 	}
 
-	if job.GetenvBool("BufferRequests") {
-		l, err = listenbuffer.NewListenBuffer(proto, addr, activationLock)
-	} else {
-		l, err = net.Listen(proto, addr)
+	if err := os.Chmod(addr, 0660); err != nil {
+		return nil, err
+	}
+
+	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
+}
+
+func setupTcpHttp(addr string, job *engine.Job) (*HttpServer, error) {
+	if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") {
+		log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
 	}
 
-	if proto == "unix" {
-		syscall.Umask(oldmask)
+	r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
+	if err != nil {
+		return nil, err
 	}
+
+	l, err := newListener("tcp", addr, job.GetenvBool("BufferRequests"))
 	if err != nil {
-		return err
+		return nil, err
 	}
 
-	if proto != "unix" && (job.GetenvBool("Tls") || job.GetenvBool("TlsVerify")) {
-		tlsCert := job.Getenv("TlsCert")
-		tlsKey := job.Getenv("TlsKey")
-		cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey)
-		if err != nil {
-			return fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?",
-				tlsCert, tlsKey, err)
-		}
-		tlsConfig := &tls.Config{
-			NextProtos:   []string{"http/1.1"},
-			Certificates: []tls.Certificate{cert},
-			// Avoid fallback on insecure SSL protocols
-			MinVersion: tls.VersionTLS10,
-		}
+	if job.GetenvBool("Tls") || job.GetenvBool("TlsVerify") {
+		var tlsCa string
 		if job.GetenvBool("TlsVerify") {
-			certPool := x509.NewCertPool()
-			file, err := ioutil.ReadFile(job.Getenv("TlsCa"))
-			if err != nil {
-				return fmt.Errorf("Couldn't read CA certificate: %s", err)
-			}
-			certPool.AppendCertsFromPEM(file)
-
-			tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
-			tlsConfig.ClientCAs = certPool
+			tlsCa = job.Getenv("TlsCa")
+		}
+		l, err = setupTls(job.Getenv("TlsCert"), job.Getenv("TlsKey"), tlsCa, l)
+		if err != nil {
+			return nil, err
 		}
-		l = tls.NewListener(l, tlsConfig)
 	}
+	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
+}
 
+// NewServer sets up the required Server and does protocol specific checking.
+func NewServer(proto, addr string, job *engine.Job) (Server, error) {
 	// Basic error and sanity checking
 	switch proto {
+	case "fd":
+		return nil, serveFd(addr, job)
 	case "tcp":
-		if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") {
-			log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
-		}
+		return setupTcpHttp(addr, job)
 	case "unix":
-		socketGroup := job.Getenv("SocketGroup")
-		if socketGroup != "" {
-			if err := changeGroup(addr, socketGroup); err != nil {
-				if socketGroup == "docker" {
-					// if the user hasn't explicitly specified the group ownership, don't fail on errors.
-					log.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error())
-				} else {
-					return err
-				}
-			}
-
-		}
-		if err := os.Chmod(addr, 0660); err != nil {
-			return err
-		}
+		return setupUnixHttp(addr, job)
 	default:
-		return fmt.Errorf("Invalid protocol format.")
+		return nil, fmt.Errorf("Invalid protocol format.")
 	}
+}
 
-	httpSrv := http.Server{Addr: addr, Handler: r}
-	return httpSrv.Serve(l)
+type Server interface {
+	Serve() error
+	Close() error
 }
 
 // ServeApi loops through all of the protocols sent in to docker and spawns
@@ -1505,7 +1560,12 @@ func ServeApi(job *engine.Job) engine.Status {
 		}
 		go func() {
 			log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
-			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
+			srv, err := NewServer(protoAddrParts[0], protoAddrParts[1], job)
+			if err != nil {
+				chErrors <- err
+				return
+			}
+			chErrors <- srv.Serve()
 		}()
 	}
 

+ 36 - 12
builder/dispatchers.go

@@ -14,8 +14,8 @@ import (
 	"regexp"
 	"strings"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/nat"
-	"github.com/docker/docker/pkg/log"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/runconfig"
 )
@@ -31,21 +31,39 @@ func nullDispatch(b *Builder, args []string, attributes map[string]bool, origina
 // in the dockerfile available from the next statement on via ${foo}.
 //
 func env(b *Builder, args []string, attributes map[string]bool, original string) error {
-	if len(args) != 2 {
-		return fmt.Errorf("ENV accepts two arguments")
+	if len(args) == 0 {
+		return fmt.Errorf("ENV is missing arguments")
+	}
+
+	if len(args)%2 != 0 {
+		// should never get here, but just in case
+		return fmt.Errorf("Bad input to ENV, too many args")
 	}
 
-	fullEnv := fmt.Sprintf("%s=%s", args[0], args[1])
+	commitStr := "ENV"
 
-	for i, envVar := range b.Config.Env {
-		envParts := strings.SplitN(envVar, "=", 2)
-		if args[0] == envParts[0] {
-			b.Config.Env[i] = fullEnv
-			return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv))
+	for j := 0; j < len(args); j++ {
+		// name  ==> args[j]
+		// value ==> args[j+1]
+		newVar := args[j] + "=" + args[j+1] + ""
+		commitStr += " " + newVar
+
+		gotOne := false
+		for i, envVar := range b.Config.Env {
+			envParts := strings.SplitN(envVar, "=", 2)
+			if envParts[0] == args[j] {
+				b.Config.Env[i] = newVar
+				gotOne = true
+				break
+			}
+		}
+		if !gotOne {
+			b.Config.Env = append(b.Config.Env, newVar)
 		}
+		j++
 	}
-	b.Config.Env = append(b.Config.Env, fullEnv)
-	return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv))
+
+	return b.commit("", b.Config.Cmd, commitStr)
 }
 
 // MAINTAINER some text <maybe@an.email.address>
@@ -97,6 +115,12 @@ func from(b *Builder, args []string, attributes map[string]bool, original string
 	name := args[0]
 
 	image, err := b.Daemon.Repositories().LookupImage(name)
+	if b.Pull {
+		image, err = b.pullImage(name)
+		if err != nil {
+			return err
+		}
+	}
 	if err != nil {
 		if b.Daemon.Graph().IsNotExist(err) {
 			image, err = b.pullImage(name)
@@ -183,7 +207,7 @@ func run(b *Builder, args []string, attributes map[string]bool, original string)
 	runCmd.SetOutput(ioutil.Discard)
 	runCmd.Usage = nil
 
-	config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...), nil)
+	config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...))
 	if err != nil {
 		return err
 	}

+ 2 - 1
builder/evaluator.go

@@ -27,10 +27,10 @@ import (
 	"path"
 	"strings"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/engine"
-	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/pkg/tarsum"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
@@ -90,6 +90,7 @@ type Builder struct {
 	// controls how images and containers are handled between steps.
 	Remove      bool
 	ForceRemove bool
+	Pull        bool
 
 	AuthConfig     *registry.AuthConfig
 	AuthConfigFile *registry.ConfigFile

+ 55 - 39
builder/internals.go

@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"net/http"
 	"net/url"
 	"os"
 	"path"
@@ -18,17 +19,17 @@ import (
 	"syscall"
 	"time"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/daemon"
 	imagepkg "github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/pkg/parsers"
-	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/tarsum"
+	"github.com/docker/docker/pkg/urlutil"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/utils"
 )
@@ -217,7 +218,7 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri
 	origPath = strings.TrimPrefix(origPath, "./")
 
 	// In the remote/URL case, download it and gen its hashcode
-	if utils.IsURL(origPath) {
+	if urlutil.IsURL(origPath) {
 		if !allowRemote {
 			return fmt.Errorf("Source can't be a URL for %s", cmdName)
 		}
@@ -257,8 +258,21 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri
 		fmt.Fprintf(b.OutStream, "\n")
 		tmpFile.Close()
 
-		// Remove the mtime of the newly created tmp file
-		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
+		// Set the mtime to the Last-Modified header value if present
+		// Otherwise just remove atime and mtime
+		times := make([]syscall.Timespec, 2)
+
+		lastMod := resp.Header.Get("Last-Modified")
+		if lastMod != "" {
+			mTime, err := http.ParseTime(lastMod)
+			// If we can't parse it then just let it default to 'zero'
+			// otherwise use the parsed time value
+			if err == nil {
+				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
+			}
+		}
+
+		if err := system.UtimesNano(tmpFileName, times); err != nil {
 			return err
 		}
 
@@ -514,25 +528,19 @@ func (b *Builder) create() (*daemon.Container, error) {
 }
 
 func (b *Builder) run(c *daemon.Container) error {
-	var errCh chan error
-	if b.Verbose {
-		errCh = promise.Go(func() error {
-			// FIXME: call the 'attach' job so that daemon.Attach can be made private
-			//
-			// FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach
-			// but without hijacking for stdin. Also, with attach there can be race
-			// condition because of some output already was printed before it.
-			return <-b.Daemon.Attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, nil, nil, b.OutStream, b.ErrStream)
-		})
-	}
-
 	//start the container
 	if err := c.Start(); err != nil {
 		return err
 	}
 
-	if errCh != nil {
-		if err := <-errCh; err != nil {
+	if b.Verbose {
+		logsJob := b.Engine.Job("logs", c.ID)
+		logsJob.Setenv("follow", "1")
+		logsJob.Setenv("stdout", "1")
+		logsJob.Setenv("stderr", "1")
+		logsJob.Stdout.Add(b.OutStream)
+		logsJob.Stderr.Set(b.ErrStream)
+		if err := logsJob.Run(); err != nil {
 			return err
 		}
 	}
@@ -641,37 +649,45 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec
 		resPath = path.Join(destPath, path.Base(origPath))
 	}
 
-	return fixPermissions(resPath, 0, 0)
+	return fixPermissions(origPath, resPath, 0, 0, destExists)
 }
 
-func copyAsDirectory(source, destination string, destinationExists bool) error {
+func copyAsDirectory(source, destination string, destExisted bool) error {
 	if err := chrootarchive.CopyWithTar(source, destination); err != nil {
 		return err
 	}
+	return fixPermissions(source, destination, 0, 0, destExisted)
+}
 
-	if destinationExists {
-		files, err := ioutil.ReadDir(source)
-		if err != nil {
-			return err
-		}
-
-		for _, file := range files {
-			if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil {
-				return err
-			}
-		}
-		return nil
+func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
+	// If the destination didn't already exist, or the destination isn't a
+	// directory, then we should Lchown the destination. Otherwise, we shouldn't
+	// Lchown the destination.
+	destStat, err := os.Stat(destination)
+	if err != nil {
+		// This should *never* be reached, because the destination must've already
+		// been created while untar-ing the context.
+		return err
 	}
+	doChownDestination := !destExisted || !destStat.IsDir()
 
-	return fixPermissions(destination, 0, 0)
-}
+	// We Walk on the source rather than on the destination because we don't
+	// want to change permissions on things we haven't created or modified.
+	return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
+		// Do not alter the walk root iff. it existed before, as it doesn't fall under
+		// the domain of "things we should chown".
+		if !doChownDestination && (source == fullpath) {
+			return nil
+		}
 
-func fixPermissions(destination string, uid, gid int) error {
-	return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error {
-		if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) {
+		// Path is prefixed by source: substitute with destination instead.
+		cleaned, err := filepath.Rel(source, fullpath)
+		if err != nil {
 			return err
 		}
-		return nil
+
+		fullpath = path.Join(destination, cleaned)
+		return os.Lchown(fullpath, uid, gid)
 	})
 }
 

+ 7 - 5
builder/job.go

@@ -5,13 +5,13 @@ import (
 	"io/ioutil"
 	"os"
 	"os/exec"
-	"strings"
 
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/urlutil"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/utils"
 )
@@ -36,6 +36,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 		noCache        = job.GetenvBool("nocache")
 		rm             = job.GetenvBool("rm")
 		forceRm        = job.GetenvBool("forcerm")
+		pull           = job.GetenvBool("pull")
 		authConfig     = &registry.AuthConfig{}
 		configFile     = &registry.ConfigFile{}
 		tag            string
@@ -58,8 +59,8 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 
 	if remoteURL == "" {
 		context = ioutil.NopCloser(job.Stdin)
-	} else if utils.IsGIT(remoteURL) {
-		if !strings.HasPrefix(remoteURL, "git://") {
+	} else if urlutil.IsGitURL(remoteURL) {
+		if !urlutil.IsGitTransport(remoteURL) {
 			remoteURL = "https://" + remoteURL
 		}
 		root, err := ioutil.TempDir("", "docker-build-git")
@@ -77,7 +78,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 			return job.Error(err)
 		}
 		context = c
-	} else if utils.IsURL(remoteURL) {
+	} else if urlutil.IsURL(remoteURL) {
 		f, err := utils.Download(remoteURL)
 		if err != nil {
 			return job.Error(err)
@@ -112,6 +113,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 		UtilizeCache:    !noCache,
 		Remove:          rm,
 		ForceRemove:     forceRm,
+		Pull:            pull,
 		OutOld:          job.Stdout,
 		StreamFormatter: sf,
 		AuthConfig:      authConfig,
@@ -124,7 +126,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 	}
 
 	if repoName != "" {
-		b.Daemon.Repositories().Set(repoName, tag, id, false)
+		b.Daemon.Repositories().Set(repoName, tag, id, true)
 	}
 	return engine.StatusOK
 }

+ 131 - 8
builder/parser/line_parsers.go

@@ -12,6 +12,7 @@ import (
 	"fmt"
 	"strconv"
 	"strings"
+	"unicode"
 )
 
 var (
@@ -41,17 +42,139 @@ func parseSubCommand(rest string) (*Node, map[string]bool, error) {
 // parse environment like statements. Note that this does *not* handle
 // variable interpolation, which will be handled in the evaluator.
 func parseEnv(rest string) (*Node, map[string]bool, error) {
-	node := &Node{}
-	rootnode := node
-	strs := TOKEN_WHITESPACE.Split(rest, 2)
+	// This is kind of tricky because we need to support the old
+	// variant:   ENV name value
+	// as well as the new one:    ENV name=value ...
+	// The trigger to know which one is being used will be whether we hit
+	// a space or = first.  space ==> old, "=" ==> new
+
+	const (
+		inSpaces = iota // looking for start of a word
+		inWord
+		inQuote
+	)
+
+	words := []string{}
+	phase := inSpaces
+	word := ""
+	quote := '\000'
+	blankOK := false
+	var ch rune
+
+	for pos := 0; pos <= len(rest); pos++ {
+		if pos != len(rest) {
+			ch = rune(rest[pos])
+		}
+
+		if phase == inSpaces { // Looking for start of word
+			if pos == len(rest) { // end of input
+				break
+			}
+			if unicode.IsSpace(ch) { // skip spaces
+				continue
+			}
+			phase = inWord // found it, fall thru
+		}
+		if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
+			if blankOK || len(word) > 0 {
+				words = append(words, word)
+			}
+			break
+		}
+		if phase == inWord {
+			if unicode.IsSpace(ch) {
+				phase = inSpaces
+				if blankOK || len(word) > 0 {
+					words = append(words, word)
+
+					// Look for = and if no there assume
+					// we're doing the old stuff and
+					// just read the rest of the line
+					if !strings.Contains(word, "=") {
+						word = strings.TrimSpace(rest[pos:])
+						words = append(words, word)
+						break
+					}
+				}
+				word = ""
+				blankOK = false
+				continue
+			}
+			if ch == '\'' || ch == '"' {
+				quote = ch
+				blankOK = true
+				phase = inQuote
+				continue
+			}
+			if ch == '\\' {
+				if pos+1 == len(rest) {
+					continue // just skip \ at end
+				}
+				pos++
+				ch = rune(rest[pos])
+			}
+			word += string(ch)
+			continue
+		}
+		if phase == inQuote {
+			if ch == quote {
+				phase = inWord
+				continue
+			}
+			if ch == '\\' {
+				if pos+1 == len(rest) {
+					phase = inWord
+					continue // just skip \ at end
+				}
+				pos++
+				ch = rune(rest[pos])
+			}
+			word += string(ch)
+		}
+	}
 
-	if len(strs) < 2 {
-		return nil, nil, fmt.Errorf("ENV must have two arguments")
+	if len(words) == 0 {
+		return nil, nil, fmt.Errorf("ENV must have some arguments")
 	}
 
-	node.Value = strs[0]
-	node.Next = &Node{}
-	node.Next.Value = strs[1]
+	// Old format (ENV name value)
+	var rootnode *Node
+
+	if !strings.Contains(words[0], "=") {
+		node := &Node{}
+		rootnode = node
+		strs := TOKEN_WHITESPACE.Split(rest, 2)
+
+		if len(strs) < 2 {
+			return nil, nil, fmt.Errorf("ENV must have two arguments")
+		}
+
+		node.Value = strs[0]
+		node.Next = &Node{}
+		node.Next.Value = strs[1]
+	} else {
+		var prevNode *Node
+		for i, word := range words {
+			if !strings.Contains(word, "=") {
+				return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
+			}
+			parts := strings.SplitN(word, "=", 2)
+
+			name := &Node{}
+			value := &Node{}
+
+			name.Next = value
+			name.Value = parts[0]
+			value.Value = parts[1]
+
+			if i == 0 {
+				rootnode = name
+			} else {
+				prevNode.Next = name
+			}
+			prevNode = value
+		}
+	}
 
 	return rootnode, nil, nil
 }

+ 6 - 4
builder/parser/parser.go

@@ -103,10 +103,6 @@ func Parse(rwc io.Reader) (*Node, error) {
 
 	for scanner.Scan() {
 		scannedLine := strings.TrimLeftFunc(scanner.Text(), unicode.IsSpace)
-		if stripComments(scannedLine) == "" {
-			continue
-		}
-
 		line, child, err := parseLine(scannedLine)
 		if err != nil {
 			return nil, err
@@ -129,6 +125,12 @@ func Parse(rwc io.Reader) (*Node, error) {
 					break
 				}
 			}
+			if child == nil && line != "" {
+				line, child, err = parseLine(line)
+				if err != nil {
+					return nil, err
+				}
+			}
 		}
 
 		if child != nil {

+ 1 - 1
builder/parser/testfiles-negative/env_equals_env/Dockerfile → builder/parser/testfiles-negative/env_no_value/Dockerfile

@@ -1,3 +1,3 @@
 FROM busybox
 
-ENV PATH=PATH
+ENV PATH

+ 0 - 1
builder/parser/testfiles/docker/Dockerfile

@@ -23,7 +23,6 @@
 # the case. Therefore, you don't have to disable it anymore.
 #
 
-docker-version	0.6.1
 FROM	ubuntu:14.04
 MAINTAINER	Tianon Gravi <admwiggin@gmail.com> (@tianon)
 

+ 0 - 1
builder/parser/testfiles/docker/result

@@ -1,4 +1,3 @@
-(docker-version)
 (from "ubuntu:14.04")
 (maintainer "Tianon Gravi <admwiggin@gmail.com> (@tianon)")
 (run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq 	apt-utils 	aufs-tools 	automake 	btrfs-tools 	build-essential 	curl 	dpkg-sig 	git 	iptables 	libapparmor-dev 	libcap-dev 	libsqlite3-dev 	lxc=1.0* 	mercurial 	pandoc 	parallel 	reprepro 	ruby1.9.1 	ruby1.9.1-dev 	s3cmd=1.1.0* 	--no-install-recommends")

+ 15 - 0
builder/parser/testfiles/env/Dockerfile

@@ -0,0 +1,15 @@
+FROM ubuntu
+ENV name value
+ENV name=value
+ENV name=value name2=value2
+ENV name="value value1"
+ENV name=value\ value2
+ENV name="value'quote space'value2"
+ENV name='value"double quote"value2'
+ENV name=value\ value2 name2=value2\ value3
+ENV name=value \
+    name1=value1 \
+    name2="value2a \
+           value2b" \
+    name3="value3a\n\"value3b\"" \
+	name4="value4a\\nvalue4b" \

+ 10 - 0
builder/parser/testfiles/env/result

@@ -0,0 +1,10 @@
+(from "ubuntu")
+(env "name" "value")
+(env "name" "value")
+(env "name" "value" "name2" "value2")
+(env "name" "value value1")
+(env "name" "value value2")
+(env "name" "value'quote space'value2")
+(env "name" "value\"double quote\"value2")
+(env "name" "value value2" "name2" "value2 value3")
+(env "name" "value" "name1" "value1" "name2" "value2a            value2b" "name3" "value3an\"value3b\"" "name4" "value4a\\nvalue4b")

+ 7 - 3
contrib/check-config.sh

@@ -76,7 +76,7 @@ check_flags() {
 	for flag in "$@"; do
 		echo "- $(check_flag "$flag")"
 	done
-} 
+}
 
 if [ ! -e "$CONFIG" ]; then
 	wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..."
@@ -135,7 +135,7 @@ flags=(
 	DEVPTS_MULTIPLE_INSTANCES
 	CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED
 	MACVLAN VETH BRIDGE
-	NF_NAT_IPV4 IP_NF_TARGET_MASQUERADE
+	NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE
 	NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}
 	NF_NAT NF_NAT_NEEDED
 )
@@ -153,16 +153,20 @@ check_flags "${flags[@]}"
 echo '- Storage Drivers:'
 {
 	echo '- "'$(wrap_color 'aufs' blue)'":'
-	check_flags AUFS_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/  /'
+	check_flags AUFS_FS | sed 's/^/  /'
 	if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
 		echo "    $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
 	fi
+	check_flags EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/  /'
 
 	echo '- "'$(wrap_color 'btrfs' blue)'":'
 	check_flags BTRFS_FS | sed 's/^/  /'
 
 	echo '- "'$(wrap_color 'devicemapper' blue)'":'
 	check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/  /'
+
+	echo '- "'$(wrap_color 'overlay' blue)'":'
+	check_flags OVERLAY_FS | sed 's/^/  /'
 } | sed 's/^/  /'
 echo
 

+ 2 - 0
contrib/completion/MAINTAINERS

@@ -0,0 +1,2 @@
+Tianon Gravi <admwiggin@gmail.com> (@tianon)
+Jessie Frazelle <jess@docker.com> (@jfrazelle)

+ 171 - 48
contrib/completion/bash/docker

@@ -1,8 +1,8 @@
-#!bash
+#!/bin/bash
 #
 # bash completion file for core docker commands
 #
-# This script provides supports completion of:
+# This script provides completion of:
 #  - commands and their options
 #  - container ids and names
 #  - image repos and tags
@@ -11,9 +11,9 @@
 # To enable the completions either:
 #  - place this file in /etc/bash_completion.d
 #  or
-#  - copy this file and add the line below to your .bashrc after
-#    bash completion features are loaded
-#     . docker.bash
+#  - copy this file to e.g. ~/.docker-completion.sh and add the line
+#    below to your .bashrc after bash completion features are loaded
+#    . ~/.docker-completion.sh
 #
 # Note:
 # Currently, the completions will not work if the docker daemon is not
@@ -99,13 +99,60 @@ __docker_pos_first_nonflag() {
 	echo $counter
 }
 
+__docker_resolve_hostname() {
+	command -v host >/dev/null 2>&1 || return
+	COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') )
+}
+
+__docker_capabilities() {
+	# The list of capabilities is defined in types.go, ALL was added manually.
+	COMPREPLY=( $( compgen -W "
+		ALL
+		AUDIT_CONTROL
+		AUDIT_WRITE
+		BLOCK_SUSPEND
+		CHOWN
+		DAC_OVERRIDE
+		DAC_READ_SEARCH
+		FOWNER
+		FSETID
+		IPC_LOCK
+		IPC_OWNER
+		KILL
+		LEASE
+		LINUX_IMMUTABLE
+		MAC_ADMIN
+		MAC_OVERRIDE
+		MKNOD
+		NET_ADMIN
+		NET_BIND_SERVICE
+		NET_BROADCAST
+		NET_RAW
+		SETFCAP
+		SETGID
+		SETPCAP
+		SETUID
+		SYS_ADMIN
+		SYS_BOOT
+		SYS_CHROOT
+		SYSLOG
+		SYS_MODULE
+		SYS_NICE
+		SYS_PACCT
+		SYS_PTRACE
+		SYS_RAWIO
+		SYS_RESOURCE
+		SYS_TIME
+		SYS_TTY_CONFIG
+		WAKE_ALARM
+	" -- "$cur" ) )
+}
+
 _docker_docker() {
 	case "$prev" in
 		-H)
 			return
 			;;
-		*)
-			;;
 	esac
 
 	case "$cur" in
@@ -138,8 +185,6 @@ _docker_build() {
 			__docker_image_repos_and_tags
 			return
 			;;
-		*)
-			;;
 	esac
 
 	case "$cur" in
@@ -160,8 +205,6 @@ _docker_commit() {
 		-m|--message|-a|--author|--run)
 			return
 			;;
-		*)
-			;;
 	esac
 
 	case "$cur" in
@@ -222,7 +265,7 @@ _docker_create() {
 			__docker_containers_all
 			return
 			;;
-		-v|--volume)
+		-v|--volume|--device)
 			case "$cur" in
 				*:*)
 					# TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
@@ -255,19 +298,72 @@ _docker_create() {
 			esac
 			return
 			;;
-		--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf)
+		--add-host)
+			case "$cur" in
+				*:)
+					__docker_resolve_hostname
+					return
+					;;
+			esac
+			;;
+		--cap-add|--cap-drop)
+			__docker_capabilities
 			return
 			;;
-		*)
+		--net)
+			case "$cur" in
+				container:*)
+					local cur=${cur#*:}
+					__docker_containers_all
+					;;
+				*)
+					COMPREPLY=( $( compgen -W "bridge none container: host" -- "$cur") )
+					if [ "${COMPREPLY[*]}" = "container:" ] ; then
+						compopt -o nospace
+					fi
+					;;
+			esac
+			return
+			;;
+		--restart)
+			case "$cur" in
+				on-failure:*)
+					;;
+				*)
+					COMPREPLY=( $( compgen -W "no on-failure on-failure: always" -- "$cur") )
+					;;
+			esac
+			return
+			;;
+		--security-opt)
+			case "$cur" in
+				label:*:*)
+					;;
+				label:*)
+					local cur=${cur##*:}
+					COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "$cur") )
+					if [ "${COMPREPLY[*]}" != "disable" ] ; then
+						compopt -o nospace
+					fi
+					;;
+				*)
+					COMPREPLY=( $( compgen -W "label apparmor" -S ":" -- "$cur") )
+					compopt -o nospace
+					;;
+			esac
+			return
+			;;
+		--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf|--dns-search)
+			return
 			;;
 	esac
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir -c --cpu-shares --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) )
 			;;
 		*)
-			local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf')
+			local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt|--add-host|--cap-add|--cap-drop|--device|--dns-search|--net|--restart')
 
 			if [ $cword -eq $counter ]; then
 				__docker_image_repos_and_tags_and_ids
@@ -288,16 +384,12 @@ _docker_events() {
 		--since)
 			return
 			;;
-		*)
-			;;
 	esac
 
 	case "$cur" in
 		-*)
 			COMPREPLY=( $( compgen -W "--since" -- "$cur" ) )
 			;;
-		*)
-			;;
 	esac
 }
 
@@ -376,8 +468,6 @@ _docker_inspect() {
 		-f|--format)
 			return
 			;;
-		*)
-			;;
 	esac
 
 	case "$cur" in
@@ -403,16 +493,12 @@ _docker_login() {
 		-u|--username|-p|--password|-e|--email)
 			return
 			;;
-		*)
-			;;
 	esac
 
 	case "$cur" in
 		-*)
 			COMPREPLY=( $( compgen -W "-u --username -p --password -e --email" -- "$cur" ) )
 			;;
-		*)
-			;;
 	esac
 }
 
@@ -452,16 +538,12 @@ _docker_ps() {
 		-n)
 			return
 			;;
-		*)
-			;;
 	esac
 
 	case "$cur" in
 		-*)
 			COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since --before -n" -- "$cur" ) )
 			;;
-		*)
-			;;
 	esac
 }
 
@@ -470,8 +552,6 @@ _docker_pull() {
 		-t|--tag)
 			return
 			;;
-		*)
-			;;
 	esac
 
 	case "$cur" in
@@ -499,8 +579,6 @@ _docker_restart() {
 		-t|--time)
 			return
 			;;
-		*)
-			;;
 	esac
 
 	case "$cur" in
@@ -520,7 +598,6 @@ _docker_rm() {
 			return
 			;;
 		*)
-			local force=
 			for arg in "${COMP_WORDS[@]}"; do
 				case "$arg" in
 					-f|--force)
@@ -553,7 +630,7 @@ _docker_run() {
 			__docker_containers_all
 			return
 			;;
-		-v|--volume)
+		-v|--volume|--device)
 			case "$cur" in
 				*:*)
 					# TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
@@ -586,20 +663,72 @@ _docker_run() {
 			esac
 			return
 			;;
-		--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf)
+		--add-host)
+			case "$cur" in
+				*:)
+					__docker_resolve_hostname
+					return
+					;;
+			esac
+			;;
+		--cap-add|--cap-drop)
+			__docker_capabilities
 			return
 			;;
-		*)
+		--net)
+			case "$cur" in
+				container:*)
+					local cur=${cur#*:}
+					__docker_containers_all
+					;;
+				*)
+					COMPREPLY=( $( compgen -W "bridge none container: host" -- "$cur") )
+					if [ "${COMPREPLY[*]}" = "container:" ] ; then
+						compopt -o nospace
+					fi
+					;;
+			esac
+			return
+			;;
+		--restart)
+			case "$cur" in
+				on-failure:*)
+					;;
+				*)
+					COMPREPLY=( $( compgen -W "no on-failure on-failure: always" -- "$cur") )
+					;;
+			esac
+			return
+			;;
+		--security-opt)
+			case "$cur" in
+				label:*:*)
+					;;
+				label:*)
+					local cur=${cur##*:}
+					COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "$cur") )
+					if [ "${COMPREPLY[*]}" != "disable" ] ; then
+						compopt -o nospace
+					fi
+					;;
+				*)
+					COMPREPLY=( $( compgen -W "label apparmor" -S ":" -- "$cur") )
+					compopt -o nospace
+					;;
+			esac
+			return
+			;;
+		--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf|--dns-search)
+			return
 			;;
 	esac
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf --security-opt" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--rm -d --detach --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) )
 			;;
 		*)
-
-			local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt')
+			local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt|--add-host|--cap-add|--cap-drop|--device|--dns-search|--net|--restart')
 
 			if [ $cword -eq $counter ]; then
 				__docker_image_repos_and_tags_and_ids
@@ -620,16 +749,12 @@ _docker_search() {
 		-s|--stars)
 			return
 			;;
-		*)
-			;;
 	esac
 
 	case "$cur" in
 		-*)
 			COMPREPLY=( $( compgen -W "--no-trunc --automated -s --stars" -- "$cur" ) )
 			;;
-		*)
-			;;
 	esac
 }
 
@@ -649,8 +774,6 @@ _docker_stop() {
 		-t|--time)
 			return
 			;;
-		*)
-			;;
 	esac
 
 	case "$cur" in
@@ -752,7 +875,7 @@ _docker() {
 	local cur prev words cword
 	_get_comp_words_by_ref -n : cur prev words cword
 
-	local command='docker'
+	local command='docker' cpos=0
 	local counter=1
 	while [ $counter -lt $cword ]; do
 		case "${words[$counter]}" in

+ 4 - 4
contrib/completion/fish/docker.fish

@@ -53,7 +53,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable
 complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force docker to use specific DNS servers'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the docker runtime to use a specific exec driver'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the docker runtime'
-complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Enable inter-container communication'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Disable enabling of net.ipv4.ip_forward'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Disable docker's addition of iptables rules"
@@ -67,7 +67,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print
 # attach
 complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container'
 complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach stdin'
-complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxify all received signal to the process (even in non-tty mode)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxify all received signal to the process (non-TTY mode only)'
 complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container"
 
 # build
@@ -185,7 +185,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d '
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.'
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output"
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs'
-complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display sizes'
+complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display total file sizes'
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.'
 
 # pull
@@ -237,7 +237,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)"
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container'
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxify all received signal to the process (even in non-tty mode)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxify all received signal to the process (non-TTY mode only)'
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-tty'
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID'
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)'

+ 84 - 72
contrib/completion/zsh/_docker

@@ -177,7 +177,9 @@ __docker_commands () {
     if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \
         && ! _retrieve_cache docker_subcommands;
     then
-        _docker_subcommands=(${${${${(f)"$(_call_program commands docker 2>&1)"}[5,-1]}## #}/ ##/:})
+        local -a lines
+        lines=(${(f)"$(_call_program commands docker 2>&1)"})
+        _docker_subcommands=(${${${lines[$((${lines[(i)Commands:]} + 1)),${lines[(I)    *]}]}## #}/ ##/:})
         _docker_subcommands=($_docker_subcommands 'help:Show help for a command')
         _store_cache docker_subcommands _docker_subcommands
     fi
@@ -190,22 +192,23 @@ __docker_subcommand () {
         (attach)
             _arguments \
                 '--no-stdin[Do not attach stdin]' \
-                '--sig-proxy[Proxify all received signal]' \
+                '--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]' \
                 ':containers:__docker_runningcontainers'
             ;;
         (build)
             _arguments \
-                '--force-rm[Always remove intermediate containers, even after unsuccessful builds]' \
+                '--force-rm[Always remove intermediate containers]' \
                 '--no-cache[Do not use cache when building the image]' \
-                '-q[Suppress verbose build output]' \
+                {-q,--quiet}'[Suppress verbose build output]' \
                 '--rm[Remove intermediate containers after a successful build]' \
-                '-t:repository:__docker_repositories_with_tags' \
+                {-t,--tag=-}'[Repository, name and tag to be applied]:repository:__docker_repositories_with_tags' \
                 ':path or URL:_directories'
             ;;
         (commit)
             _arguments \
-                '--author=-[Author]:author: ' \
-                '-m[Commit message]:message: ' \
+                {-a,--author=-}'[Author]:author: ' \
+                {-m,--message=-}'[Commit message]:message: ' \
+                {-p,--pause}'[Pause container during commit]' \
                 '--run=-[Configuration automatically applied when the image is run]:configuration: ' \
                 ':container:__docker_containers' \
                 ':repository:__docker_repositories_with_tags'
@@ -224,60 +227,40 @@ __docker_subcommand () {
                     ;;
             esac
             ;;
-        (create)
-            _arguments \
-                '-P[Publish all exposed ports to the host]' \
-                '-a[Attach to stdin, stdout or stderr]' \
-                '-c=-[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \
-                '--cidfile=-[Write the container ID to the file]:CID file:_files' \
-                '*--dns=-[Set custom dns servers]:dns server: ' \
-                '*-e=-[Set environment variables]:environment variable: ' \
-                '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \
-                '*--expose=-[Expose a port from the container without publishing it]: ' \
-                '-h=-[Container host name]:hostname:_hosts' \
-                '-i[Keep stdin open even if not attached]' \
-                '--link=-[Add link to another container]:link:->link' \
-                '--lxc-conf=-[Add custom lxc options]:lxc options: ' \
-                '-m=-[Memory limit (in bytes)]:limit: ' \
-                '--name=-[Container name]:name: ' \
-                '*-p=-[Expose a container'"'"'s port to the host]:port:_ports' \
-                '--privileged[Give extended privileges to this container]' \
-                '-t[Allocate a pseudo-tty]' \
-                '-u=-[Username or UID]:user:_users' \
-                '*-v=-[Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)]:volume: '\
-                '--volumes-from=-[Mount volumes from the specified container]:volume: ' \
-                '-w=-[Working directory inside the container]:directory:_directories' \
-                '(-):images:__docker_images' \
-                '(-):command: _command_names -e' \
-                '*::arguments: _normal'
         (diff|export)
             _arguments '*:containers:__docker_containers'
             ;;
+        (events)
+            _arguments \
+                '--since=-[Events created since this timestamp]:timestamp: ' \
+                '--until=-[Events created until this timestamp]:timestamp: '
+            ;;
         (exec)
             _arguments \
-                '-d[Detached mode: leave the container running in the background]' \
-                '-i[Keep stdin open even if not attached]' \
-                '-t[Allocate a pseudo-tty]' \
+                {-d,--detach}'[Detached mode: leave the container running in the background]' \
+                {-i,--interactive}'[Keep stdin open even if not attached]' \
+                {-t,--tty}'[Allocate a pseudo-tty]' \
                 ':containers:__docker_runningcontainers'
             ;;
         (history)
             _arguments \
                 '--no-trunc[Do not truncate output]' \
-                '-q[Only show numeric IDs]' \
+                {-q,--quiet}'[Only show numeric IDs]' \
                 '*:images:__docker_images'
             ;;
         (images)
             _arguments \
-                '-a[Show all images]' \
+                {-a,--all}'[Show all images]' \
+                '*'{-f,--filter=-}'[Filter values]:filter: ' \
                 '--no-trunc[Do not truncate output]' \
-                '-q[Only show numeric IDs]' \
+                {-q,--quiet}'[Only show numeric IDs]' \
                 '--tree[Output graph in tree format]' \
                 '--viz[Output graph in graphviz format]' \
                 ':repository:__docker_repositories'
             ;;
         (inspect)
             _arguments \
-                '--format=-[Format the output using the given go template]:template: ' \
+                {-f,--format=-}'[Format the output using the given go template]:template: ' \
                 '*:containers:__docker_containers'
             ;;
         (import)
@@ -298,20 +281,29 @@ __docker_subcommand () {
                        '3:file:_files'
             ;;
         (kill)
-            _arguments '*:containers:__docker_runningcontainers'
+            _arguments \
+                {-s,--signal=-}'[Signal to send]:signal:_signals' \
+                '*:containers:__docker_runningcontainers'
             ;;
         (load)
+            _arguments \
+                {-i,--input=-}'[Read from tar archive file]:tar:_files'
             ;;
         (login)
             _arguments \
-                '-e[Email]:email: ' \
-                '-p[Password]:password: ' \
-                '-u[Username]:username: ' \
+                {-e,--email=-}'[Email]:email: ' \
+                {-p,--password=-}'[Password]:password: ' \
+                {-u,--user=-}'[Username]:username: ' \
+                ':server: '
+            ;;
+        (logout)
+            _arguments \
                 ':server: '
             ;;
         (logs)
             _arguments \
-                '-f[Follow log output]' \
+                {-f,--follow}'[Follow log output]' \
+                {-t,--timestamps}'[Show timestamps]' \
                 '*:containers:__docker_containers'
             ;;
         (port)
@@ -319,24 +311,32 @@ __docker_subcommand () {
                 '1:containers:__docker_runningcontainers' \
                 '2:port:_ports'
             ;;
+        (pause|unpause)
+            _arguments \
+                '1:containers:__docker_runningcontainers'
+            ;;
         (start)
             _arguments \
-                '-a[Attach container'"'"'s stdout/stderr and forward all signals]' \
-                '-i[Attach container'"'"'s stding]' \
+                {-a,--attach}'[Attach container'"'"'s stdout/stderr and forward all signals]' \
+                {-i,--interactive}'[Attach container'"'"'s stding]' \
                 '*:containers:__docker_stoppedcontainers'
             ;;
         (rm)
             _arguments \
-                '--link[Remove the specified link and not the underlying container]' \
-                '-v[Remove the volumes associated to the container]' \
+                {-f,--force}'[Force removal]' \
+                {-l,--link}'[Remove the specified link and not the underlying container]' \
+                {-v,--volumes}'[Remove the volumes associated to the container]' \
                 '*:containers:__docker_stoppedcontainers'
             ;;
         (rmi)
             _arguments \
+                {-f,--force}'[Force removal]' \
+                '--no-prune[Do not delete untagged parents]' \
                 '*:images:__docker_images'
             ;;
         (restart|stop)
-            _arguments '-t[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)' \
+            _arguments \
+                {-t,--time=-}'[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)' \
                 '*:containers:__docker_runningcontainers'
             ;;
         (top)
@@ -352,47 +352,58 @@ __docker_subcommand () {
             ;;
         (ps)
             _arguments \
-                '-a[Show all containers]' \
+                {-a,--all}'[Show all containers]' \
                 '--before=-[Show only container created before...]:containers:__docker_containers' \
-                '-l[Show only the latest created container]' \
+                '*'{-f,--filter=-}'[Filter values]:filter: ' \
+                {-l,--latest}'[Show only the latest created container]' \
                 '-n[Show n last created containers, include non-running one]:n:(1 5 10 25 50)' \
                 '--no-trunc[Do not truncate output]' \
-                '-q[Only show numeric IDs]' \
-                '-s[Display sizes]' \
+                {-q,--quiet}'[Only show numeric IDs]' \
+                {-s,--size}'[Display total file sizes]' \
                 '--since=-[Show only containers created since...]:containers:__docker_containers'
             ;;
         (tag)
             _arguments \
-                '-f[force]'\
+                {-f,--force}'[force]'\
                 ':image:__docker_images'\
                 ':repository:__docker_repositories_with_tags'
             ;;
-        (run)
+        (create|run)
             _arguments \
-                '-P[Publish all exposed ports to the host]' \
-                '-a[Attach to stdin, stdout or stderr]' \
-                '-c[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \
+                {-a,--attach}'[Attach to stdin, stdout or stderr]' \
+                '*--add-host=-[Add a custom host-to-IP mapping]:host\:ip mapping: ' \
+                {-c,--cpu-shares=-}'[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \
+                '*--cap-add=-[Add Linux capabilities]:capability: ' \
+                '*--cap-drop=-[Drop Linux capabilities]:capability: ' \
                 '--cidfile=-[Write the container ID to the file]:CID file:_files' \
-                '-d[Detached mode: leave the container running in the background]' \
+                '--cpuset=-[CPUs in which to allow execution]:CPU set: ' \
+                {-d,--detach}'[Detached mode: leave the container running in the background]' \
+                '*--device=-[Add a host device to the container]:device:_files' \
                 '*--dns=-[Set custom dns servers]:dns server: ' \
-                '*-e[Set environment variables]:environment variable: ' \
+                '*--dns-search=-[Set custom DNS search domains]:dns domains: ' \
+                '*'{-e,--environment=-}'[Set environment variables]:environment variable: ' \
                 '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \
+                '*--env-file=-[Read environment variables from a file]:environment file:_files' \
                 '*--expose=-[Expose a port from the container without publishing it]: ' \
-                '-h[Container host name]:hostname:_hosts' \
-                '-i[Keep stdin open even if not attached]' \
-                '--link=-[Add link to another container]:link:->link' \
-                '--lxc-conf=-[Add custom lxc options]:lxc options: ' \
+                {-h,--hostname=-}'[Container host name]:hostname:_hosts' \
+                {-i,--interactive}'[Keep stdin open even if not attached]' \
+                '*--link=-[Add link to another container]:link:->link' \
+                '*--lxc-conf=-[Add custom lxc options]:lxc options: ' \
                 '-m[Memory limit (in bytes)]:limit: ' \
                 '--name=-[Container name]:name: ' \
-                '*-p[Expose a container'"'"'s port to the host]:port:_ports' \
+                '--net=-[Network mode]:network mode:(bridge none container: host)' \
+                {-P,--publish-all}'[Publish all exposed ports]' \
+                '*'{-p,--publish=-}'[Expose a container'"'"'s port to the host]:port:_ports' \
                 '--privileged[Give extended privileges to this container]' \
+                '--restart=-[Restart policy]:restart policy:(no on-failure always)' \
                 '--rm[Remove intermediate containers when it exits]' \
-                '--sig-proxy[Proxify all received signal]' \
-                '-t[Allocate a pseudo-tty]' \
-                '-u[Username or UID]:user:_users' \
-                '*-v[Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)]:volume: '\
-                '--volumes-from=-[Mount volumes from the specified container]:volume: ' \
-                '-w[Working directory inside the container]:directory:_directories' \
+                '*--security-opt=-[Security options]:security option: ' \
+                '--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]' \
+                {-t,--tty}'[Allocate a pseudo-tty]' \
+                {-u,--user=-}'[Username or UID]:user:_users' \
+                '*-v[Bind mount a volume]:volume: '\
+                '*--volumes-from=-[Mount volumes from the specified container]:volume: ' \
+                {-w,--workdir=-}'[Working directory inside the container]:directory:_directories' \
                 '(-):images:__docker_images' \
                 '(-):command: _command_names -e' \
                 '*::arguments: _normal'
@@ -416,6 +427,7 @@ __docker_subcommand () {
             ;;
         (save)
             _arguments \
+                {-o,--output=-}'[Write to file]:file:_files' \
                 ':images:__docker_images'
             ;;
         (wait)

+ 0 - 2
contrib/desktop-integration/chromium/Dockerfile

@@ -20,8 +20,6 @@
 #   docker run --volumes-from chromium-data -v /tmp/.X11-unix:/tmp/.X11-unix \
 #   -e DISPLAY=unix$DISPLAY chromium
 
-DOCKER_VERSION 1.3
-
 # Base docker image
 FROM debian:jessie
 MAINTAINER Jessica Frazelle <jess@docker.com>

+ 0 - 2
contrib/desktop-integration/gparted/Dockerfile

@@ -17,8 +17,6 @@
 #     -e DISPLAY=unix$DISPLAY gparted
 #
 
-DOCKER-VERSION 1.3
-
 # Base docker image
 FROM debian:jessie
 MAINTAINER Jessica Frazelle <jess@docker.com>

+ 8 - 4
contrib/docker-device-tool/device_tool.go

@@ -3,12 +3,15 @@ package main
 import (
 	"flag"
 	"fmt"
-	"github.com/docker/docker/daemon/graphdriver/devmapper"
 	"os"
 	"path"
 	"sort"
 	"strconv"
 	"strings"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/graphdriver/devmapper"
+	"github.com/docker/docker/pkg/devicemapper"
 )
 
 func usage() {
@@ -60,6 +63,7 @@ func main() {
 
 	if *flDebug {
 		os.Setenv("DEBUG", "1")
+		log.SetLevel(log.DebugLevel)
 	}
 
 	if flag.NArg() < 1 {
@@ -69,7 +73,7 @@ func main() {
 	args := flag.Args()
 
 	home := path.Join(*root, "devicemapper")
-	devices, err := devmapper.NewDeviceSet(home, false)
+	devices, err := devmapper.NewDeviceSet(home, false, nil)
 	if err != nil {
 		fmt.Println("Can't initialize device mapper: ", err)
 		os.Exit(1)
@@ -142,7 +146,7 @@ func main() {
 			usage()
 		}
 
-		err := devices.RemoveDevice(args[1])
+		err := devicemapper.RemoveDevice(args[1])
 		if err != nil {
 			fmt.Println("Can't remove device: ", err)
 			os.Exit(1)
@@ -153,7 +157,7 @@ func main() {
 			usage()
 		}
 
-		err := devices.MountDevice(args[1], args[2], false)
+		err := devices.MountDevice(args[1], args[2], "")
 		if err != nil {
 			fmt.Println("Can't create snap device: ", err)
 			os.Exit(1)

+ 0 - 2
contrib/host-integration/Dockerfile.dev

@@ -2,8 +2,6 @@
 # This Dockerfile will create an image that allows to generate upstart and
 # systemd scripts (more to come)
 #
-# docker-version 0.6.2
-#
 
 FROM		ubuntu:12.10
 MAINTAINER	Guillaume J. Charmes <guillaume@docker.com>

+ 1 - 0
contrib/init/systemd/MAINTAINERS

@@ -1,2 +1,3 @@
 Lokesh Mandvekar <lsm5@fedoraproject.org> (@lsm5)
 Brandon Philips <brandon.philips@coreos.com> (@philips)
+Jessie Frazelle <jess@docker.com> (@jfrazelle)

+ 2 - 0
contrib/init/upstart/MAINTAINERS

@@ -0,0 +1,2 @@
+Tianon Gravi <admwiggin@gmail.com> (@tianon)
+Jessie Frazelle <jess@docker.com> (@jfrazelle)

+ 1 - 1
contrib/mkimage-arch.sh

@@ -60,6 +60,6 @@ mknod -m 600 $DEV/initctl p
 mknod -m 666 $DEV/ptmx c 5 2
 ln -sf /proc/self/fd $DEV/fd
 
-tar --numeric-owner -C $ROOTFS -c . | docker import - archlinux
+tar --numeric-owner --xattrs --acls -C $ROOTFS -c . | docker import - archlinux
 docker run -i -t archlinux echo Success.
 rm -rf $ROOTFS

+ 1 - 1
contrib/mkimage-yum.sh

@@ -57,7 +57,7 @@ mknod -m 666 "$target"/dev/tty0 c 4 0
 mknod -m 666 "$target"/dev/urandom c 1 9
 mknod -m 666 "$target"/dev/zero c 1 5
 
-yum -c "$yum_config" --installroot="$target" --setopt=tsflags=nodocs \
+yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \
     --setopt=group_package_types=mandatory -y groupinstall Core
 yum -c "$yum_config" --installroot="$target" -y clean all
 

+ 4 - 1
contrib/mkimage/debootstrap

@@ -15,9 +15,12 @@ done
 suite="$1"
 shift
 
+# allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ...
+: ${DEBOOTSTRAP:=debootstrap}
+
 (
 	set -x
-	debootstrap "${before[@]}" "$suite" "$rootfsDir" "$@"
+	$DEBOOTSTRAP "${before[@]}" "$suite" "$rootfsDir" "$@"
 )
 
 # now for some Docker-specific tweaks

+ 1 - 0
daemon/MAINTAINERS

@@ -3,4 +3,5 @@ Victor Vieux <vieux@docker.com> (@vieux)
 Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
 Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)
 Tibor Vass <teabee89@gmail.com> (@tiborvass)
+Vishnu Kannan <vishnuk@google.com> (@vishh)
 volumes.go: Brian Goff <cpuguy83@gmail.com> (@cpuguy83)

+ 4 - 27
daemon/attach.go

@@ -6,10 +6,10 @@ import (
 	"os"
 	"time"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/jsonlog"
-	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/utils"
 )
@@ -83,7 +83,6 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 		var (
 			cStdin           io.ReadCloser
 			cStdout, cStderr io.Writer
-			cStdinCloser     io.Closer
 		)
 
 		if stdin {
@@ -94,7 +93,6 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 				io.Copy(w, job.Stdin)
 			}()
 			cStdin = r
-			cStdinCloser = job.Stdin
 		}
 		if stdout {
 			cStdout = job.Stdout
@@ -103,7 +101,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 			cStderr = job.Stderr
 		}
 
-		<-daemon.Attach(&container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdinCloser, cStdout, cStderr)
+		<-daemon.attach(&container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdout, cStderr)
 		// If we are in stdinonce mode, wait for the process to end
 		// otherwise, simply return
 		if container.Config.StdinOnce && !container.Config.Tty {
@@ -113,13 +111,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 	return engine.StatusOK
 }
 
-// FIXME: this should be private, and every outside subsystem
-// should go through the "container_attach" job. But that would require
-// that job to be properly documented, as well as the relationship between
-// Attach and ContainerAttach.
-//
-// This method is in use by builder/builder.go.
-func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
+func (daemon *Daemon) attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
 	var (
 		cStdout, cStderr io.ReadCloser
 		nJobs            int
@@ -136,10 +128,10 @@ func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, t
 			go func() {
 				log.Debugf("attach: stdin: begin")
 				defer log.Debugf("attach: stdin: end")
-				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
 				if stdinOnce && !tty {
 					defer cStdin.Close()
 				} else {
+					// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
 					defer func() {
 						if cStdout != nil {
 							cStdout.Close()
@@ -179,9 +171,6 @@ func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, t
 				if stdinOnce && stdin != nil {
 					defer stdin.Close()
 				}
-				if stdinCloser != nil {
-					defer stdinCloser.Close()
-				}
 				_, err := io.Copy(stdout, cStdout)
 				if err == io.ErrClosedPipe {
 					err = nil
@@ -195,9 +184,6 @@ func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, t
 	} else {
 		// Point stdout of container to a no-op writer.
 		go func() {
-			if stdinCloser != nil {
-				defer stdinCloser.Close()
-			}
 			if cStdout, err := streamConfig.StdoutPipe(); err != nil {
 				log.Errorf("attach: stdout pipe: %s", err)
 			} else {
@@ -219,9 +205,6 @@ func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, t
 				if stdinOnce && stdin != nil {
 					defer stdin.Close()
 				}
-				if stdinCloser != nil {
-					defer stdinCloser.Close()
-				}
 				_, err := io.Copy(stderr, cStderr)
 				if err == io.ErrClosedPipe {
 					err = nil
@@ -235,10 +218,6 @@ func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, t
 	} else {
 		// Point stderr at a no-op writer.
 		go func() {
-			if stdinCloser != nil {
-				defer stdinCloser.Close()
-			}
-
 			if cStderr, err := streamConfig.StderrPipe(); err != nil {
 				log.Errorf("attach: stdout pipe: %s", err)
 			} else {
@@ -257,8 +236,6 @@ func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, t
 			}
 		}()
 
-		// FIXME: how to clean up the stdin goroutine without the unwanted side effect
-		// of closing the passed stdin? Add an intermediary io.Pipe?
 		for i := 0; i < nJobs; i++ {
 			log.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
 			if err := <-errors; err != nil {

+ 5 - 2
daemon/config.go

@@ -40,6 +40,8 @@ type Config struct {
 	DisableNetwork              bool
 	EnableSelinuxSupport        bool
 	Context                     map[string][]string
+	TrustKeyPath                string
+	Labels                      []string
 }
 
 // InstallFlags adds command-line options to the top-level flag parser for
@@ -57,7 +59,7 @@ func (config *Config) InstallFlags() {
 	flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking")
 	flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs (ex: 10.20.0.0/16)\nthis subnet must be nested in the bridge subnet (which is defined by -b or --bip)")
 	opts.ListVar(&config.InsecureRegistries, []string{"-insecure-registry"}, "Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)")
-	flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication")
+	flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Allow unrestricted inter-container and Docker daemon host communication")
 	flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver")
 	flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver")
 	flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support. SELinux does not presently support the BTRFS storage driver")
@@ -68,6 +70,7 @@ func (config *Config) InstallFlags() {
 	opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers")
 	opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
 	opts.MirrorListVar(&config.Mirrors, []string{"-registry-mirror"}, "Specify a preferred Docker registry mirror")
+	opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon (displayed in `docker info`)")
 
 	// Localhost is by default considered as an insecure registry
 	// This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker).
@@ -78,7 +81,7 @@ func (config *Config) InstallFlags() {
 	config.InsecureRegistries = append(config.InsecureRegistries, "127.0.0.0/8")
 }
 
-func GetDefaultNetworkMtu() int {
+func getDefaultNetworkMtu() int {
 	if iface, err := networkdriver.GetDefaultRouteIface(); err == nil {
 		return iface.MTU
 	}

+ 69 - 12
daemon/container.go

@@ -17,6 +17,7 @@ import (
 	"github.com/docker/libcontainer/devices"
 	"github.com/docker/libcontainer/label"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/image"
@@ -25,7 +26,6 @@ import (
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/broadcastwriter"
 	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/pkg/networkfs/etchosts"
 	"github.com/docker/docker/pkg/networkfs/resolvconf"
 	"github.com/docker/docker/pkg/promise"
@@ -102,13 +102,17 @@ func (container *Container) FromDisk() error {
 		return err
 	}
 
-	data, err := ioutil.ReadFile(pth)
+	jsonSource, err := os.Open(pth)
 	if err != nil {
 		return err
 	}
+	defer jsonSource.Close()
+
+	dec := json.NewDecoder(jsonSource)
+
 	// Load container settings
 	// udp broke compat of docker.PortMapping, but it's not used when loading a container, we can skip it
-	if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") {
+	if err := dec.Decode(container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") {
 		return err
 	}
 
@@ -229,6 +233,18 @@ func populateCommand(c *Container, env []string) error {
 		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
 	}
 
+	ipc := &execdriver.Ipc{}
+
+	if c.hostConfig.IpcMode.IsContainer() {
+		ic, err := c.getIpcContainer()
+		if err != nil {
+			return err
+		}
+		ipc.ContainerID = ic.ID
+	} else {
+		ipc.HostIpc = c.hostConfig.IpcMode.IsHost()
+	}
+
 	// Build lists of devices allowed and created within the container.
 	userSpecifiedDevices := make([]*devices.Device, len(c.hostConfig.Devices))
 	for i, deviceMapping := range c.hostConfig.Devices {
@@ -244,7 +260,10 @@ func populateCommand(c *Container, env []string) error {
 	autoCreatedDevices := append(devices.DefaultAutoCreatedDevices, userSpecifiedDevices...)
 
 	// TODO: this can be removed after lxc-conf is fully deprecated
-	lxcConfig := mergeLxcConfIntoOptions(c.hostConfig)
+	lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig)
+	if err != nil {
+		return err
+	}
 
 	resources := &execdriver.Resources{
 		Memory:     c.Config.Memory,
@@ -270,6 +289,7 @@ func populateCommand(c *Container, env []string) error {
 		InitPath:           "/.dockerinit",
 		WorkingDir:         c.Config.WorkingDir,
 		Network:            en,
+		Ipc:                ipc,
 		Resources:          resources,
 		AllowedDevices:     allowedDevices,
 		AutoCreatedDevices: autoCreatedDevices,
@@ -297,6 +317,12 @@ func (container *Container) Start() (err error) {
 	// setup has been cleaned up properly
 	defer func() {
 		if err != nil {
+			container.setError(err)
+			// if no one else has set it, make sure we don't leave it at zero
+			if container.ExitCode == 0 {
+				container.ExitCode = 128
+			}
+			container.toDisk()
 			container.cleanup()
 		}
 	}()
@@ -414,7 +440,7 @@ func (container *Container) buildHostsFiles(IP string) error {
 	}
 	container.HostsPath = hostsPath
 
-	extraContent := make(map[string]string)
+	var extraContent []etchosts.Record
 
 	children, err := container.daemon.Children(container.Name)
 	if err != nil {
@@ -423,15 +449,15 @@ func (container *Container) buildHostsFiles(IP string) error {
 
 	for linkAlias, child := range children {
 		_, alias := path.Split(linkAlias)
-		extraContent[alias] = child.NetworkSettings.IPAddress
+		extraContent = append(extraContent, etchosts.Record{Hosts: alias, IP: child.NetworkSettings.IPAddress})
 	}
 
 	for _, extraHost := range container.hostConfig.ExtraHosts {
 		parts := strings.Split(extraHost, ":")
-		extraContent[parts[0]] = parts[1]
+		extraContent = append(extraContent, etchosts.Record{Hosts: parts[0], IP: parts[1]})
 	}
 
-	return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname, &extraContent)
+	return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname, extraContent)
 }
 
 func (container *Container) buildHostnameAndHostsFiles(IP string) error {
@@ -455,6 +481,7 @@ func (container *Container) AllocateNetwork() error {
 	)
 
 	job := eng.Job("allocate_interface", container.ID)
+	job.Setenv("RequestedMac", container.Config.MacAddress)
 	if env, err = job.Stdout.AddEnv(); err != nil {
 		return err
 	}
@@ -525,7 +552,9 @@ func (container *Container) ReleaseNetwork() {
 	}
 	eng := container.daemon.eng
 
-	eng.Job("release_interface", container.ID).Run()
+	job := eng.Job("release_interface", container.ID)
+	job.SetenvBool("overrideShutdown", true)
+	job.Run()
 	container.NetworkSettings = &NetworkSettings{}
 }
 
@@ -576,6 +605,10 @@ func (container *Container) cleanup() {
 	if err := container.Unmount(); err != nil {
 		log.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
 	}
+
+	for _, eConfig := range container.execCommands.s {
+		container.daemon.unregisterExecCommand(eConfig)
+	}
 }
 
 func (container *Container) KillSig(sig int) error {
@@ -691,6 +724,9 @@ func (container *Container) Restart(seconds int) error {
 }
 
 func (container *Container) Resize(h, w int) error {
+	if !container.IsRunning() {
+		return fmt.Errorf("Cannot resize container %s, container is not running", container.ID)
+	}
 	return container.command.ProcessConfig.Terminal.Resize(h, w)
 }
 
@@ -826,19 +862,25 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
 		return nil, err
 	}
 
-	var filter []string
-
 	basePath, err := container.getResourcePath(resource)
 	if err != nil {
 		container.Unmount()
 		return nil, err
 	}
 
+	// Check if this is actually in a volume
+	for _, mnt := range container.VolumeMounts() {
+		if len(mnt.MountToPath) > 0 && strings.HasPrefix(resource, mnt.MountToPath[1:]) {
+			return mnt.Export(resource)
+		}
+	}
+
 	stat, err := os.Stat(basePath)
 	if err != nil {
 		container.Unmount()
 		return nil, err
 	}
+	var filter []string
 	if !stat.IsDir() {
 		d, f := path.Split(basePath)
 		basePath = d
@@ -965,7 +1007,7 @@ func (container *Container) updateParentsHosts() error {
 		c := container.daemon.Get(cid)
 		if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() {
 			if err := etchosts.Update(c.HostsPath, container.NetworkSettings.IPAddress, container.Name[1:]); err != nil {
-				return fmt.Errorf("Failed to update /etc/hosts in parent container: %v", err)
+				log.Errorf("Failed to update /etc/hosts in parent container: %v", err)
 			}
 		}
 	}
@@ -1228,10 +1270,25 @@ func (container *Container) GetMountLabel() string {
 	return container.MountLabel
 }
 
+func (container *Container) getIpcContainer() (*Container, error) {
+	containerID := container.hostConfig.IpcMode.Container()
+	c := container.daemon.Get(containerID)
+	if c == nil {
+		return nil, fmt.Errorf("no such container to join IPC: %s", containerID)
+	}
+	if !c.IsRunning() {
+		return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID)
+	}
+	return c, nil
+}
+
 func (container *Container) getNetworkedContainer() (*Container, error) {
 	parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2)
 	switch parts[0] {
 	case "container":
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("no container specified to join network")
+		}
 		nc := container.daemon.Get(parts[1])
 		if nc == nil {
 			return nil, fmt.Errorf("no such container to join network: %s", parts[1])

+ 37 - 6
daemon/create.go

@@ -1,10 +1,13 @@
 package daemon
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/runconfig"
+	"github.com/docker/libcontainer/label"
 )
 
 func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
@@ -50,12 +53,9 @@ func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
 		job.Errorf("IPv4 forwarding is disabled.\n")
 	}
 	container.LogEvent("create")
-	// FIXME: this is necessary because daemon.Create might return a nil container
-	// with a non-nil error. This should not happen! Once it's fixed we
-	// can remove this workaround.
-	if container != nil {
-		job.Printf("%s\n", container.ID)
-	}
+
+	job.Printf("%s\n", container.ID)
+
 	for _, warning := range buildWarnings {
 		job.Errorf("%s\n", warning)
 	}
@@ -80,6 +80,12 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
 	if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
 		return nil, nil, err
 	}
+	if hostConfig != nil && hostConfig.SecurityOpt == nil {
+		hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
 	if container, err = daemon.newContainer(name, config, img); err != nil {
 		return nil, nil, err
 	}
@@ -94,8 +100,33 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
 			return nil, nil, err
 		}
 	}
+	if err := container.Mount(); err != nil {
+		return nil, nil, err
+	}
+	defer container.Unmount()
+	if err := container.prepareVolumes(); err != nil {
+		return nil, nil, err
+	}
 	if err := container.ToDisk(); err != nil {
 		return nil, nil, err
 	}
 	return container, warnings, nil
 }
+
+func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode) ([]string, error) {
+	if ipcMode.IsHost() {
+		return label.DisableSecOpt(), nil
+	}
+	if ipcContainer := ipcMode.Container(); ipcContainer != "" {
+		c := daemon.Get(ipcContainer)
+		if c == nil {
+			return nil, fmt.Errorf("no such container to join IPC: %s", ipcContainer)
+		}
+		if !c.IsRunning() {
+			return nil, fmt.Errorf("cannot join IPC of a non running container: %s", ipcContainer)
+		}
+
+		return label.DupSecOpt(c.ProcessLabel), nil
+	}
+	return nil, nil
+}

+ 21 - 10
daemon/daemon.go

@@ -14,6 +14,8 @@ import (
 
 	"github.com/docker/libcontainer/label"
 
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/execdriver/execdrivers"
 	"github.com/docker/docker/daemon/execdriver/lxc"
@@ -29,7 +31,6 @@ import (
 	"github.com/docker/docker/pkg/broadcastwriter"
 	"github.com/docker/docker/pkg/graphdb"
 	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/pkg/namesgenerator"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers/kernel"
@@ -83,6 +84,7 @@ func (c *contStore) List() []*Container {
 }
 
 type Daemon struct {
+	ID             string
 	repository     string
 	sysInitPath    string
 	containers     *contStore
@@ -128,6 +130,7 @@ func (daemon *Daemon) Install(eng *engine.Engine) error {
 		"execCreate":        daemon.ContainerExecCreate,
 		"execStart":         daemon.ContainerExecStart,
 		"execResize":        daemon.ContainerExecResize,
+		"execInspect":       daemon.ContainerExecInspect,
 	} {
 		if err := eng.Register(name, method); err != nil {
 			return err
@@ -231,7 +234,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
 		log.Debugf("killing old running container %s", container.ID)
 
 		existingPid := container.Pid
-		container.SetStopped(0)
+		container.SetStopped(&execdriver.ExitStatus{0, false})
 
 		// We only have to handle this for lxc because the other drivers will ensure that
 		// no processes are left when docker dies
@@ -263,7 +266,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
 
 			log.Debugf("Marking as stopped")
 
-			container.SetStopped(-127)
+			container.SetStopped(&execdriver.ExitStatus{-127, false})
 			if err := container.ToDisk(); err != nil {
 				return err
 			}
@@ -304,7 +307,7 @@ func (daemon *Daemon) restore() error {
 	)
 
 	if !debug {
-		log.Infof("Loading containers: ")
+		log.Infof("Loading containers: start.")
 	}
 	dir, err := ioutil.ReadDir(daemon.repository)
 	if err != nil {
@@ -392,7 +395,8 @@ func (daemon *Daemon) restore() error {
 	}
 
 	if !debug {
-		log.Infof(": done.")
+		fmt.Println()
+		log.Infof("Loading containers: done.")
 	}
 
 	return nil
@@ -692,6 +696,9 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
 			if child == nil {
 				return fmt.Errorf("Could not get container for %s", parts["name"])
 			}
+			if child.hostConfig.NetworkMode.IsHost() {
+				return runconfig.ErrConflictHostNetworkAndLinks
+			}
 			if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil {
 				return err
 			}
@@ -717,10 +724,8 @@ func NewDaemon(config *Config, eng *engine.Engine) (*Daemon, error) {
 }
 
 func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) {
-	// Apply configuration defaults
 	if config.Mtu == 0 {
-		// FIXME: GetDefaultNetwork Mtu doesn't need to be public anymore
-		config.Mtu = GetDefaultNetworkMtu()
+		config.Mtu = getDefaultNetworkMtu()
 	}
 	// Check for mutually incompatible config options
 	if config.BridgeIface != "" && config.BridgeIP != "" {
@@ -893,7 +898,13 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
 		return nil, err
 	}
 
+	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
+	if err != nil {
+		return nil, err
+	}
+
 	daemon := &Daemon{
+		ID:             trustKey.PublicKey().KeyID(),
 		repository:     daemonRepo,
 		containers:     &contStore{s: make(map[string]*Container)},
 		execCommands:   newExecStore(),
@@ -918,7 +929,6 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
 	eng.OnShutdown(func() {
 		// FIXME: if these cleanup steps can be called concurrently, register
 		// them as separate handlers to speed up total shutdown time
-		// FIXME: use engine logging instead of log.Errorf
 		if err := daemon.shutdown(); err != nil {
 			log.Errorf("daemon.shutdown(): %s", err)
 		}
@@ -968,6 +978,7 @@ func (daemon *Daemon) Mount(container *Container) error {
 	if container.basefs == "" {
 		container.basefs = dir
 	} else if container.basefs != dir {
+		daemon.driver.Put(container.ID)
 		return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
 			daemon.driver, container.ID, container.basefs, dir)
 	}
@@ -989,7 +1000,7 @@ func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) {
 	return daemon.driver.Diff(container.ID, initID)
 }
 
-func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
+func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
 	return daemon.execDriver.Run(c.command, pipes, startCallback)
 }
 

+ 1 - 1
daemon/daemon_aufs.go

@@ -3,10 +3,10 @@
 package daemon
 
 import (
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/daemon/graphdriver/aufs"
 	"github.com/docker/docker/graph"
-	"github.com/docker/docker/pkg/log"
 )
 
 // Given the graphdriver ad, if it is aufs, then migrate it.

+ 7 - 0
daemon/daemon_overlay.go

@@ -0,0 +1,7 @@
+// +build !exclude_graphdriver_overlay
+
+package daemon
+
+import (
+	_ "github.com/docker/docker/daemon/graphdriver/overlay"
+)

+ 1 - 1
daemon/delete.go

@@ -5,8 +5,8 @@ import (
 	"os"
 	"path"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/engine"
-	"github.com/docker/docker/pkg/log"
 )
 
 func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {

+ 25 - 11
daemon/exec.go

@@ -9,12 +9,12 @@ import (
 	"strings"
 	"sync"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/execdriver/lxc"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/pkg/broadcastwriter"
 	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
@@ -24,6 +24,7 @@ type execConfig struct {
 	sync.Mutex
 	ID            string
 	Running       bool
+	ExitCode      int
 	ProcessConfig execdriver.ProcessConfig
 	StreamConfig
 	OpenStdin  bool
@@ -97,7 +98,9 @@ func (d *Daemon) getActiveContainer(name string) (*Container, error) {
 	if !container.IsRunning() {
 		return nil, fmt.Errorf("Container %s is not running", name)
 	}
-
+	if container.IsPaused() {
+		return nil, fmt.Errorf("Container %s is paused, unpause the container before exec", name)
+	}
 	return container, nil
 }
 
@@ -117,13 +120,14 @@ func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status {
 		return job.Error(err)
 	}
 
-	config := runconfig.ExecConfigFromJob(job)
+	config, err := runconfig.ExecConfigFromJob(job)
+	if err != nil {
+		return job.Error(err)
+	}
 
 	entrypoint, args := d.getEntrypointAndArgs(nil, config.Cmd)
 
 	processConfig := execdriver.ProcessConfig{
-		Privileged: config.Privileged,
-		User:       config.User,
 		Tty:        config.Tty,
 		Entrypoint: entrypoint,
 		Arguments:  args,
@@ -155,7 +159,6 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
 	var (
 		cStdin           io.ReadCloser
 		cStdout, cStderr io.Writer
-		cStdinCloser     io.Closer
 		execName         = job.Args[0]
 	)
 
@@ -183,10 +186,10 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
 		r, w := io.Pipe()
 		go func() {
 			defer w.Close()
+			defer log.Debugf("Closing buffered stdin pipe")
 			io.Copy(w, job.Stdin)
 		}()
 		cStdin = r
-		cStdinCloser = job.Stdin
 	}
 	if execConfig.OpenStdout {
 		cStdout = job.Stdout
@@ -204,12 +207,13 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
 		execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
 	}
 
-	attachErr := d.Attach(&execConfig.StreamConfig, execConfig.OpenStdin, false, execConfig.ProcessConfig.Tty, cStdin, cStdinCloser, cStdout, cStderr)
+	attachErr := d.attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr)
 
 	execErr := make(chan error)
 
-	// Remove exec from daemon and container.
-	defer d.unregisterExecCommand(execConfig)
+	// Note, the execConfig data will be removed when the container
+	// itself is deleted.  This allows us to query it (for things like
+	// the exitStatus) even after the cmd is done running.
 
 	go func() {
 		err := container.Exec(execConfig)
@@ -232,7 +236,17 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
 }
 
 func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
-	return d.execDriver.Exec(c.command, &execConfig.ProcessConfig, pipes, startCallback)
+	exitStatus, err := d.execDriver.Exec(c.command, &execConfig.ProcessConfig, pipes, startCallback)
+
+	// On err, make sure we don't leave ExitCode at zero
+	if err != nil && exitStatus == 0 {
+		exitStatus = 128
+	}
+
+	execConfig.ExitCode = exitStatus
+	execConfig.Running = false
+
+	return exitStatus, err
 }
 
 func (container *Container) Exec(execConfig *execConfig) error {

+ 17 - 1
daemon/execdriver/driver.go

@@ -40,8 +40,17 @@ type TtyTerminal interface {
 	Master() *os.File
 }
 
+// ExitStatus provides exit reasons for a container.
+type ExitStatus struct {
+	// The exit code with which the container exited.
+	ExitCode int
+
+	// Whether the container encountered an OOM.
+	OOMKilled bool
+}
+
 type Driver interface {
-	Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code
+	Run(c *Command, pipes *Pipes, startCallback StartCallback) (ExitStatus, error) // Run executes the process and blocks until the process exits and returns the exit code
 	// Exec executes the process in an existing container, blocks until the process exits and returns the exit code
 	Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, startCallback StartCallback) (int, error)
 	Kill(c *Command, sig int) error
@@ -62,6 +71,12 @@ type Network struct {
 	HostNetworking bool              `json:"host_networking"`
 }
 
+// IPC settings of the container
+type Ipc struct {
+	ContainerID string `json:"container_id"` // id of the container to join ipc.
+	HostIpc     bool   `json:"host_ipc"`
+}
+
 type NetworkInterface struct {
 	Gateway     string `json:"gateway"`
 	IPAddress   string `json:"ip"`
@@ -106,6 +121,7 @@ type Command struct {
 	WorkingDir         string            `json:"working_dir"`
 	ConfigPath         string            `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver
 	Network            *Network          `json:"network"`
+	Ipc                *Ipc              `json:"ipc"`
 	Resources          *Resources        `json:"resources"`
 	Mounts             []Mount           `json:"mounts"`
 	AllowedDevices     []*devices.Device `json:"allowed_devices"`

+ 1 - 0
daemon/execdriver/lxc/MAINTAINERS

@@ -1 +1,2 @@
+# the LXC exec driver needs more maintainers and contributions
 Dinesh Subhraveti <dineshs@altiscale.com> (@dineshs-altiscale)

+ 17 - 18
daemon/execdriver/lxc/driver.go

@@ -17,8 +17,8 @@ import (
 
 	"github.com/kr/pty"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
-	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/docker/utils"
 	"github.com/docker/libcontainer/cgroups"
@@ -55,7 +55,7 @@ func (d *driver) Name() string {
 	return fmt.Sprintf("%s-%s", DriverName, version)
 }
 
-func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
+func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
 	var (
 		term execdriver.Terminal
 		err  error
@@ -76,20 +76,27 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	})
 
 	if err := d.generateEnvConfig(c); err != nil {
-		return -1, err
+		return execdriver.ExitStatus{-1, false}, err
 	}
 	configPath, err := d.generateLXCConfig(c)
 	if err != nil {
-		return -1, err
+		return execdriver.ExitStatus{-1, false}, err
 	}
 	params := []string{
 		"lxc-start",
 		"-n", c.ID,
 		"-f", configPath,
-		"--",
-		c.InitPath,
+	}
+	if c.Network.ContainerID != "" {
+		params = append(params,
+			"--share-net", c.Network.ContainerID,
+		)
 	}
 
+	params = append(params,
+		"--",
+		c.InitPath,
+	)
 	if c.Network.Interface != nil {
 		params = append(params,
 			"-g", c.Network.Interface.Gateway,
@@ -116,14 +123,6 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		params = append(params, "-w", c.WorkingDir)
 	}
 
-	if len(c.CapAdd) > 0 {
-		params = append(params, fmt.Sprintf("-cap-add=%s", strings.Join(c.CapAdd, ":")))
-	}
-
-	if len(c.CapDrop) > 0 {
-		params = append(params, fmt.Sprintf("-cap-drop=%s", strings.Join(c.CapDrop, ":")))
-	}
-
 	params = append(params, "--", c.ProcessConfig.Entrypoint)
 	params = append(params, c.ProcessConfig.Arguments...)
 
@@ -155,11 +154,11 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	c.ProcessConfig.Args = append([]string{name}, arg...)
 
 	if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
-		return -1, err
+		return execdriver.ExitStatus{-1, false}, err
 	}
 
 	if err := c.ProcessConfig.Start(); err != nil {
-		return -1, err
+		return execdriver.ExitStatus{-1, false}, err
 	}
 
 	var (
@@ -183,7 +182,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 			c.ProcessConfig.Process.Kill()
 			c.ProcessConfig.Wait()
 		}
-		return -1, err
+		return execdriver.ExitStatus{-1, false}, err
 	}
 
 	c.ContainerPid = pid
@@ -194,7 +193,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 
 	<-waitLock
 
-	return getExitCode(c), waitErr
+	return execdriver.ExitStatus{getExitCode(c), false}, waitErr
 }
 
 /// Return the exit code of the process

+ 1 - 61
daemon/execdriver/lxc/init.go

@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"io/ioutil"
 	"log"
-	"net"
 	"os"
 	"os/exec"
 	"runtime"
@@ -14,7 +13,6 @@ import (
 	"syscall"
 
 	"github.com/docker/docker/pkg/reexec"
-	"github.com/docker/libcontainer/netlink"
 )
 
 // Args provided to the init function for a driver
@@ -59,12 +57,7 @@ func setupNamespace(args *InitArgs) error {
 	if err := setupEnv(args); err != nil {
 		return err
 	}
-	if err := setupHostname(args); err != nil {
-		return err
-	}
-	if err := setupNetworking(args); err != nil {
-		return err
-	}
+
 	if err := finalizeNamespace(args); err != nil {
 		return err
 	}
@@ -138,59 +131,6 @@ func setupEnv(args *InitArgs) error {
 	return nil
 }
 
-func setupHostname(args *InitArgs) error {
-	hostname := getEnv(args, "HOSTNAME")
-	if hostname == "" {
-		return nil
-	}
-	return setHostname(hostname)
-}
-
-// Setup networking
-func setupNetworking(args *InitArgs) error {
-	if args.Ip != "" {
-		// eth0
-		iface, err := net.InterfaceByName("eth0")
-		if err != nil {
-			return fmt.Errorf("Unable to set up networking: %v", err)
-		}
-		ip, ipNet, err := net.ParseCIDR(args.Ip)
-		if err != nil {
-			return fmt.Errorf("Unable to set up networking: %v", err)
-		}
-		if err := netlink.NetworkLinkAddIp(iface, ip, ipNet); err != nil {
-			return fmt.Errorf("Unable to set up networking: %v", err)
-		}
-		if err := netlink.NetworkSetMTU(iface, args.Mtu); err != nil {
-			return fmt.Errorf("Unable to set MTU: %v", err)
-		}
-		if err := netlink.NetworkLinkUp(iface); err != nil {
-			return fmt.Errorf("Unable to set up networking: %v", err)
-		}
-
-		// loopback
-		iface, err = net.InterfaceByName("lo")
-		if err != nil {
-			return fmt.Errorf("Unable to set up networking: %v", err)
-		}
-		if err := netlink.NetworkLinkUp(iface); err != nil {
-			return fmt.Errorf("Unable to set up networking: %v", err)
-		}
-	}
-	if args.Gateway != "" {
-		gw := net.ParseIP(args.Gateway)
-		if gw == nil {
-			return fmt.Errorf("Unable to set up networking, %s is not a valid gateway IP", args.Gateway)
-		}
-
-		if err := netlink.AddDefaultGw(gw.String(), "eth0"); err != nil {
-			return fmt.Errorf("Unable to set up networking: %v", err)
-		}
-	}
-
-	return nil
-}
-
 // Setup working directory
 func setupWorkingDirectory(args *InitArgs) error {
 	if args.WorkDir == "" {

+ 0 - 55
daemon/execdriver/lxc/lxc_init_linux.go

@@ -2,74 +2,19 @@ package lxc
 
 import (
 	"fmt"
-	"strings"
-	"syscall"
-
-	"github.com/docker/docker/daemon/execdriver"
-	"github.com/docker/docker/daemon/execdriver/native/template"
 	"github.com/docker/libcontainer/namespaces"
-	"github.com/docker/libcontainer/security/capabilities"
-	"github.com/docker/libcontainer/system"
 	"github.com/docker/libcontainer/utils"
 )
 
-func setHostname(hostname string) error {
-	return syscall.Sethostname([]byte(hostname))
-}
-
 func finalizeNamespace(args *InitArgs) error {
 	if err := utils.CloseExecFrom(3); err != nil {
 		return err
 	}
 
-	// We use the native drivers default template so that things like caps are consistent
-	// across both drivers
-	container := template.New()
-
-	if !args.Privileged {
-		// drop capabilities in bounding set before changing user
-		if err := capabilities.DropBoundingSet(container.Capabilities); err != nil {
-			return fmt.Errorf("drop bounding set %s", err)
-		}
-
-		// preserve existing capabilities while we change users
-		if err := system.SetKeepCaps(); err != nil {
-			return fmt.Errorf("set keep caps %s", err)
-		}
-	}
-
 	if err := namespaces.SetupUser(args.User); err != nil {
 		return fmt.Errorf("setup user %s", err)
 	}
 
-	if !args.Privileged {
-		if err := system.ClearKeepCaps(); err != nil {
-			return fmt.Errorf("clear keep caps %s", err)
-		}
-
-		var (
-			adds  []string
-			drops []string
-		)
-
-		if args.CapAdd != "" {
-			adds = strings.Split(args.CapAdd, ":")
-		}
-		if args.CapDrop != "" {
-			drops = strings.Split(args.CapDrop, ":")
-		}
-
-		caps, err := execdriver.TweakCapabilities(container.Capabilities, adds, drops)
-		if err != nil {
-			return err
-		}
-
-		// drop all other capabilities
-		if err := capabilities.DropCapabilities(caps); err != nil {
-			return fmt.Errorf("drop capabilities %s", err)
-		}
-	}
-
 	if err := setupWorkingDirectory(args); err != nil {
 		return err
 	}

+ 1 - 7
daemon/execdriver/lxc/lxc_init_unsupported.go

@@ -2,12 +2,6 @@
 
 package lxc
 
-import "github.com/docker/docker/daemon/execdriver"
-
-func setHostname(hostname string) error {
-	panic("Not supported on darwin")
-}
-
-func finalizeNamespace(args *execdriver.InitArgs) error {
+func finalizeNamespace(args *InitArgs) error {
 	panic("Not supported on darwin")
 }

+ 66 - 5
daemon/execdriver/lxc/lxc_template.go

@@ -1,11 +1,12 @@
 package lxc
 
 import (
-	"strings"
-	"text/template"
-
 	"github.com/docker/docker/daemon/execdriver"
+	nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template"
 	"github.com/docker/libcontainer/label"
+	"os"
+	"strings"
+	"text/template"
 )
 
 const LxcTemplate = `
@@ -15,6 +16,13 @@ lxc.network.type = veth
 lxc.network.link = {{.Network.Interface.Bridge}}
 lxc.network.name = eth0
 lxc.network.mtu = {{.Network.Mtu}}
+{{if .Network.Interface.IPAddress}}
+lxc.network.ipv4 = {{.Network.Interface.IPAddress}}/{{.Network.Interface.IPPrefixLen}}
+{{end}}
+{{if .Network.Interface.Gateway}}
+lxc.network.ipv4.gateway = {{.Network.Interface.Gateway}}
+{{end}}
+lxc.network.flags = up
 {{else if .Network.HostNetworking}}
 lxc.network.type = none
 {{else}}
@@ -70,10 +78,23 @@ lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts {{formatMo
 lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountLabel "size=65536k,nosuid,nodev,noexec" ""}} 0 0
 
 {{range $value := .Mounts}}
+{{$createVal := isDirectory $value.Source}}
 {{if $value.Writable}}
-lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,rw 0 0
+lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,rw,create={{$createVal}} 0 0
 {{else}}
-lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,ro 0 0
+lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,ro,create={{$createVal}} 0 0
+{{end}}
+{{end}}
+
+{{if .ProcessConfig.Env}}
+lxc.utsname = {{getHostname .ProcessConfig.Env}}
+{{end}}
+
+{{if .ProcessConfig.Privileged}}
+# No cap values are needed, as lxc is starting in privileged mode
+{{else}}
+{{range $value := keepCapabilities .CapAdd .CapDrop}}
+lxc.cap.keep = {{$value}}
 {{end}}
 {{end}}
 
@@ -117,6 +138,33 @@ func escapeFstabSpaces(field string) string {
 	return strings.Replace(field, " ", "\\040", -1)
 }
 
+func keepCapabilities(adds []string, drops []string) []string {
+	container := nativeTemplate.New()
+	caps, err := execdriver.TweakCapabilities(container.Capabilities, adds, drops)
+	var newCaps []string
+	for _, cap := range caps {
+		newCaps = append(newCaps, strings.ToLower(cap))
+	}
+	if err != nil {
+		return []string{}
+	}
+	return newCaps
+}
+
+func isDirectory(source string) string {
+	f, err := os.Stat(source)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return "dir"
+		}
+		return ""
+	}
+	if f.IsDir() {
+		return "dir"
+	}
+	return "file"
+}
+
 func getMemorySwap(v *execdriver.Resources) int64 {
 	// By default, MemorySwap is set to twice the size of RAM.
 	// If you want to omit MemorySwap, set it to `-1'.
@@ -137,12 +185,25 @@ func getLabel(c map[string][]string, name string) string {
 	return ""
 }
 
+func getHostname(env []string) string {
+	for _, kv := range env {
+		parts := strings.SplitN(kv, "=", 2)
+		if parts[0] == "HOSTNAME" && len(parts) == 2 {
+			return parts[1]
+		}
+	}
+	return ""
+}
+
 func init() {
 	var err error
 	funcMap := template.FuncMap{
 		"getMemorySwap":     getMemorySwap,
 		"escapeFstabSpaces": escapeFstabSpaces,
 		"formatMountLabel":  label.FormatMountLabel,
+		"isDirectory":       isDirectory,
+		"keepCapabilities":  keepCapabilities,
+		"getHostname":       getHostname,
 	}
 	LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate)
 	if err != nil {

+ 160 - 0
daemon/execdriver/lxc/lxc_template_unit_test.go

@@ -14,6 +14,7 @@ import (
 	"time"
 
 	"github.com/docker/docker/daemon/execdriver"
+	nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template"
 	"github.com/docker/libcontainer/devices"
 )
 
@@ -104,6 +105,10 @@ func TestCustomLxcConfig(t *testing.T) {
 }
 
 func grepFile(t *testing.T, path string, pattern string) {
+	grepFileWithReverse(t, path, pattern, false)
+}
+
+func grepFileWithReverse(t *testing.T, path string, pattern string, inverseGrep bool) {
 	f, err := os.Open(path)
 	if err != nil {
 		t.Fatal(err)
@@ -117,9 +122,15 @@ func grepFile(t *testing.T, path string, pattern string) {
 	for err == nil {
 		line, err = r.ReadString('\n')
 		if strings.Contains(line, pattern) == true {
+			if inverseGrep {
+				t.Fatalf("grepFile: pattern \"%s\" found in \"%s\"", pattern, path)
+			}
 			return
 		}
 	}
+	if inverseGrep {
+		return
+	}
 	t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path)
 }
 
@@ -140,3 +151,152 @@ func TestEscapeFstabSpaces(t *testing.T) {
 		}
 	}
 }
+
+func TestIsDirectory(t *testing.T) {
+	tempDir, err := ioutil.TempDir("", "TestIsDir")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempDir)
+
+	tempFile, err := ioutil.TempFile(tempDir, "TestIsDirFile")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if isDirectory(tempDir) != "dir" {
+		t.Logf("Could not identify %s as a directory", tempDir)
+		t.Fail()
+	}
+
+	if isDirectory(tempFile.Name()) != "file" {
+		t.Logf("Could not identify %s as a file", tempFile.Name())
+		t.Fail()
+	}
+}
+
+func TestCustomLxcConfigMounts(t *testing.T) {
+	root, err := ioutil.TempDir("", "TestCustomLxcConfig")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(root)
+	tempDir, err := ioutil.TempDir("", "TestIsDir")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempDir)
+
+	tempFile, err := ioutil.TempFile(tempDir, "TestIsDirFile")
+	if err != nil {
+		t.Fatal(err)
+	}
+	os.MkdirAll(path.Join(root, "containers", "1"), 0777)
+
+	driver, err := NewDriver(root, "", false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	processConfig := execdriver.ProcessConfig{
+		Privileged: false,
+	}
+	mounts := []execdriver.Mount{
+		{
+			Source:      tempDir,
+			Destination: tempDir,
+			Writable:    false,
+			Private:     true,
+		},
+		{
+			Source:      tempFile.Name(),
+			Destination: tempFile.Name(),
+			Writable:    true,
+			Private:     true,
+		},
+	}
+	command := &execdriver.Command{
+		ID: "1",
+		LxcConfig: []string{
+			"lxc.utsname = docker",
+			"lxc.cgroup.cpuset.cpus = 0,1",
+		},
+		Network: &execdriver.Network{
+			Mtu:       1500,
+			Interface: nil,
+		},
+		Mounts:        mounts,
+		ProcessConfig: processConfig,
+	}
+
+	p, err := driver.generateLXCConfig(command)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	grepFile(t, p, "lxc.utsname = docker")
+	grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1")
+
+	grepFile(t, p, fmt.Sprintf("lxc.mount.entry = %s %s none rbind,ro,create=%s 0 0", tempDir, "/"+tempDir, "dir"))
+	grepFile(t, p, fmt.Sprintf("lxc.mount.entry = %s %s none rbind,rw,create=%s 0 0", tempFile.Name(), "/"+tempFile.Name(), "file"))
+}
+
+func TestCustomLxcConfigMisc(t *testing.T) {
+	root, err := ioutil.TempDir("", "TestCustomLxcConfig")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(root)
+	os.MkdirAll(path.Join(root, "containers", "1"), 0777)
+	driver, err := NewDriver(root, "", false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	processConfig := execdriver.ProcessConfig{
+		Privileged: false,
+	}
+
+	processConfig.Env = []string{"HOSTNAME=testhost"}
+	command := &execdriver.Command{
+		ID: "1",
+		LxcConfig: []string{
+			"lxc.cgroup.cpuset.cpus = 0,1",
+		},
+		Network: &execdriver.Network{
+			Mtu: 1500,
+			Interface: &execdriver.NetworkInterface{
+				Gateway:     "10.10.10.1",
+				IPAddress:   "10.10.10.10",
+				IPPrefixLen: 24,
+				Bridge:      "docker0",
+			},
+		},
+		ProcessConfig: processConfig,
+		CapAdd:        []string{"net_admin", "syslog"},
+		CapDrop:       []string{"kill", "mknod"},
+	}
+
+	p, err := driver.generateLXCConfig(command)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// network
+	grepFile(t, p, "lxc.network.type = veth")
+	grepFile(t, p, "lxc.network.link = docker0")
+	grepFile(t, p, "lxc.network.name = eth0")
+	grepFile(t, p, "lxc.network.ipv4 = 10.10.10.10/24")
+	grepFile(t, p, "lxc.network.ipv4.gateway = 10.10.10.1")
+	grepFile(t, p, "lxc.network.flags = up")
+
+	// hostname
+	grepFile(t, p, "lxc.utsname = testhost")
+	grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1")
+	container := nativeTemplate.New()
+	for _, cap := range container.Capabilities {
+		cap = strings.ToLower(cap)
+		if cap != "mknod" && cap != "kill" {
+			grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", cap))
+		}
+	}
+	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = kill"), true)
+	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = mknod"), true)
+}

+ 26 - 0
daemon/execdriver/native/create.go

@@ -36,6 +36,10 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, e
 	container.MountConfig.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
 	container.RestrictSys = true
 
+	if err := d.createIpc(container, c); err != nil {
+		return nil, err
+	}
+
 	if err := d.createNetwork(container, c); err != nil {
 		return nil, err
 	}
@@ -124,6 +128,28 @@ func (d *driver) createNetwork(container *libcontainer.Config, c *execdriver.Com
 	return nil
 }
 
+func (d *driver) createIpc(container *libcontainer.Config, c *execdriver.Command) error {
+	if c.Ipc.HostIpc {
+		container.Namespaces["NEWIPC"] = false
+		return nil
+	}
+
+	if c.Ipc.ContainerID != "" {
+		d.Lock()
+		active := d.activeContainers[c.Ipc.ContainerID]
+		d.Unlock()
+
+		if active == nil || active.cmd.Process == nil {
+			return fmt.Errorf("%s is not a valid running container to join", c.Ipc.ContainerID)
+		}
+		cmd := active.cmd
+
+		container.IpcNsPath = filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "ipc")
+	}
+
+	return nil
+}
+
 func (d *driver) setPrivileged(container *libcontainer.Config) (err error) {
 	container.Capabilities = capabilities.GetAllCapabilities()
 	container.Cgroups.AllowAllDevices = true

+ 65 - 29
daemon/execdriver/native/driver.go

@@ -14,6 +14,7 @@ import (
 	"sync"
 	"syscall"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/libcontainer"
@@ -60,11 +61,20 @@ func NewDriver(root, initPath string) (*driver, error) {
 	}, nil
 }
 
-func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
+func (d *driver) notifyOnOOM(config *libcontainer.Config) (<-chan struct{}, error) {
+	return fs.NotifyOnOOM(config.Cgroups)
+}
+
+type execOutput struct {
+	exitCode int
+	err      error
+}
+
+func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
 	// take the Command and populate the libcontainer.Config from it
 	container, err := d.createContainer(c)
 	if err != nil {
-		return -1, err
+		return execdriver.ExitStatus{-1, false}, err
 	}
 
 	var term execdriver.Terminal
@@ -75,7 +85,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
 	}
 	if err != nil {
-		return -1, err
+		return execdriver.ExitStatus{-1, false}, err
 	}
 	c.ProcessConfig.Terminal = term
 
@@ -92,40 +102,66 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	)
 
 	if err := d.createContainerRoot(c.ID); err != nil {
-		return -1, err
+		return execdriver.ExitStatus{-1, false}, err
 	}
 	defer d.cleanContainer(c.ID)
 
 	if err := d.writeContainerFile(container, c.ID); err != nil {
-		return -1, err
+		return execdriver.ExitStatus{-1, false}, err
 	}
 
-	return namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd {
-		c.ProcessConfig.Path = d.initPath
-		c.ProcessConfig.Args = append([]string{
-			DriverName,
-			"-console", console,
-			"-pipe", "3",
-			"-root", filepath.Join(d.root, c.ID),
-			"--",
-		}, args...)
-
-		// set this to nil so that when we set the clone flags anything else is reset
-		c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
-			Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
-		}
-		c.ProcessConfig.ExtraFiles = []*os.File{child}
+	execOutputChan := make(chan execOutput, 1)
+	waitForStart := make(chan struct{})
 
-		c.ProcessConfig.Env = container.Env
-		c.ProcessConfig.Dir = container.RootFs
+	go func() {
+		exitCode, err := namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd {
+			c.ProcessConfig.Path = d.initPath
+			c.ProcessConfig.Args = append([]string{
+				DriverName,
+				"-console", console,
+				"-pipe", "3",
+				"-root", filepath.Join(d.root, c.ID),
+				"--",
+			}, args...)
+
+			// set this to nil so that when we set the clone flags anything else is reset
+			c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
+				Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
+			}
+			c.ProcessConfig.ExtraFiles = []*os.File{child}
 
-		return &c.ProcessConfig.Cmd
-	}, func() {
-		if startCallback != nil {
-			c.ContainerPid = c.ProcessConfig.Process.Pid
-			startCallback(&c.ProcessConfig, c.ContainerPid)
-		}
-	})
+			c.ProcessConfig.Env = container.Env
+			c.ProcessConfig.Dir = container.RootFs
+
+			return &c.ProcessConfig.Cmd
+		}, func() {
+			close(waitForStart)
+			if startCallback != nil {
+				c.ContainerPid = c.ProcessConfig.Process.Pid
+				startCallback(&c.ProcessConfig, c.ContainerPid)
+			}
+		})
+		execOutputChan <- execOutput{exitCode, err}
+	}()
+
+	select {
+	case execOutput := <-execOutputChan:
+		return execdriver.ExitStatus{execOutput.exitCode, false}, execOutput.err
+	case <-waitForStart:
+		break
+	}
+
+	oomKill := false
+	oomKillNotification, err := d.notifyOnOOM(container)
+	if err == nil {
+		_, oomKill = <-oomKillNotification
+	} else {
+		log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err)
+	}
+	// wait for the container to exit.
+	execOutput := <-execOutputChan
+
+	return execdriver.ExitStatus{execOutput.exitCode, oomKill}, execOutput.err
 }
 
 func (d *driver) Kill(p *execdriver.Command, sig int) error {

+ 1 - 7
daemon/execdriver/native/init.go

@@ -13,7 +13,6 @@ import (
 	"github.com/docker/docker/pkg/reexec"
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/namespaces"
-	"github.com/docker/libcontainer/syncpipe"
 )
 
 func init() {
@@ -48,12 +47,7 @@ func initializer() {
 		writeError(err)
 	}
 
-	syncPipe, err := syncpipe.NewSyncPipeFromFd(0, uintptr(*pipe))
-	if err != nil {
-		writeError(err)
-	}
-
-	if err := namespaces.Init(container, rootfs, *console, syncPipe, flag.Args()); err != nil {
+	if err := namespaces.Init(container, rootfs, *console, os.NewFile(uintptr(*pipe), "child"), flag.Args()); err != nil {
 		writeError(err)
 	}
 

+ 2 - 8
daemon/execdriver/native/utils.go

@@ -3,10 +3,10 @@
 package native
 
 import (
+	"encoding/json"
 	"os"
 
 	"github.com/docker/libcontainer"
-	"github.com/docker/libcontainer/syncpipe"
 )
 
 func findUserArgs() []string {
@@ -21,15 +21,9 @@ func findUserArgs() []string {
 // loadConfigFromFd loads a container's config from the sync pipe that is provided by
 // fd 3 when running a process
 func loadConfigFromFd() (*libcontainer.Config, error) {
-	syncPipe, err := syncpipe.NewSyncPipeFromFd(0, 3)
-	if err != nil {
-		return nil, err
-	}
-
 	var config *libcontainer.Config
-	if err := syncPipe.ReadFromParent(&config); err != nil {
+	if err := json.NewDecoder(os.NewFile(3, "child")).Decode(&config); err != nil {
 		return nil, err
 	}
-
 	return config, nil
 }

+ 36 - 30
daemon/graphdriver/aufs/aufs.go

@@ -30,10 +30,10 @@ import (
 	"sync"
 	"syscall"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/pkg/log"
 	mountpk "github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/utils"
 	"github.com/docker/libcontainer/label"
@@ -99,7 +99,7 @@ func Init(root string, options []string) (graphdriver.Driver, error) {
 		return nil, err
 	}
 
-	if err := graphdriver.MakePrivate(root); err != nil {
+	if err := mountpk.MakePrivate(root); err != nil {
 		return nil, err
 	}
 
@@ -301,6 +301,7 @@ func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
 	// AUFS doesn't need the parent layer to produce a diff.
 	return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
 		Compression: archive.Uncompressed,
+		Excludes:    []string{".wh..wh.*"},
 	})
 }
 
@@ -412,39 +413,44 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
 		}
 	}()
 
-	if err = a.tryMount(ro, rw, target, mountLabel); err != nil {
-		if err = a.mountRw(rw, target, mountLabel); err != nil {
-			return
+	// Mount options are clipped to page size(4096 bytes). If there are more
+	// layers then these are remounted individually using append.
+
+	b := make([]byte, syscall.Getpagesize()-len(mountLabel)-50) // room for xino & mountLabel
+	bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
+
+	firstMount := true
+	i := 0
+
+	for {
+		for ; i < len(ro); i++ {
+			layer := fmt.Sprintf(":%s=ro+wh", ro[i])
+
+			if firstMount {
+				if bp+len(layer) > len(b) {
+					break
+				}
+				bp += copy(b[bp:], layer)
+			} else {
+				data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
+				if err = mount("none", target, "aufs", MsRemount, data); err != nil {
+					return
+				}
+			}
 		}
 
-		for _, layer := range ro {
-			data := label.FormatMountLabel(fmt.Sprintf("append:%s=ro+wh", layer), mountLabel)
-			if err = mount("none", target, "aufs", MsRemount, data); err != nil {
+		if firstMount {
+			data := label.FormatMountLabel(fmt.Sprintf("%s,xino=/dev/shm/aufs.xino", string(b[:bp])), mountLabel)
+			if err = mount("none", target, "aufs", 0, data); err != nil {
 				return
 			}
+			firstMount = false
 		}
-	}
-	return
-}
-
-// Try to mount using the aufs fast path, if this fails then
-// append ro layers.
-func (a *Driver) tryMount(ro []string, rw, target, mountLabel string) (err error) {
-	var (
-		rwBranch   = fmt.Sprintf("%s=rw", rw)
-		roBranches = fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:"))
-		data       = label.FormatMountLabel(fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches), mountLabel)
-	)
-	return mount("none", target, "aufs", 0, data)
-}
-
-func (a *Driver) mountRw(rw, target, mountLabel string) error {
-	data := label.FormatMountLabel(fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw), mountLabel)
-	return mount("none", target, "aufs", 0, data)
-}
 
-func rollbackMount(target string, err error) {
-	if err != nil {
-		Unmount(target)
+		if i == len(ro) {
+			break
+		}
 	}
+
+	return
 }

+ 40 - 11
daemon/graphdriver/aufs/aufs_test.go

@@ -15,7 +15,8 @@ import (
 )
 
 var (
-	tmp = path.Join(os.TempDir(), "aufs-tests", "aufs")
+	tmpOuter = path.Join(os.TempDir(), "aufs-tests")
+	tmp      = path.Join(tmpOuter, "aufs")
 )
 
 func init() {
@@ -641,9 +642,13 @@ func hash(c string) string {
 	return hex.EncodeToString(h.Sum(nil))
 }
 
-func TestMountMoreThan42Layers(t *testing.T) {
-	d := newDriver(t)
-	defer os.RemoveAll(tmp)
+func testMountMoreThan42Layers(t *testing.T, mountPath string) {
+	if err := os.MkdirAll(mountPath, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	defer os.RemoveAll(mountPath)
+	d := testInit(mountPath, t).(*Driver)
 	defer d.Cleanup()
 	var last string
 	var expected int
@@ -664,24 +669,24 @@ func TestMountMoreThan42Layers(t *testing.T) {
 
 		if err := d.Create(current, parent); err != nil {
 			t.Logf("Current layer %d", i)
-			t.Fatal(err)
+			t.Error(err)
 		}
 		point, err := d.Get(current, "")
 		if err != nil {
 			t.Logf("Current layer %d", i)
-			t.Fatal(err)
+			t.Error(err)
 		}
 		f, err := os.Create(path.Join(point, current))
 		if err != nil {
 			t.Logf("Current layer %d", i)
-			t.Fatal(err)
+			t.Error(err)
 		}
 		f.Close()
 
 		if i%10 == 0 {
 			if err := os.Remove(path.Join(point, parent)); err != nil {
 				t.Logf("Current layer %d", i)
-				t.Fatal(err)
+				t.Error(err)
 			}
 			expected--
 		}
@@ -691,13 +696,37 @@ func TestMountMoreThan42Layers(t *testing.T) {
 	// Perform the actual mount for the top most image
 	point, err := d.Get(last, "")
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 	files, err := ioutil.ReadDir(point)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 	if len(files) != expected {
-		t.Fatalf("Expected %d got %d", expected, len(files))
+		t.Errorf("Expected %d got %d", expected, len(files))
+	}
+}
+
+func TestMountMoreThan42Layers(t *testing.T) {
+	os.RemoveAll(tmpOuter)
+	testMountMoreThan42Layers(t, tmp)
+}
+
+func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) {
+	defer os.RemoveAll(tmpOuter)
+	zeroes := "0"
+	for {
+		// This finds a mount path so that when combined into aufs mount options
+		// 4096 byte boundary would be in between the paths or in permission
+		// section. For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs'
+		mountPath := path.Join(tmpOuter, zeroes, "aufs")
+		pathLength := 77 + len(mountPath)
+
+		if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 {
+			t.Logf("Using path: %s", mountPath)
+			testMountMoreThan42Layers(t, mountPath)
+			return
+		}
+		zeroes += "0"
 	}
 }

+ 1 - 1
daemon/graphdriver/aufs/mount.go

@@ -4,7 +4,7 @@ import (
 	"os/exec"
 	"syscall"
 
-	"github.com/docker/docker/pkg/log"
+	log "github.com/Sirupsen/logrus"
 )
 
 func Unmount(target string) error {

+ 9 - 2
daemon/graphdriver/btrfs/btrfs.go

@@ -40,7 +40,7 @@ func Init(home string, options []string) (graphdriver.Driver, error) {
 		return nil, err
 	}
 
-	if err := graphdriver.MakePrivate(home); err != nil {
+	if err := mount.MakePrivate(home); err != nil {
 		return nil, err
 	}
 
@@ -60,7 +60,14 @@ func (d *Driver) String() string {
 }
 
 func (d *Driver) Status() [][2]string {
-	return nil
+	status := [][2]string{}
+	if bv := BtrfsBuildVersion(); bv != "-" {
+		status = append(status, [2]string{"Build Version", bv})
+	}
+	if lv := BtrfsLibVersion(); lv != -1 {
+		status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)})
+	}
+	return status
 }
 
 func (d *Driver) Cleanup() error {

+ 24 - 0
daemon/graphdriver/btrfs/version.go

@@ -0,0 +1,24 @@
+// +build linux,!btrfs_noversion
+
+package btrfs
+
+/*
+#include <btrfs/version.h>
+
+// because around version 3.16, they did not define lib version yet
+int my_btrfs_lib_version() {
+#ifdef BTRFS_LIB_VERSION
+  return BTRFS_LIB_VERSION;
+#else
+  return -1;
+#endif
+}
+*/
+import "C"
+
+func BtrfsBuildVersion() string {
+	return string(C.BTRFS_BUILD_VERSION)
+}
+func BtrfsLibVersion() int {
+	return int(C.BTRFS_LIB_VERSION)
+}

+ 13 - 0
daemon/graphdriver/btrfs/version_none.go

@@ -0,0 +1,13 @@
+// +build linux,btrfs_noversion
+
+package btrfs
+
+// TODO(vbatts) remove this work-around once supported linux distros are on
+// btrfs utilities of >= 3.16.1
+
+func BtrfsBuildVersion() string {
+	return "-"
+}
+func BtrfsLibVersion() int {
+	return -1
+}

+ 13 - 0
daemon/graphdriver/btrfs/version_test.go

@@ -0,0 +1,13 @@
+// +build linux
+
+package btrfs
+
+import (
+	"testing"
+)
+
+func TestBuildVersion(t *testing.T) {
+	if len(BtrfsBuildVersion()) == 0 {
+		t.Errorf("expected output from btrfs build version, but got empty string")
+	}
+}

+ 1 - 0
daemon/graphdriver/devmapper/MAINTAINERS

@@ -1 +1,2 @@
 Alexander Larsson <alexl@redhat.com> (@alexlarsson)
+Vincent Batts <vbatts@redhat.com> (@vbatts)

+ 19 - 0
daemon/graphdriver/devmapper/README.md

@@ -100,6 +100,25 @@ Here is the list of supported options:
 
     ``docker -d --storage-opt dm.mountopt=nodiscard``
 
+ *  `dm.thinpooldev`
+
+    Specifies a custom blockdevice to use for the thin pool.
+
+    If using a block device for device mapper storage, ideally lvm2
+    would be used to create/manage the thin-pool volume that is then
+    handed to docker to exclusively create/manage the thin and thin
+    snapshot volumes needed for its containers.  Managing the thin-pool
+    outside of docker makes for the most feature-rich method of having
+    docker utilize device mapper thin provisioning as the backing
+    storage for docker's containers.  lvm2-based thin-pool management
+    feature highlights include: automatic or interactive thin-pool
+    resize support, dynamically change thin-pool features, automatic
+    thinp metadata checking when lvm2 activates the thin-pool, etc.
+
+    Example use:
+
+    ``docker -d --storage-opt dm.thinpooldev=/dev/mapper/thin-pool``
+
  *  `dm.datadev`
 
     Specifies a custom blockdevice to use for data for the thin pool.

Những thay đổi đã bị hủy bỏ vì nó quá lớn
+ 523 - 116
daemon/graphdriver/devmapper/deviceset.go


+ 5 - 1
daemon/graphdriver/devmapper/devmapper_test.go

@@ -3,8 +3,9 @@
 package devmapper
 
 import (
-	"github.com/docker/docker/daemon/graphdriver/graphtest"
 	"testing"
+
+	"github.com/docker/docker/daemon/graphdriver/graphtest"
 )
 
 func init() {
@@ -12,6 +13,9 @@ func init() {
 	DefaultDataLoopbackSize = 300 * 1024 * 1024
 	DefaultMetaDataLoopbackSize = 200 * 1024 * 1024
 	DefaultBaseFsSize = 300 * 1024 * 1024
+	if err := graphtest.InitLoopbacks(); err != nil {
+		panic(err)
+	}
 }
 
 // This avoids creating a new driver for each test if all tests are run

+ 4 - 3
daemon/graphdriver/devmapper/driver.go

@@ -8,8 +8,9 @@ import (
 	"os"
 	"path"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/graphdriver"
-	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/devicemapper"
 	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/units"
 )
@@ -34,7 +35,7 @@ func Init(home string, options []string) (graphdriver.Driver, error) {
 		return nil, err
 	}
 
-	if err := graphdriver.MakePrivate(home); err != nil {
+	if err := mount.MakePrivate(home); err != nil {
 		return nil, err
 	}
 
@@ -63,7 +64,7 @@ func (d *Driver) Status() [][2]string {
 		{"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Used)))},
 		{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Total)))},
 	}
-	if vStr, err := GetLibraryVersion(); err == nil {
+	if vStr, err := devicemapper.GetLibraryVersion(); err == nil {
 		status = append(status, [2]string{"Library Version", vStr})
 	}
 	return status

+ 2 - 16
daemon/graphdriver/driver.go

@@ -7,7 +7,6 @@ import (
 	"path"
 
 	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/mount"
 )
 
 type FsMagic uint64
@@ -81,6 +80,8 @@ var (
 		"btrfs",
 		"devicemapper",
 		"vfs",
+		// experimental, has to be enabled manually for now
+		"overlay",
 	}
 
 	ErrNotSupported   = errors.New("driver not supported")
@@ -139,18 +140,3 @@ func New(root string, options []string) (driver Driver, err error) {
 	}
 	return nil, fmt.Errorf("No supported storage backend found")
 }
-
-func MakePrivate(mountPoint string) error {
-	mounted, err := mount.Mounted(mountPoint)
-	if err != nil {
-		return err
-	}
-
-	if !mounted {
-		if err := mount.Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
-			return err
-		}
-	}
-
-	return mount.ForceMount("", mountPoint, "none", "private")
-}

+ 3 - 1
daemon/graphdriver/fsdiff.go

@@ -1,13 +1,15 @@
+// +build daemon
+
 package graphdriver
 
 import (
 	"fmt"
 	"time"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/utils"
 )
 

+ 42 - 1
daemon/graphdriver/graphtest/graphtest.go

@@ -1,6 +1,7 @@
 package graphtest
 
 import (
+	"fmt"
 	"io/ioutil"
 	"os"
 	"path"
@@ -20,6 +21,46 @@ type Driver struct {
 	refCount int
 }
 
+// InitLoopbacks ensures that the loopback devices are properly created within
+// the system running the device mapper tests.
+func InitLoopbacks() error {
+	stat_t, err := getBaseLoopStats()
+	if err != nil {
+		return err
+	}
+	// create atleast 8 loopback files, ya, that is a good number
+	for i := 0; i < 8; i++ {
+		loopPath := fmt.Sprintf("/dev/loop%d", i)
+		// only create new loopback files if they don't exist
+		if _, err := os.Stat(loopPath); err != nil {
+			if mkerr := syscall.Mknod(loopPath,
+				uint32(stat_t.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil {
+				return mkerr
+			}
+			os.Chown(loopPath, int(stat_t.Uid), int(stat_t.Gid))
+		}
+	}
+	return nil
+}
+
+// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the
+// loop0 device on the system.  If it does not exist we assume 0,0,0660 for the
+// stat data
+func getBaseLoopStats() (*syscall.Stat_t, error) {
+	loop0, err := os.Stat("/dev/loop0")
+	if err != nil {
+		if os.IsNotExist(err) {
+			return &syscall.Stat_t{
+				Uid:  0,
+				Gid:  0,
+				Mode: 0660,
+			}, nil
+		}
+		return nil, err
+	}
+	return loop0.Sys().(*syscall.Stat_t), nil
+}
+
 func newDriver(t *testing.T, name string) *Driver {
 	root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-")
 	if err != nil {
@@ -33,7 +74,7 @@ func newDriver(t *testing.T, name string) *Driver {
 	d, err := graphdriver.GetDriver(name, root, nil)
 	if err != nil {
 		if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites {
-			t.Skip("Driver %s not supported", name)
+			t.Skipf("Driver %s not supported", name)
 		}
 		t.Fatal(err)
 	}

+ 157 - 0
daemon/graphdriver/overlay/copy.go

@@ -0,0 +1,157 @@
+// +build linux
+
+package overlay
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"syscall"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+type CopyFlags int
+
+const (
+	CopyHardlink CopyFlags = 1 << iota
+)
+
+func copyRegular(srcPath, dstPath string, mode os.FileMode) error {
+	srcFile, err := os.Open(srcPath)
+	if err != nil {
+		return err
+	}
+	defer srcFile.Close()
+
+	dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode)
+	if err != nil {
+		return err
+	}
+	defer dstFile.Close()
+
+	_, err = io.Copy(dstFile, srcFile)
+
+	return err
+}
+
+func copyXattr(srcPath, dstPath, attr string) error {
+	data, err := system.Lgetxattr(srcPath, attr)
+	if err != nil {
+		return err
+	}
+	if data != nil {
+		if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func copyDir(srcDir, dstDir string, flags CopyFlags) error {
+	err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		relPath, err := filepath.Rel(srcDir, srcPath)
+		if err != nil {
+			return err
+		}
+
+		dstPath := filepath.Join(dstDir, relPath)
+		if err != nil {
+			return err
+		}
+
+		stat, ok := f.Sys().(*syscall.Stat_t)
+		if !ok {
+			return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
+		}
+
+		switch f.Mode() & os.ModeType {
+		case 0: // Regular file
+			if flags&CopyHardlink != 0 {
+				if err := os.Link(srcPath, dstPath); err != nil {
+					return err
+				}
+			} else {
+				if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil {
+					return err
+				}
+			}
+
+		case os.ModeDir:
+			if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
+				return err
+			}
+
+		case os.ModeSymlink:
+			link, err := os.Readlink(srcPath)
+			if err != nil {
+				return err
+			}
+
+			if err := os.Symlink(link, dstPath); err != nil {
+				return err
+			}
+
+		case os.ModeNamedPipe:
+			fallthrough
+		case os.ModeSocket:
+			if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil {
+				return err
+			}
+
+		case os.ModeDevice:
+			if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
+				return err
+			}
+
+		default:
+			return fmt.Errorf("Unknown file type for %s\n", srcPath)
+		}
+
+		if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
+			return err
+		}
+
+		if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
+			return err
+		}
+
+		// We need to copy this attribute if it appears in an overlay upper layer, as
+		// this function is used to copy those. It is set by overlay if a directory
+		// is removed and then re-created and should not inherit anything from the
+		// same dir in the lower dir.
+		if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil {
+			return err
+		}
+
+		isSymlink := f.Mode()&os.ModeSymlink != 0
+
+		// There is no LChmod, so ignore mode for symlink. Also, this
+		// must happen after chown, as that can modify the file mode
+		if !isSymlink {
+			if err := os.Chmod(dstPath, f.Mode()); err != nil {
+				return err
+			}
+		}
+
+		ts := []syscall.Timespec{stat.Atim, stat.Mtim}
+		// syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and
+		if !isSymlink {
+			if err := system.UtimesNano(dstPath, ts); err != nil {
+				return err
+			}
+		} else {
+			if err := system.LUtimesNano(dstPath, ts); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	return err
+}

+ 370 - 0
daemon/graphdriver/overlay/overlay.go

@@ -0,0 +1,370 @@
+// +build linux
+
+package overlay
+
+import (
+	"bufio"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"sync"
+	"syscall"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/libcontainer/label"
+)
+
+// This is a small wrapper over the NaiveDiffWriter that lets us have a custom
+// implementation of ApplyDiff()
+
+var (
+	ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff")
+)
+
+type ApplyDiffProtoDriver interface {
+	graphdriver.ProtoDriver
+	ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error)
+}
+
+type naiveDiffDriverWithApply struct {
+	graphdriver.Driver
+	applyDiff ApplyDiffProtoDriver
+}
+
+func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver) graphdriver.Driver {
+	return &naiveDiffDriverWithApply{
+		Driver:    graphdriver.NaiveDiffDriver(driver),
+		applyDiff: driver,
+	}
+}
+
+func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.ArchiveReader) (int64, error) {
+	b, err := d.applyDiff.ApplyDiff(id, parent, diff)
+	if err == ErrApplyDiffFallback {
+		return d.Driver.ApplyDiff(id, parent, diff)
+	}
+	return b, err
+}
+
+// This backend uses the overlay union filesystem for containers
+// plus hard link file sharing for images.
+
+// Each container/image can have a "root" subdirectory which is a plain
+// filesystem hierarchy, or they can use overlay.
+
+// If they use overlay there is a "upper" directory and a "lower-id"
+// file, as well as "merged" and "work" directories. The "upper"
+// directory has the upper layer of the overlay, and "lower-id" contains
+// the id of the parent whose "root" directory shall be used as the lower
+// layer in the overlay. The overlay itself is mounted in the "merged"
+// directory, and the "work" dir is needed for overlay to work.
+
+// When a overlay layer is created there are two cases, either the
+// parent has a "root" dir, then we start out with a empty "upper"
+// directory overlaid on the parents root. This is typically the
+// case with the init layer of a container which is based on an image.
+// If there is no "root" in the parent, we inherit the lower-id from
+// the parent and start by making a copy if the parents "upper" dir.
+// This is typically the case for a container layer which copies
+// its parent -init upper layer.
+
+// Additionally we also have a custom implementation of ApplyLayer
+// which makes a recursive copy of the parent "root" layer using
+// hardlinks to share file data, and then applies the layer on top
+// of that. This means all child images share file (but not directory)
+// data with the parent.
+
+type ActiveMount struct {
+	count   int
+	path    string
+	mounted bool
+}
+type Driver struct {
+	home       string
+	sync.Mutex // Protects concurrent modification to active
+	active     map[string]*ActiveMount
+}
+
+func init() {
+	graphdriver.Register("overlay", Init)
+}
+
+func Init(home string, options []string) (graphdriver.Driver, error) {
+	if err := supportsOverlay(); err != nil {
+		return nil, graphdriver.ErrNotSupported
+	}
+
+	// Create the driver home dir
+	if err := os.MkdirAll(home, 0755); err != nil && !os.IsExist(err) {
+		return nil, err
+	}
+
+	d := &Driver{
+		home:   home,
+		active: make(map[string]*ActiveMount),
+	}
+
+	return NaiveDiffDriverWithApply(d), nil
+}
+
+func supportsOverlay() error {
+	// We can try to modprobe overlay first before looking at
+	// proc/filesystems for when overlay is supported
+	exec.Command("modprobe", "overlay").Run()
+
+	f, err := os.Open("/proc/filesystems")
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	s := bufio.NewScanner(f)
+	for s.Scan() {
+		if s.Text() == "nodev\toverlay" {
+			return nil
+		}
+	}
+	log.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
+	return graphdriver.ErrNotSupported
+}
+
+func (d *Driver) String() string {
+	return "overlay"
+}
+
+func (d *Driver) Status() [][2]string {
+	return nil
+}
+
+func (d *Driver) Cleanup() error {
+	return nil
+}
+
+func (d *Driver) Create(id string, parent string) (retErr error) {
+	dir := d.dir(id)
+	if err := os.MkdirAll(path.Dir(dir), 0700); err != nil {
+		return err
+	}
+	if err := os.Mkdir(dir, 0700); err != nil {
+		return err
+	}
+
+	defer func() {
+		// Clean up on failure
+		if retErr != nil {
+			os.RemoveAll(dir)
+		}
+	}()
+
+	// Toplevel images are just a "root" dir
+	if parent == "" {
+		if err := os.Mkdir(path.Join(dir, "root"), 0755); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	parentDir := d.dir(parent)
+
+	// Ensure parent exists
+	if _, err := os.Lstat(parentDir); err != nil {
+		return err
+	}
+
+	// If parent has a root, just do a overlay to it
+	parentRoot := path.Join(parentDir, "root")
+
+	if s, err := os.Lstat(parentRoot); err == nil {
+		if err := os.Mkdir(path.Join(dir, "upper"), s.Mode()); err != nil {
+			return err
+		}
+		if err := os.Mkdir(path.Join(dir, "work"), 0700); err != nil {
+			return err
+		}
+		if err := os.Mkdir(path.Join(dir, "merged"), 0700); err != nil {
+			return err
+		}
+		if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	// Otherwise, copy the upper and the lower-id from the parent
+
+	lowerId, err := ioutil.ReadFile(path.Join(parentDir, "lower-id"))
+	if err != nil {
+		return err
+	}
+
+	if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerId, 0666); err != nil {
+		return err
+	}
+
+	parentUpperDir := path.Join(parentDir, "upper")
+	s, err := os.Lstat(parentUpperDir)
+	if err != nil {
+		return err
+	}
+
+	upperDir := path.Join(dir, "upper")
+	if err := os.Mkdir(upperDir, s.Mode()); err != nil {
+		return err
+	}
+	if err := os.Mkdir(path.Join(dir, "work"), 0700); err != nil {
+		return err
+	}
+	if err := os.Mkdir(path.Join(dir, "merged"), 0700); err != nil {
+		return err
+	}
+
+	return copyDir(parentUpperDir, upperDir, 0)
+}
+
+func (d *Driver) dir(id string) string {
+	return path.Join(d.home, id)
+}
+
+func (d *Driver) Remove(id string) error {
+	dir := d.dir(id)
+	if _, err := os.Stat(dir); err != nil {
+		return err
+	}
+	return os.RemoveAll(dir)
+}
+
+func (d *Driver) Get(id string, mountLabel string) (string, error) {
+	// Protect the d.active from concurrent access
+	d.Lock()
+	defer d.Unlock()
+
+	mount := d.active[id]
+	if mount != nil {
+		mount.count++
+		return mount.path, nil
+	} else {
+		mount = &ActiveMount{count: 1}
+	}
+
+	dir := d.dir(id)
+	if _, err := os.Stat(dir); err != nil {
+		return "", err
+	}
+
+	// If id has a root, just return it
+	rootDir := path.Join(dir, "root")
+	if _, err := os.Stat(rootDir); err == nil {
+		mount.path = rootDir
+		d.active[id] = mount
+		return mount.path, nil
+	}
+
+	lowerId, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
+	if err != nil {
+		return "", err
+	}
+	lowerDir := path.Join(d.dir(string(lowerId)), "root")
+	upperDir := path.Join(dir, "upper")
+	workDir := path.Join(dir, "work")
+	mergedDir := path.Join(dir, "merged")
+
+	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
+	if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
+		return "", err
+	}
+	mount.path = mergedDir
+	mount.mounted = true
+	d.active[id] = mount
+
+	return mount.path, nil
+}
+
+func (d *Driver) Put(id string) {
+	// Protect the d.active from concurrent access
+	d.Lock()
+	defer d.Unlock()
+
+	mount := d.active[id]
+	if mount == nil {
+		log.Debugf("Put on a non-mounted device %s", id)
+		return
+	}
+
+	mount.count--
+	if mount.count > 0 {
+		return
+	}
+
+	if mount.mounted {
+		if err := syscall.Unmount(mount.path, 0); err != nil {
+			log.Debugf("Failed to unmount %s overlay: %v", id, err)
+		}
+	}
+
+	delete(d.active, id)
+}
+
+func (d *Driver) ApplyDiff(id string, parent string, diff archive.ArchiveReader) (bytes int64, err error) {
+	dir := d.dir(id)
+
+	if parent == "" {
+		return 0, ErrApplyDiffFallback
+	}
+
+	parentRootDir := path.Join(d.dir(parent), "root")
+	if _, err := os.Stat(parentRootDir); err != nil {
+		return 0, ErrApplyDiffFallback
+	}
+
+	// We now know there is a parent, and it has a "root" directory containing
+	// the full root filesystem. We can just hardlink it and apply the
+	// layer. This relies on two things:
+	// 1) ApplyDiff is only run once on a clean (no writes to upper layer) container
+	// 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks)
+	// These are all currently true and are not expected to break
+
+	tmpRootDir, err := ioutil.TempDir(dir, "tmproot")
+	if err != nil {
+		return 0, err
+	}
+	defer func() {
+		if err != nil {
+			os.RemoveAll(tmpRootDir)
+		} else {
+			os.RemoveAll(path.Join(dir, "upper"))
+			os.RemoveAll(path.Join(dir, "work"))
+			os.RemoveAll(path.Join(dir, "merged"))
+			os.RemoveAll(path.Join(dir, "lower-id"))
+		}
+	}()
+
+	if err = copyDir(parentRootDir, tmpRootDir, CopyHardlink); err != nil {
+		return 0, err
+	}
+
+	if err := chrootarchive.ApplyLayer(tmpRootDir, diff); err != nil {
+		return 0, err
+	}
+
+	rootDir := path.Join(dir, "root")
+	if err := os.Rename(tmpRootDir, rootDir); err != nil {
+		return 0, err
+	}
+
+	changes, err := archive.ChangesDirs(rootDir, parentRootDir)
+	if err != nil {
+		return 0, err
+	}
+
+	return archive.ChangesSize(rootDir, changes), nil
+}
+
+func (d *Driver) Exists(id string) bool {
+	_, err := os.Stat(d.dir(id))
+	return err == nil
+}

+ 28 - 0
daemon/graphdriver/overlay/overlay_test.go

@@ -0,0 +1,28 @@
+package overlay
+
+import (
+	"github.com/docker/docker/daemon/graphdriver/graphtest"
+	"testing"
+)
+
+// This avoids creating a new driver for each test if all tests are run
+// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown
+func TestOverlaySetup(t *testing.T) {
+	graphtest.GetDriver(t, "overlay")
+}
+
+func TestOverlayCreateEmpty(t *testing.T) {
+	graphtest.DriverTestCreateEmpty(t, "overlay")
+}
+
+func TestOverlayCreateBase(t *testing.T) {
+	graphtest.DriverTestCreateBase(t, "overlay")
+}
+
+func TestOverlayCreateSnap(t *testing.T) {
+	graphtest.DriverTestCreateSnap(t, "overlay")
+}
+
+func TestOverlayTeardown(t *testing.T) {
+	graphtest.PutDriver(t)
+}

+ 3 - 0
daemon/image_delete.go

@@ -133,6 +133,9 @@ func (daemon *Daemon) canDeleteImage(imgID string, force bool) error {
 	for _, container := range daemon.List() {
 		parent, err := daemon.Repositories().LookupImage(container.Image)
 		if err != nil {
+			if daemon.Graph().IsNotExist(err) {
+				return nil
+			}
 			return err
 		}
 

+ 15 - 1
daemon/info.go

@@ -4,11 +4,12 @@ import (
 	"os"
 	"runtime"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/dockerversion"
 	"github.com/docker/docker/engine"
-	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/pkg/parsers/kernel"
 	"github.com/docker/docker/pkg/parsers/operatingsystem"
+	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/utils"
 )
@@ -37,6 +38,11 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
 		operatingSystem += " (containerized)"
 	}
 
+	meminfo, err := system.ReadMemInfo()
+	if err != nil {
+		log.Errorf("Could not read system memory info: %v", err)
+	}
+
 	// if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION)
 	initPath := utils.DockerInitPath("")
 	if initPath == "" {
@@ -50,6 +56,7 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
 		return job.Error(err)
 	}
 	v := &engine.Env{}
+	v.Set("ID", daemon.ID)
 	v.SetInt("Containers", len(daemon.List()))
 	v.SetInt("Images", imgcount)
 	v.Set("Driver", daemon.GraphDriver().String())
@@ -67,6 +74,13 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
 	v.Set("IndexServerAddress", registry.IndexServerAddress())
 	v.Set("InitSha1", dockerversion.INITSHA1)
 	v.Set("InitPath", initPath)
+	v.SetInt("NCPU", runtime.NumCPU())
+	v.SetInt64("MemTotal", meminfo.MemTotal)
+	v.Set("DockerRootDir", daemon.Config().Root)
+	if hostname, err := os.Hostname(); err == nil {
+		v.Set("Name", hostname)
+	}
+	v.SetList("Labels", daemon.Config().Labels)
 	if _, err := v.WriteTo(job.Stdout); err != nil {
 		return job.Error(err)
 	}

+ 18 - 0
daemon/inspect.go

@@ -65,3 +65,21 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
 	}
 	return job.Errorf("No such container: %s", name)
 }
+
+func (daemon *Daemon) ContainerExecInspect(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("usage: %s ID", job.Name)
+	}
+	id := job.Args[0]
+	eConfig, err := daemon.getExecConfig(id)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	b, err := json.Marshal(*eConfig)
+	if err != nil {
+		return job.Error(err)
+	}
+	job.Stdout.Write(b)
+	return engine.StatusOK
+}

+ 12 - 7
daemon/list.go

@@ -28,7 +28,6 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 		size        = job.GetenvBool("size")
 		psFilters   filters.Args
 		filt_exited []int
-		filt_status []string
 	)
 	outs := engine.NewTable("Created", 0)
 
@@ -46,8 +45,6 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 		}
 	}
 
-	filt_status, _ = psFilters["status"]
-
 	names := map[string][]string{}
 	daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
 		names[e.ID()] = append(names[e.ID()], p)
@@ -76,6 +73,15 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 		if !container.Running && !all && n <= 0 && since == "" && before == "" {
 			return nil
 		}
+
+		if !psFilters.Match("name", container.Name) {
+			return nil
+		}
+
+		if !psFilters.Match("id", container.ID) {
+			return nil
+		}
+
 		if before != "" && !foundBefore {
 			if container.ID == beforeCont.ID {
 				foundBefore = true
@@ -102,10 +108,9 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 				return nil
 			}
 		}
-		for _, status := range filt_status {
-			if container.State.StateString() != strings.ToLower(status) {
-				return nil
-			}
+
+		if !psFilters.Match("status", container.State.StateString()) {
+			return nil
 		}
 		displayed++
 		out := &engine.Env{}

+ 17 - 4
daemon/logs.go

@@ -7,10 +7,11 @@ import (
 	"io"
 	"os"
 	"strconv"
+	"sync"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/pkg/jsonlog"
-	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/pkg/tailfile"
 	"github.com/docker/docker/pkg/timeutils"
 )
@@ -112,24 +113,36 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
 	}
 	if follow && container.IsRunning() {
 		errors := make(chan error, 2)
+		wg := sync.WaitGroup{}
+
 		if stdout {
+			wg.Add(1)
 			stdoutPipe := container.StdoutLogPipe()
 			defer stdoutPipe.Close()
 			go func() {
 				errors <- jsonlog.WriteLog(stdoutPipe, job.Stdout, format)
+				wg.Done()
 			}()
 		}
 		if stderr {
+			wg.Add(1)
 			stderrPipe := container.StderrLogPipe()
 			defer stderrPipe.Close()
 			go func() {
 				errors <- jsonlog.WriteLog(stderrPipe, job.Stderr, format)
+				wg.Done()
 			}()
 		}
-		err := <-errors
-		if err != nil {
-			log.Errorf("%s", err)
+
+		wg.Wait()
+		close(errors)
+
+		for err := range errors {
+			if err != nil {
+				log.Errorf("%s", err)
+			}
 		}
+
 	}
 	return engine.StatusOK
 }

+ 11 - 8
daemon/monitor.go

@@ -6,8 +6,8 @@ import (
 	"sync"
 	"time"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
-	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/runconfig"
 )
 
@@ -100,7 +100,7 @@ func (m *containerMonitor) Close() error {
 func (m *containerMonitor) Start() error {
 	var (
 		err        error
-		exitStatus int
+		exitStatus execdriver.ExitStatus
 		// this variable indicates where we in execution flow:
 		// before Run or after
 		afterRun bool
@@ -110,7 +110,7 @@ func (m *containerMonitor) Start() error {
 	defer func() {
 		if afterRun {
 			m.container.Lock()
-			m.container.setStopped(exitStatus)
+			m.container.setStopped(&exitStatus)
 			defer m.container.Unlock()
 		}
 		m.Close()
@@ -138,6 +138,7 @@ func (m *containerMonitor) Start() error {
 			// if we receive an internal error from the initial start of a container then lets
 			// return it instead of entering the restart loop
 			if m.container.RestartCount == 0 {
+				m.container.ExitCode = -1
 				m.resetContainer(false)
 
 				return err
@@ -149,10 +150,10 @@ func (m *containerMonitor) Start() error {
 		// here container.Lock is already lost
 		afterRun = true
 
-		m.resetMonitor(err == nil && exitStatus == 0)
+		m.resetMonitor(err == nil && exitStatus.ExitCode == 0)
 
-		if m.shouldRestart(exitStatus) {
-			m.container.SetRestarting(exitStatus)
+		if m.shouldRestart(exitStatus.ExitCode) {
+			m.container.SetRestarting(&exitStatus)
 			m.container.LogEvent("die")
 			m.resetContainer(true)
 
@@ -163,10 +164,12 @@ func (m *containerMonitor) Start() error {
 			// we need to check this before reentering the loop because the waitForNextRestart could have
 			// been terminated by a request from a user
 			if m.shouldStop {
+				m.container.ExitCode = exitStatus.ExitCode
 				return err
 			}
 			continue
 		}
+		m.container.ExitCode = exitStatus.ExitCode
 		m.container.LogEvent("die")
 		m.resetContainer(true)
 		return err
@@ -206,7 +209,7 @@ func (m *containerMonitor) waitForNextRestart() {
 
 // shouldRestart checks the restart policy and applies the rules to determine if
 // the container's process should be restarted
-func (m *containerMonitor) shouldRestart(exitStatus int) bool {
+func (m *containerMonitor) shouldRestart(exitCode int) bool {
 	m.mux.Lock()
 	defer m.mux.Unlock()
 
@@ -225,7 +228,7 @@ func (m *containerMonitor) shouldRestart(exitStatus int) bool {
 			return false
 		}
 
-		return exitStatus != 0
+		return exitCode != 0
 	}
 
 	return false

+ 30 - 38
daemon/networkdriver/bridge/driver.go

@@ -4,16 +4,17 @@ import (
 	"fmt"
 	"io/ioutil"
 	"net"
-	"strings"
+	"os"
+	"strconv"
 	"sync"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/networkdriver"
 	"github.com/docker/docker/daemon/networkdriver/ipallocator"
-	"github.com/docker/docker/daemon/networkdriver/portallocator"
 	"github.com/docker/docker/daemon/networkdriver/portmapper"
 	"github.com/docker/docker/engine"
+	"github.com/docker/docker/nat"
 	"github.com/docker/docker/pkg/iptables"
-	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/pkg/networkfs/resolvconf"
 	"github.com/docker/docker/pkg/parsers/kernel"
 	"github.com/docker/libcontainer/netlink"
@@ -104,8 +105,8 @@ func InitDriver(job *engine.Job) engine.Status {
 		if !usingDefaultBridge {
 			return job.Error(err)
 		}
-		// If the iface is not found, try to create it
-		if err := createBridge(bridgeIP); err != nil {
+		// If the bridge interface is not found (or has no address), try to create it and/or add an address
+		if err := configureBridge(bridgeIP); err != nil {
 			return job.Error(err)
 		}
 
@@ -193,7 +194,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
 			if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil {
 				return fmt.Errorf("Unable to enable network bridge NAT: %s", err)
 			} else if len(output) != 0 {
-				return fmt.Errorf("Error iptables postrouting: %s", output)
+				return &iptables.ChainError{Chain: "POSTROUTING", Output: output}
 			}
 		}
 	}
@@ -234,7 +235,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
 		if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil {
 			return fmt.Errorf("Unable to allow outgoing packets: %s", err)
 		} else if len(output) != 0 {
-			return fmt.Errorf("Error iptables allow outgoing: %s", output)
+			return &iptables.ChainError{Chain: "FORWARD outgoing", Output: output}
 		}
 	}
 
@@ -245,16 +246,18 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
 		if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil {
 			return fmt.Errorf("Unable to allow incoming packets: %s", err)
 		} else if len(output) != 0 {
-			return fmt.Errorf("Error iptables allow incoming: %s", output)
+			return &iptables.ChainError{Chain: "FORWARD incoming", Output: output}
 		}
 	}
 	return nil
 }
 
-// CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`,
-// and attempts to configure it with an address which doesn't conflict with any other interface on the host.
-// If it can't find an address which doesn't conflict, it will return an error.
-func createBridge(bridgeIP string) error {
+// configureBridge attempts to create and configure a network bridge interface named `bridgeIface` on the host
+// If bridgeIP is empty, it will try to find a non-conflicting IP from the Docker-specified private ranges
+// If the bridge `bridgeIface` already exists, it will only perform the IP address association with the existing
+// bridge (fixes issue #8444)
+// If an address which doesn't conflict with existing interfaces can't be found, an error is returned.
+func configureBridge(bridgeIP string) error {
 	nameservers := []string{}
 	resolvConf, _ := resolvconf.Get()
 	// we don't check for an error here, because we don't really care
@@ -295,7 +298,10 @@ func createBridge(bridgeIP string) error {
 	log.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr)
 
 	if err := createBridgeIface(bridgeIface); err != nil {
-		return err
+		// the bridge may already exist, therefore we can ignore an "exists" error
+		if !os.IsExist(err) {
+			return err
+		}
 	}
 
 	iface, err := net.InterfaceByName(bridgeIface)
@@ -461,22 +467,13 @@ func AllocatePort(job *engine.Job) engine.Status {
 		if host, err = portmapper.Map(container, ip, hostPort); err == nil {
 			break
 		}
-
-		if allocerr, ok := err.(portallocator.ErrPortAlreadyAllocated); ok {
-			// There is no point in immediately retrying to map an explicitly
-			// chosen port.
-			if hostPort != 0 {
-				job.Logf("Failed to bind %s for container address %s: %s", allocerr.IPPort(), container.String(), allocerr.Error())
-				break
-			}
-
-			// Automatically chosen 'free' port failed to bind: move on the next.
-			job.Logf("Failed to bind %s for container address %s. Trying another port.", allocerr.IPPort(), container.String())
-		} else {
-			// some other error during mapping
-			job.Logf("Received an unexpected error during port allocation: %s", err.Error())
+		// There is no point in immediately retrying to map an explicitly
+		// chosen port.
+		if hostPort != 0 {
+			job.Logf("Failed to allocate and map port %d: %s", hostPort, err)
 			break
 		}
+		job.Logf("Failed to allocate and map port: %s, retry: %d", err, i+1)
 	}
 
 	if err != nil {
@@ -509,18 +506,13 @@ func LinkContainers(job *engine.Job) engine.Status {
 		ignoreErrors = job.GetenvBool("IgnoreErrors")
 		ports        = job.GetenvList("Ports")
 	)
-	split := func(p string) (string, string) {
-		parts := strings.Split(p, "/")
-		return parts[0], parts[1]
-	}
-
-	for _, p := range ports {
-		port, proto := split(p)
+	for _, value := range ports {
+		port := nat.Port(value)
 		if output, err := iptables.Raw(action, "FORWARD",
 			"-i", bridgeIface, "-o", bridgeIface,
-			"-p", proto,
+			"-p", port.Proto(),
 			"-s", parentIP,
-			"--dport", port,
+			"--dport", strconv.Itoa(port.Int()),
 			"-d", childIP,
 			"-j", "ACCEPT"); !ignoreErrors && err != nil {
 			return job.Error(err)
@@ -530,9 +522,9 @@ func LinkContainers(job *engine.Job) engine.Status {
 
 		if output, err := iptables.Raw(action, "FORWARD",
 			"-i", bridgeIface, "-o", bridgeIface,
-			"-p", proto,
+			"-p", port.Proto(),
 			"-s", childIP,
-			"--sport", port,
+			"--sport", strconv.Itoa(port.Int()),
 			"-d", parentIP,
 			"-j", "ACCEPT"); !ignoreErrors && err != nil {
 			return job.Error(err)

+ 55 - 39
daemon/networkdriver/ipallocator/allocator.go

@@ -1,31 +1,38 @@
 package ipallocator
 
 import (
-	"encoding/binary"
 	"errors"
+	"math/big"
 	"net"
 	"sync"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/networkdriver"
 )
 
 // allocatedMap is thread-unsafe set of allocated IP
 type allocatedMap struct {
-	p     map[uint32]struct{}
-	last  uint32
-	begin uint32
-	end   uint32
+	p     map[string]struct{}
+	last  *big.Int
+	begin *big.Int
+	end   *big.Int
 }
 
 func newAllocatedMap(network *net.IPNet) *allocatedMap {
 	firstIP, lastIP := networkdriver.NetworkRange(network)
-	begin := ipToInt(firstIP) + 2
-	end := ipToInt(lastIP) - 1
+	begin := big.NewInt(0).Add(ipToBigInt(firstIP), big.NewInt(1))
+	end := big.NewInt(0).Sub(ipToBigInt(lastIP), big.NewInt(1))
+
+	// if IPv4 network, then allocation range starts at begin + 1 because begin is bridge IP
+	if len(firstIP) == 4 {
+		begin = begin.Add(begin, big.NewInt(1))
+	}
+
 	return &allocatedMap{
-		p:     make(map[uint32]struct{}),
+		p:     make(map[string]struct{}),
 		begin: begin,
 		end:   end,
-		last:  begin - 1, // so first allocated will be begin
+		last:  big.NewInt(0).Sub(begin, big.NewInt(1)), // so first allocated will be begin
 	}
 }
 
@@ -56,13 +63,16 @@ func RegisterSubnet(network *net.IPNet, subnet *net.IPNet) error {
 	}
 	n := newAllocatedMap(network)
 	beginIP, endIP := networkdriver.NetworkRange(subnet)
-	begin, end := ipToInt(beginIP)+1, ipToInt(endIP)-1
-	if !(begin >= n.begin && end <= n.end && begin < end) {
+	begin := big.NewInt(0).Add(ipToBigInt(beginIP), big.NewInt(1))
+	end := big.NewInt(0).Sub(ipToBigInt(endIP), big.NewInt(1))
+
+	// Check that subnet is within network
+	if !(begin.Cmp(n.begin) >= 0 && end.Cmp(n.end) <= 0 && begin.Cmp(end) == -1) {
 		return ErrBadSubnet
 	}
-	n.begin = begin
-	n.end = end
-	n.last = begin - 1
+	n.begin.Set(begin)
+	n.end.Set(end)
+	n.last.Sub(begin, big.NewInt(1))
 	allocatedIPs[key] = n
 	return nil
 }
@@ -93,28 +103,25 @@ func ReleaseIP(network *net.IPNet, ip net.IP) error {
 	lock.Lock()
 	defer lock.Unlock()
 	if allocated, exists := allocatedIPs[network.String()]; exists {
-		pos := ipToInt(ip)
-		delete(allocated.p, pos)
+		delete(allocated.p, ip.String())
 	}
 	return nil
 }
 
 func (allocated *allocatedMap) checkIP(ip net.IP) (net.IP, error) {
-	pos := ipToInt(ip)
-
-	// Verify that the IP address has not been already allocated.
-	if _, ok := allocated.p[pos]; ok {
+	if _, ok := allocated.p[ip.String()]; ok {
 		return nil, ErrIPAlreadyAllocated
 	}
 
+	pos := ipToBigInt(ip)
 	// Verify that the IP address is within our network range.
-	if pos < allocated.begin || pos > allocated.end {
+	if pos.Cmp(allocated.begin) == -1 || pos.Cmp(allocated.end) == 1 {
 		return nil, ErrIPOutOfRange
 	}
 
 	// Register the IP.
-	allocated.p[pos] = struct{}{}
-	allocated.last = pos
+	allocated.p[ip.String()] = struct{}{}
+	allocated.last.Set(pos)
 
 	return ip, nil
 }
@@ -122,29 +129,38 @@ func (allocated *allocatedMap) checkIP(ip net.IP) (net.IP, error) {
 // return an available ip if one is currently available.  If not,
// return the next available ip for the network
 func (allocated *allocatedMap) getNextIP() (net.IP, error) {
-	for pos := allocated.last + 1; pos != allocated.last; pos++ {
-		if pos > allocated.end {
-			pos = allocated.begin
+	pos := big.NewInt(0).Set(allocated.last)
+	allRange := big.NewInt(0).Sub(allocated.end, allocated.begin)
+	for i := big.NewInt(0); i.Cmp(allRange) <= 0; i.Add(i, big.NewInt(1)) {
+		pos.Add(pos, big.NewInt(1))
+		if pos.Cmp(allocated.end) == 1 {
+			pos.Set(allocated.begin)
 		}
-		if _, ok := allocated.p[pos]; ok {
+		if _, ok := allocated.p[bigIntToIP(pos).String()]; ok {
 			continue
 		}
-		allocated.p[pos] = struct{}{}
-		allocated.last = pos
-		return intToIP(pos), nil
+		allocated.p[bigIntToIP(pos).String()] = struct{}{}
+		allocated.last.Set(pos)
+		return bigIntToIP(pos), nil
 	}
 	return nil, ErrNoAvailableIPs
 }
 
-// Converts a 4 bytes IP into a 32 bit integer
-func ipToInt(ip net.IP) uint32 {
-	return binary.BigEndian.Uint32(ip.To4())
+// Converts a 4-byte (IPv4) or 16-byte (IPv6) IP into a big integer
+func ipToBigInt(ip net.IP) *big.Int {
+	x := big.NewInt(0)
+	if ip4 := ip.To4(); ip4 != nil {
+		return x.SetBytes(ip4)
+	}
+	if ip6 := ip.To16(); ip6 != nil {
+		return x.SetBytes(ip6)
+	}
+
+	log.Errorf("ipToBigInt: Wrong IP length! %s", ip)
+	return nil
 }
 
-// Converts 32 bit integer into a 4 bytes IP address
-func intToIP(n uint32) net.IP {
-	b := make([]byte, 4)
-	binary.BigEndian.PutUint32(b, n)
-	ip := net.IP(b)
-	return ip
+// Converts a big integer into an IP address (4 or 16 bytes, per the integer's byte length)
+func bigIntToIP(v *big.Int) net.IP {
+	return net.IP(v.Bytes())
 }

+ 253 - 11
daemon/networkdriver/ipallocator/allocator_test.go

@@ -2,6 +2,7 @@ package ipallocator
 
 import (
 	"fmt"
+	"math/big"
 	"net"
 	"testing"
 )
@@ -10,6 +11,46 @@ func reset() {
 	allocatedIPs = networkSet{}
 }
 
+func TestConversion(t *testing.T) {
+	ip := net.ParseIP("127.0.0.1")
+	i := ipToBigInt(ip)
+	if i.Cmp(big.NewInt(0x7f000001)) != 0 {
+		t.Fatal("incorrect conversion")
+	}
+	conv := bigIntToIP(i)
+	if !ip.Equal(conv) {
+		t.Error(conv.String())
+	}
+}
+
+func TestConversionIPv6(t *testing.T) {
+	ip := net.ParseIP("2a00:1450::1")
+	ip2 := net.ParseIP("2a00:1450::2")
+	ip3 := net.ParseIP("2a00:1450::1:1")
+	i := ipToBigInt(ip)
+	val, success := big.NewInt(0).SetString("2a001450000000000000000000000001", 16)
+	if !success {
+		t.Fatal("Hex-String to BigInt conversion failed.")
+	}
+	if i.Cmp(val) != 0 {
+		t.Fatal("incorrect conversion")
+	}
+
+	conv := bigIntToIP(i)
+	conv2 := bigIntToIP(big.NewInt(0).Add(i, big.NewInt(1)))
+	conv3 := bigIntToIP(big.NewInt(0).Add(i, big.NewInt(0x10000)))
+
+	if !ip.Equal(conv) {
+		t.Error("2a00:1450::1 should be equal to " + conv.String())
+	}
+	if !ip2.Equal(conv2) {
+		t.Error("2a00:1450::2 should be equal to " + conv2.String())
+	}
+	if !ip3.Equal(conv3) {
+		t.Error("2a00:1450::1:1 should be equal to " + conv3.String())
+	}
+}
+
 func TestRequestNewIps(t *testing.T) {
 	defer reset()
 	network := &net.IPNet{
@@ -19,6 +60,7 @@ func TestRequestNewIps(t *testing.T) {
 
 	var ip net.IP
 	var err error
+
 	for i := 2; i < 10; i++ {
 		ip, err = RequestIP(network, nil)
 		if err != nil {
@@ -29,7 +71,39 @@ func TestRequestNewIps(t *testing.T) {
 			t.Fatalf("Expected ip %s got %s", expected, ip.String())
 		}
 	}
-	value := intToIP(ipToInt(ip) + 1).String()
+	value := bigIntToIP(big.NewInt(0).Add(ipToBigInt(ip), big.NewInt(1))).String()
+	if err := ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+	ip, err = RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ip.String() != value {
+		t.Fatalf("Expected to receive the next ip %s got %s", value, ip.String())
+	}
+}
+
+func TestRequestNewIpV6(t *testing.T) {
+	defer reset()
+	network := &net.IPNet{
+		IP:   []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
+	}
+
+	var ip net.IP
+	var err error
+	for i := 1; i < 10; i++ {
+		ip, err = RequestIP(network, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if expected := fmt.Sprintf("2a00:1450::%d", i); ip.String() != expected {
+			t.Fatalf("Expected ip %s got %s", expected, ip.String())
+		}
+	}
+	value := bigIntToIP(big.NewInt(0).Add(ipToBigInt(ip), big.NewInt(1))).String()
 	if err := ReleaseIP(network, ip); err != nil {
 		t.Fatal(err)
 	}
@@ -59,6 +133,23 @@ func TestReleaseIp(t *testing.T) {
 	}
 }
 
+func TestReleaseIpV6(t *testing.T) {
+	defer reset()
+	network := &net.IPNet{
+		IP:   []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
+	}
+
+	ip, err := RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+}
+
 func TestGetReleasedIp(t *testing.T) {
 	defer reset()
 	network := &net.IPNet{
@@ -97,6 +188,44 @@ func TestGetReleasedIp(t *testing.T) {
 	}
 }
 
+func TestGetReleasedIpV6(t *testing.T) {
+	defer reset()
+	network := &net.IPNet{
+		IP:   []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0},
+	}
+
+	ip, err := RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	value := ip.String()
+	if err := ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+
+	for i := 0; i < 253; i++ {
+		_, err = RequestIP(network, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = ReleaseIP(network, ip)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	ip, err = RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if ip.String() != value {
+		t.Fatalf("Expected to receive same ip %s got %s", value, ip.String())
+	}
+}
+
 func TestRequestSpecificIp(t *testing.T) {
 	defer reset()
 	network := &net.IPNet{
@@ -122,15 +251,28 @@ func TestRequestSpecificIp(t *testing.T) {
 	}
 }
 
-func TestConversion(t *testing.T) {
-	ip := net.ParseIP("127.0.0.1")
-	i := ipToInt(ip)
-	if i == 0 {
-		t.Fatal("converted to zero")
+func TestRequestSpecificIpV6(t *testing.T) {
+	defer reset()
+	network := &net.IPNet{
+		IP:   []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
 	}
-	conv := intToIP(i)
-	if !ip.Equal(conv) {
-		t.Error(conv.String())
+
+	ip := net.ParseIP("2a00:1450::5")
+
+	// Request a "good" IP.
+	if _, err := RequestIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+
+	// Request the same IP again.
+	if _, err := RequestIP(network, ip); err != ErrIPAlreadyAllocated {
+		t.Fatalf("Got the same IP twice: %#v", err)
+	}
+
+	// Request an out of range IP.
+	if _, err := RequestIP(network, net.ParseIP("2a00:1500::1")); err != ErrIPOutOfRange {
+		t.Fatalf("Got an out of range IP: %#v", err)
 	}
 }
 
@@ -144,6 +286,7 @@ func TestIPAllocator(t *testing.T) {
 	}
 
 	gwIP, n, _ := net.ParseCIDR("127.0.0.1/29")
+
 	network := &net.IPNet{IP: gwIP, Mask: n.Mask}
 	// Pool after initialisation (f = free, u = used)
 	// 2(f) - 3(f) - 4(f) - 5(f) - 6(f)
@@ -237,13 +380,13 @@ func TestAllocateFirstIP(t *testing.T) {
 	}
 
 	firstIP := network.IP.To4().Mask(network.Mask)
-	first := ipToInt(firstIP) + 1
+	first := big.NewInt(0).Add(ipToBigInt(firstIP), big.NewInt(1))
 
 	ip, err := RequestIP(network, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
-	allocated := ipToInt(ip)
+	allocated := ipToBigInt(ip)
 
 	if allocated == first {
 		t.Fatalf("allocated ip should not equal first ip: %d == %d", first, allocated)
@@ -289,6 +432,65 @@ func TestAllocateAllIps(t *testing.T) {
 	}
 
 	assertIPEquals(t, first, again)
+
+	// ensure that alloc.last == alloc.begin won't result in dead loop
+	if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs {
+		t.Fatal(err)
+	}
+
+	// Test by making alloc.last the only free ip and ensure we get it back
+	// #1. first of the range, (alloc.last == ipToInt(first) already)
+	if err := ReleaseIP(network, first); err != nil {
+		t.Fatal(err)
+	}
+
+	ret, err := RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertIPEquals(t, first, ret)
+
+	// #2. last of the range, note that current is the last one
+	last := net.IPv4(192, 168, 0, 254)
+	setLastTo(t, network, last)
+
+	ret, err = RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertIPEquals(t, last, ret)
+
+	// #3. middle of the range
+	mid := net.IPv4(192, 168, 0, 7)
+	setLastTo(t, network, mid)
+
+	ret, err = RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertIPEquals(t, mid, ret)
+}
+
+// make sure the pool is full when calling setLastTo.
+// we don't cheat here
+func setLastTo(t *testing.T, network *net.IPNet, ip net.IP) {
+	if err := ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+
+	ret, err := RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertIPEquals(t, ip, ret)
+
+	if err := ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
 }
 
 func TestAllocateDifferentSubnets(t *testing.T) {
@@ -301,11 +503,24 @@ func TestAllocateDifferentSubnets(t *testing.T) {
 		IP:   []byte{127, 0, 0, 1},
 		Mask: []byte{255, 255, 255, 0},
 	}
+	network3 := &net.IPNet{
+		IP:   []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
+	}
+	network4 := &net.IPNet{
+		IP:   []byte{0x2a, 0x00, 0x16, 0x32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
+	}
 	expectedIPs := []net.IP{
 		0: net.IPv4(192, 168, 0, 2),
 		1: net.IPv4(192, 168, 0, 3),
 		2: net.IPv4(127, 0, 0, 2),
 		3: net.IPv4(127, 0, 0, 3),
+		4: net.ParseIP("2a00:1450::1"),
+		5: net.ParseIP("2a00:1450::2"),
+		6: net.ParseIP("2a00:1450::3"),
+		7: net.ParseIP("2a00:1632::1"),
+		8: net.ParseIP("2a00:1632::2"),
 	}
 
 	ip11, err := RequestIP(network1, nil)
@@ -324,11 +539,37 @@ func TestAllocateDifferentSubnets(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	ip31, err := RequestIP(network3, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ip32, err := RequestIP(network3, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ip33, err := RequestIP(network3, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ip41, err := RequestIP(network4, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ip42, err := RequestIP(network4, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
 	assertIPEquals(t, expectedIPs[0], ip11)
 	assertIPEquals(t, expectedIPs[1], ip12)
 	assertIPEquals(t, expectedIPs[2], ip21)
 	assertIPEquals(t, expectedIPs[3], ip22)
+	assertIPEquals(t, expectedIPs[4], ip31)
+	assertIPEquals(t, expectedIPs[5], ip32)
+	assertIPEquals(t, expectedIPs[6], ip33)
+	assertIPEquals(t, expectedIPs[7], ip41)
+	assertIPEquals(t, expectedIPs[8], ip42)
 }
+
 func TestRegisterBadTwice(t *testing.T) {
 	defer reset()
 	network := &net.IPNet{
@@ -378,6 +619,7 @@ func TestAllocateFromRange(t *testing.T) {
 		IP:   []byte{192, 168, 0, 8},
 		Mask: []byte{255, 255, 255, 248},
 	}
+
 	if err := RegisterSubnet(network, subnet); err != nil {
 		t.Fatal(err)
 	}

+ 0 - 15
daemon/networkdriver/network_test.go

@@ -122,9 +122,6 @@ func TestNetworkRange(t *testing.T) {
 	if !last.Equal(net.ParseIP("192.168.0.255")) {
 		t.Error(last.String())
 	}
-	if size := NetworkSize(network.Mask); size != 256 {
-		t.Error(size)
-	}
 
 	// Class A test
 	_, network, _ = net.ParseCIDR("10.0.0.1/8")
@@ -135,9 +132,6 @@ func TestNetworkRange(t *testing.T) {
 	if !last.Equal(net.ParseIP("10.255.255.255")) {
 		t.Error(last.String())
 	}
-	if size := NetworkSize(network.Mask); size != 16777216 {
-		t.Error(size)
-	}
 
 	// Class A, random IP address
 	_, network, _ = net.ParseCIDR("10.1.2.3/8")
@@ -158,9 +152,6 @@ func TestNetworkRange(t *testing.T) {
 	if !last.Equal(net.ParseIP("10.1.2.3")) {
 		t.Error(last.String())
 	}
-	if size := NetworkSize(network.Mask); size != 1 {
-		t.Error(size)
-	}
 
 	// 31bit mask
 	_, network, _ = net.ParseCIDR("10.1.2.3/31")
@@ -171,9 +162,6 @@ func TestNetworkRange(t *testing.T) {
 	if !last.Equal(net.ParseIP("10.1.2.3")) {
 		t.Error(last.String())
 	}
-	if size := NetworkSize(network.Mask); size != 2 {
-		t.Error(size)
-	}
 
 	// 26bit mask
 	_, network, _ = net.ParseCIDR("10.1.2.3/26")
@@ -184,7 +172,4 @@ func TestNetworkRange(t *testing.T) {
 	if !last.Equal(net.ParseIP("10.1.2.63")) {
 		t.Error(last.String())
 	}
-	if size := NetworkSize(network.Mask); size != 64 {
-		t.Error(size)
-	}
 }

+ 5 - 8
daemon/networkdriver/portallocator/portallocator.go

@@ -14,7 +14,8 @@ type portMap struct {
 
 func newPortMap() *portMap {
 	return &portMap{
-		p: map[int]struct{}{},
+		p:    map[int]struct{}{},
+		last: EndPortRange,
 	}
 }
 
@@ -135,13 +136,9 @@ func ReleaseAll() error {
 }
 
 func (pm *portMap) findPort() (int, error) {
-	if pm.last == 0 {
-		pm.p[BeginPortRange] = struct{}{}
-		pm.last = BeginPortRange
-		return BeginPortRange, nil
-	}
-
-	for port := pm.last + 1; port != pm.last; port++ {
+	port := pm.last
+	for i := 0; i <= EndPortRange-BeginPortRange; i++ {
+		port++
 		if port > EndPortRange {
 			port = BeginPortRange
 		}

+ 29 - 0
daemon/networkdriver/portallocator/portallocator_test.go

@@ -134,6 +134,19 @@ func TestAllocateAllPorts(t *testing.T) {
 	if newPort != port {
 		t.Fatalf("Expected port %d got %d", port, newPort)
 	}
+
+	// now pm.last == newPort, release it so that it's the only free port of
+	// the range, and ensure we get it back
+	if err := ReleasePort(defaultIP, "tcp", newPort); err != nil {
+		t.Fatal(err)
+	}
+	port, err = RequestPort(defaultIP, "tcp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if newPort != port {
+		t.Fatalf("Expected port %d got %d", newPort, port)
+	}
 }
 
 func BenchmarkAllocatePorts(b *testing.B) {
@@ -214,3 +227,19 @@ func TestPortAllocation(t *testing.T) {
 		t.Fatal("Requesting a dynamic port should never allocate a used port")
 	}
 }
+
+func TestNoDuplicateBPR(t *testing.T) {
+	defer reset()
+
+	if port, err := RequestPort(defaultIP, "tcp", BeginPortRange); err != nil {
+		t.Fatal(err)
+	} else if port != BeginPortRange {
+		t.Fatalf("Expected port %d got %d", BeginPortRange, port)
+	}
+
+	if port, err := RequestPort(defaultIP, "tcp", 0); err != nil {
+		t.Fatal(err)
+	} else if port == BeginPortRange {
+		t.Fatalf("Acquire(0) allocated the same port twice: %d", port)
+	}
+}

+ 1 - 1
daemon/networkdriver/portmapper/mapper.go

@@ -6,9 +6,9 @@ import (
 	"net"
 	"sync"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/networkdriver/portallocator"
 	"github.com/docker/docker/pkg/iptables"
-	"github.com/docker/docker/pkg/log"
 )
 
 type mapping struct {

+ 7 - 2
daemon/networkdriver/portmapper/proxy.go

@@ -130,7 +130,12 @@ func (p *proxyCommand) Start() error {
 		r.Read(buf)
 
 		if string(buf) != "0\n" {
-			errStr, _ := ioutil.ReadAll(r)
+			errStr, err := ioutil.ReadAll(r)
+			if err != nil {
+				errchan <- fmt.Errorf("Error reading exit status from userland proxy: %v", err)
+				return
+			}
+
 			errchan <- fmt.Errorf("Error starting userland proxy: %s", errStr)
 			return
 		}
@@ -140,7 +145,7 @@ func (p *proxyCommand) Start() error {
 	select {
 	case err := <-errchan:
 		return err
-	case <-time.After(1 * time.Second):
+	case <-time.After(16 * time.Second):
 		return fmt.Errorf("Timed out proxy starting the userland proxy")
 	}
 }

+ 13 - 18
daemon/networkdriver/utils.go

@@ -1,7 +1,6 @@
 package networkdriver
 
 import (
-	"encoding/binary"
 	"errors"
 	"fmt"
 	"net"
@@ -56,25 +55,21 @@ func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool {
 
 // Calculates the first and last IP addresses in an IPNet
 func NetworkRange(network *net.IPNet) (net.IP, net.IP) {
-	var (
-		netIP   = network.IP.To4()
-		firstIP = netIP.Mask(network.Mask)
-		lastIP  = net.IPv4(0, 0, 0, 0).To4()
-	)
-
-	for i := 0; i < len(lastIP); i++ {
-		lastIP[i] = netIP[i] | ^network.Mask[i]
+	var netIP net.IP
+	if network.IP.To4() != nil {
+		netIP = network.IP.To4()
+	} else if network.IP.To16() != nil {
+		netIP = network.IP.To16()
+	} else {
+		return nil, nil
 	}
-	return firstIP, lastIP
-}
 
-// Given a netmask, calculates the number of available hosts
-func NetworkSize(mask net.IPMask) int32 {
-	m := net.IPv4Mask(0, 0, 0, 0)
-	for i := 0; i < net.IPv4len; i++ {
-		m[i] = ^mask[i]
+	lastIP := make([]byte, len(netIP), len(netIP))
+
+	for i := 0; i < len(netIP); i++ {
+		lastIP[i] = netIP[i] | ^network.Mask[i]
 	}
-	return int32(binary.BigEndian.Uint32(m)) + 1
+	return netIP.Mask(network.Mask), net.IP(lastIP)
 }
 
 // Return the IPv4 address of a network interface
@@ -90,7 +85,7 @@ func GetIfaceAddr(name string) (net.Addr, error) {
 	var addrs4 []net.Addr
 	for _, addr := range addrs {
 		ip := (addr.(*net.IPNet)).IP
-		if ip4 := ip.To4(); len(ip4) == net.IPv4len {
+		if ip4 := ip.To4(); ip4 != nil {
 			addrs4 = append(addrs4, addr)
 		}
 	}

+ 19 - 6
daemon/state.go

@@ -5,6 +5,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/pkg/units"
 )
 
@@ -13,8 +14,10 @@ type State struct {
 	Running    bool
 	Paused     bool
 	Restarting bool
+	OOMKilled  bool
 	Pid        int
 	ExitCode   int
+	Error      string // contains last known error when starting the container
 	StartedAt  time.Time
 	FinishedAt time.Time
 	waitChan   chan struct{}
@@ -137,6 +140,7 @@ func (s *State) SetRunning(pid int) {
 }
 
 func (s *State) setRunning(pid int) {
+	s.Error = ""
 	s.Running = true
 	s.Paused = false
 	s.Restarting = false
@@ -147,25 +151,26 @@ func (s *State) setRunning(pid int) {
 	s.waitChan = make(chan struct{})
 }
 
-func (s *State) SetStopped(exitCode int) {
+func (s *State) SetStopped(exitStatus *execdriver.ExitStatus) {
 	s.Lock()
-	s.setStopped(exitCode)
+	s.setStopped(exitStatus)
 	s.Unlock()
 }
 
-func (s *State) setStopped(exitCode int) {
+func (s *State) setStopped(exitStatus *execdriver.ExitStatus) {
 	s.Running = false
 	s.Restarting = false
 	s.Pid = 0
 	s.FinishedAt = time.Now().UTC()
-	s.ExitCode = exitCode
+	s.ExitCode = exitStatus.ExitCode
+	s.OOMKilled = exitStatus.OOMKilled
 	close(s.waitChan) // fire waiters for stop
 	s.waitChan = make(chan struct{})
 }
 
 // SetRestarting is when docker handles the auto restart of containers when they are
 // in the middle of a stop and being restarted again
-func (s *State) SetRestarting(exitCode int) {
+func (s *State) SetRestarting(exitStatus *execdriver.ExitStatus) {
 	s.Lock()
 	// we should consider the container running when it is restarting because of
 	// all the checks in docker around rm/stop/etc
@@ -173,12 +178,20 @@ func (s *State) SetRestarting(exitCode int) {
 	s.Restarting = true
 	s.Pid = 0
 	s.FinishedAt = time.Now().UTC()
-	s.ExitCode = exitCode
+	s.ExitCode = exitStatus.ExitCode
+	s.OOMKilled = exitStatus.OOMKilled
 	close(s.waitChan) // fire waiters for stop
 	s.waitChan = make(chan struct{})
 	s.Unlock()
 }
 
+// setError sets the container's error state. This is useful when we want to
+// know, while inspecting the container, the error that occurred as it
+// transitioned to another state
+func (s *State) setError(err error) {
+	s.Error = err.Error()
+}
+
 func (s *State) IsRestarting() bool {
 	s.Lock()
 	res := s.Restarting

+ 3 - 1
daemon/state_test.go

@@ -4,6 +4,8 @@ import (
 	"sync/atomic"
 	"testing"
 	"time"
+
+	"github.com/docker/docker/daemon/execdriver"
 )
 
 func TestStateRunStop(t *testing.T) {
@@ -47,7 +49,7 @@ func TestStateRunStop(t *testing.T) {
 			atomic.StoreInt64(&exit, int64(exitCode))
 			close(stopped)
 		}()
-		s.SetStopped(i)
+		s.SetStopped(&execdriver.ExitStatus{i, false})
 		if s.IsRunning() {
 			t.Fatal("State is running")
 		}

+ 7 - 3
daemon/utils.go

@@ -1,6 +1,7 @@
 package daemon
 
 import (
+	"errors"
 	"fmt"
 	"strings"
 
@@ -32,9 +33,9 @@ func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostCon
 	return nil
 }
 
-func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) []string {
+func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) ([]string, error) {
 	if hostConfig == nil {
-		return nil
+		return nil, nil
 	}
 
 	out := []string{}
@@ -44,10 +45,13 @@ func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) []string {
 		for _, pair := range lxcConf {
 			// because lxc conf gets the driver name lxc.XXXX we need to trim it off
 			// and let the lxc driver add it back later if needed
+			if !strings.Contains(pair.Key, ".") {
+				return nil, errors.New("Illegal Key passed into LXC Configurations")
+			}
 			parts := strings.SplitN(pair.Key, ".", 2)
 			out = append(out, fmt.Sprintf("%s=%s", parts[1], pair.Value))
 		}
 	}
 
-	return out
+	return out, nil
 }

Một số tệp đã không được hiển thị bởi vì quá nhiều tập tin thay đổi trong này khác