Merge pull request #10286 from icecrime/bump_v1.5.0

Bump to version v1.5.0
Arnaud Porterie 10 years ago
parent
commit
4bae33ef9f
100 changed files with 3946 additions and 1517 deletions
  1. .drone.yml (+2 -2)
  2. .gitignore (+1 -0)
  3. .mailmap (+19 -2)
  4. AUTHORS (+112 -57)
  5. CHANGELOG.md (+39 -0)
  6. CONTRIBUTING.md (+40 -13)
  7. Dockerfile (+85 -34)
  8. LICENSE (+1 -1)
  9. MAINTAINERS (+590 -9)
  10. Makefile (+12 -5)
  11. README.md (+55 -21)
  12. VERSION (+1 -1)
  13. api/client/cli.go (+17 -10)
  14. api/client/commands.go (+227 -294)
  15. api/client/hijack.go (+21 -1)
  16. api/client/utils.go (+3 -3)
  17. api/common.go (+7 -6)
  18. api/server/MAINTAINERS (+1 -1)
  19. api/server/server.go (+83 -22)
  20. api/server/server_unit_test.go (+1 -3)
  21. api/stats/stats.go (+87 -0)
  22. builder/MAINTAINERS (+1 -0)
  23. builder/dispatchers.go (+27 -10)
  24. builder/evaluator.go (+82 -32)
  25. builder/internals.go (+48 -31)
  26. builder/job.go (+10 -2)
  27. builder/parser/json_test.go (+55 -0)
  28. builder/parser/line_parsers.go (+16 -24)
  29. builder/parser/parser.go (+9 -7)
  30. builder/parser/parser_test.go (+2 -9)
  31. builder/parser/testfiles-negative/empty-instruction/Dockerfile (+8 -0)
  32. builder/parser/testfiles/ADD-COPY-with-JSON/Dockerfile (+9 -0)
  33. builder/parser/testfiles/ADD-COPY-with-JSON/result (+8 -0)
  34. builder/parser/testfiles/brimstone-consuldock/result (+1 -1)
  35. builder/parser/testfiles/brimstone-docker-consul/result (+3 -3)
  36. builder/parser/testfiles/docker/result (+2 -2)
  37. builder/parser/testfiles/json/Dockerfile (+8 -0)
  38. builder/parser/testfiles/json/result (+8 -0)
  39. builder/parser/utils.go (+2 -22)
  40. contrib/check-config.sh (+3 -0)
  41. contrib/completion/bash/docker (+232 -176)
  42. contrib/completion/fish/docker.fish (+186 -77)
  43. contrib/init/systemd/docker.service (+1 -0)
  44. contrib/init/sysvinit-redhat/docker (+2 -1)
  45. contrib/init/upstart/docker.conf (+17 -0)
  46. contrib/mkimage-arch.sh (+28 -2)
  47. contrib/mkimage/debootstrap (+5 -0)
  48. contrib/nuke-graph-directory.sh (+3 -2)
  49. contrib/syntax/vim/README.md (+4 -1)
  50. daemon/attach.go (+76 -112)
  51. daemon/commit.go (+4 -4)
  52. daemon/config.go (+6 -14)
  53. daemon/container.go (+155 -41)
  54. daemon/create.go (+22 -11)
  55. daemon/daemon.go (+125 -36)
  56. daemon/delete.go (+4 -1)
  57. daemon/exec.go (+21 -5)
  58. daemon/execdriver/driver.go (+28 -7)
  59. daemon/execdriver/execdrivers/execdrivers.go (+2 -1)
  60. daemon/execdriver/lxc/driver.go (+11 -6)
  61. daemon/execdriver/lxc/lxc_init_linux.go (+5 -4)
  62. daemon/execdriver/lxc/lxc_template.go (+77 -35)
  63. daemon/execdriver/lxc/lxc_template_unit_test.go (+80 -12)
  64. daemon/execdriver/native/create.go (+22 -7)
  65. daemon/execdriver/native/driver.go (+50 -15)
  66. daemon/execdriver/native/template/default_template.go (+7 -7)
  67. daemon/graphdriver/aufs/aufs.go (+20 -15)
  68. daemon/graphdriver/aufs/aufs_test.go (+1 -1)
  69. daemon/graphdriver/btrfs/btrfs.go (+2 -1)
  70. daemon/graphdriver/devmapper/README.md (+40 -1)
  71. daemon/graphdriver/devmapper/deviceset.go (+65 -35)
  72. daemon/graphdriver/devmapper/driver.go (+29 -9)
  73. daemon/graphdriver/driver.go (+69 -26)
  74. daemon/graphdriver/driver_linux.go (+14 -0)
  75. daemon/graphdriver/driver_unsupported.go (+7 -0)
  76. daemon/graphdriver/fsdiff.go (+6 -24)
  77. daemon/graphdriver/graphtest/graphtest.go (+2 -1)
  78. daemon/graphdriver/overlay/overlay.go (+36 -16)
  79. daemon/graphdriver/vfs/driver.go (+2 -11)
  80. daemon/image_delete.go (+2 -2)
  81. daemon/info.go (+12 -2)
  82. daemon/inspect.go (+6 -3)
  83. daemon/list.go (+12 -5)
  84. daemon/monitor.go (+11 -3)
  85. daemon/network_settings.go (+12 -7)
  86. daemon/networkdriver/bridge/driver.go (+239 -55)
  87. daemon/networkdriver/bridge/driver_test.go (+41 -0)
  88. daemon/networkdriver/ipallocator/allocator.go (+0 -1)
  89. daemon/networkdriver/portmapper/mapper.go (+1 -1)
  90. daemon/networkdriver/utils.go (+16 -11)
  91. daemon/rename.go (+34 -0)
  92. daemon/start.go (+10 -2)
  93. daemon/state_test.go (+1 -1)
  94. daemon/stats.go (+98 -0)
  95. daemon/stats_collector.go (+129 -0)
  96. daemon/utils_test.go (+1 -32)
  97. daemon/volumes.go (+63 -36)
  98. docker/daemon.go (+53 -2)
  99. docker/docker.go (+4 -2)
  100. docker/flags.go (+29 -7)

+ 2 - 2
.drone.yml

@@ -10,5 +10,5 @@ script:
   - rm integration-cli/docker_cli_daemon_test.go
   - rm integration-cli/docker_cli_exec_test.go 
 # Validate and test.
-  - hack/make.sh validate-dco validate-gofmt
-  - hack/make.sh binary cross test-unit test-integration-cli test-integration
+  - hack/make.sh validate-dco validate-gofmt validate-toml
+  - hack/make.sh binary cross test-unit test-integration-cli test-integration test-docker-py
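
The `.drone.yml` change above adds two bundles to the CI run: `validate-toml` for the new TOML `MAINTAINERS` files and `test-docker-py` for the docker-py integration suite. As a rough sketch, the same checks can be reproduced locally from a checkout of this tree, assuming the dependencies `hack/make.sh` expects are installed:

```bash
# Validation pass CI now runs; validate-toml is the new bundle in this PR.
hack/make.sh validate-dco validate-gofmt validate-toml

# Build and test, including the docker-py integration suite added here.
hack/make.sh binary cross test-unit test-integration-cli test-integration test-docker-py
```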

+ 1 - 0
.gitignore

@@ -28,3 +28,4 @@ docs/AWS_S3_BUCKET
 docs/GIT_BRANCH
 docs/VERSION
 docs/GITCOMMIT
+docs/changed-files

+ 19 - 2
.mailmap

@@ -6,6 +6,24 @@
 #
 # For explanation on this file format: man git-shortlog
 
+Patrick Stapleton <github@gdi2290.com>
+Shishir Mahajan <shishir.mahajan@redhat.com> <smahajan@redhat.com>
+Erwin van der Koogh <info@erronis.nl>
+Ahmed Kamal <email.ahmedkamal@googlemail.com>
+Tejesh Mehta <tejesh.mehta@gmail.com> <tj@init.me>
+Cristian Staretu <cristian.staretu@gmail.com>
+Cristian Staretu <cristian.staretu@gmail.com> <unclejacksons@gmail.com>
+Cristian Staretu <cristian.staretu@gmail.com> <unclejack@users.noreply.github.com>
+Marcus Linke <marcus.linke@gmx.de>
+Aleksandrs Fadins <aleks@s-ko.net>
+Christopher Latham <sudosurootdev@gmail.com>
+Hu Keping <hukeping@huawei.com>
+Wayne Chang <wayne@neverfear.org>
+Chen Chao <cc272309126@gmail.com>
+Daehyeok Mun <daehyeok@gmail.com>
+<daehyeok@gmail.com> <daehyeok@daehyeokui-MacBook-Air.local>
+<jt@yadutaf.fr> <admin@jtlebi.fr>
+<jeff@docker.com> <jefferya@programmerq.net>
 <charles.hooper@dotcloud.com> <chooper@plumata.com>
 <daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
 <daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
@@ -58,7 +76,7 @@ Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
 <michael@docker.com> <michael@crosbymichael.com>
 <michael@docker.com> <crosby.michael@gmail.com>
 <michael@docker.com> <crosbymichael@gmail.com>
-<github@developersupport.net> <github@metaliveblog.com> 
+<github@developersupport.net> <github@metaliveblog.com>
 <brandon@ifup.org> <brandon@ifup.co>
 <dano@spotify.com> <daniel.norberg@gmail.com>
 <danny@codeaholics.org> <Danny.Yates@mailonline.co.uk>
@@ -74,7 +92,6 @@ Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@docker.com>
 Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
 Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
 Sven Dowideit <SvenDowideit@home.org.au> <sven@t440s.home.gateway>
-unclejack <unclejacksons@gmail.com> <unclejack@users.noreply.github.com>
 <alexl@redhat.com> <alexander.larsson@gmail.com>
 Alexandr Morozov <lk4d4math@gmail.com>
 <git.nivoc@neverbox.com> <kuehnle@online.de>
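
The entries above fold duplicate author identities into canonical ones (for example, unclejack's addresses now map to Cristian Staretu). A quick way to verify the effect locally, since `git shortlog` applies `.mailmap` automatically:

```bash
# Each contributor should now appear once, under one name and address.
# -s prints commit counts, -e includes email addresses.
git shortlog -se | grep -i 'staretu'
```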

+ 112 - 57
AUTHORS

@@ -12,41 +12,46 @@ Adam Singer <financeCoding@gmail.com>
 Aditya <aditya@netroy.in>
 Adrian Mouat <adrian.mouat@gmail.com>
 Adrien Folie <folie.adrien@gmail.com>
+Ahmed Kamal <email.ahmedkamal@googlemail.com>
 Ahmet Alp Balkan <ahmetb@microsoft.com>
+Aidan Hobson Sayers <aidanhs@cantab.net>
 AJ Bowen <aj@gandi.net>
+Al Tobey <al@ooyala.com>
 alambike <alambike@gmail.com>
 Alan Thompson <cloojure@gmail.com>
 Albert Callarisa <shark234@gmail.com>
 Albert Zhang <zhgwenming@gmail.com>
 Aleksa Sarai <cyphar@cyphar.com>
+Aleksandrs Fadins <aleks@s-ko.net>
+Alex Gaynor <alex.gaynor@gmail.com>
+Alex Warhawk <ax.warhawk@gmail.com>
+Alexander Boyd <alex@opengroove.org>
 Alexander Larsson <alexl@redhat.com>
+Alexander Morozov <lk4d4@docker.com>
 Alexander Shopov <ash@kambanaria.org>
 Alexandr Morozov <lk4d4@docker.com>
 Alexey Kotlyarov <alexey@infoxchange.net.au>
 Alexey Shamrin <shamrin@gmail.com>
-Alex Gaynor <alex.gaynor@gmail.com>
 Alexis THOMAS <fr.alexisthomas@gmail.com>
-Alex Warhawk <ax.warhawk@gmail.com>
 almoehi <almoehi@users.noreply.github.com>
-Al Tobey <al@ooyala.com>
-Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
 amangoel <amangoel@gmail.com>
 Amit Bakshi <ambakshi@gmail.com>
-AnandkumarPatel <anandkumarpatel@gmail.com>
 Anand Patil <anand.prabhakar.patil@gmail.com>
+AnandkumarPatel <anandkumarpatel@gmail.com>
+Andre Dublin <81dublin@gmail.com>
 Andrea Luzzardi <aluzzardi@gmail.com>
+Andrea Turli <andrea.turli@gmail.com>
 Andreas Köhler <andi5.py@gmx.net>
 Andreas Savvides <andreas@editd.com>
 Andreas Tiefenthaler <at@an-ti.eu>
-Andrea Turli <andrea.turli@gmail.com>
-Andre Dublin <81dublin@gmail.com>
+Andrew C. Bodine <acbodine@us.ibm.com>
 Andrew Duckworth <grillopress@gmail.com>
 Andrew France <andrew@avito.co.uk>
 Andrew Macgregor <andrew.macgregor@agworld.com.au>
 Andrew Munsell <andrew@wizardapps.net>
-Andrews Medina <andrewsmedina@gmail.com>
 Andrew Weiss <andrew.weiss@outlook.com>
 Andrew Williams <williams.andrew@gmail.com>
+Andrews Medina <andrewsmedina@gmail.com>
 Andrey Petrov <andrey.petrov@shazow.net>
 Andrey Stolbovsky <andrey.stolbovsky@gmail.com>
 Andy Chambers <anchambers@paypal.com>
@@ -56,6 +61,8 @@ Andy Kipp <andy@rstudio.com>
 Andy Rothfusz <github@developersupport.net>
 Andy Smith <github@anarkystic.com>
 Andy Wilson <wilson.andrew.j+github@gmail.com>
+Ankush Agarwal <ankushagarwal11@gmail.com>
+Anthony Baire <Anthony.Baire@irisa.fr>
 Anthony Bishopric <git@anthonybishopric.com>
 Anton Löfgren <anton.lofgren@gmail.com>
 Anton Nikitin <anton.k.nikitin@gmail.com>
@@ -72,11 +79,11 @@ Barry Allard <barry.allard@gmail.com>
 Bartłomiej Piotrowski <b@bpiotrowski.pl>
 bdevloed <boris.de.vloed@gmail.com>
 Ben Firshman <ben@firshman.co.uk>
-Benjamin Atkin <ben@benatkin.com>
-Benoit Chesneau <bchesneau@gmail.com>
 Ben Sargent <ben@brokendigits.com>
 Ben Toews <mastahyeti@gmail.com>
 Ben Wiklund <ben@daisyowl.com>
+Benjamin Atkin <ben@benatkin.com>
+Benoit Chesneau <bchesneau@gmail.com>
 Bernerd Schaefer <bj.schaefer@gmail.com>
 Bert Goethals <bert@bertg.be>
 Bhiraj Butala <abhiraj.butala@gmail.com>
@@ -113,18 +120,22 @@ Charles Hooper <charles.hooper@dotcloud.com>
 Charles Lindsay <chaz@chazomatic.us>
 Charles Merriam <charles.merriam@gmail.com>
 Charlie Lewis <charliel@lab41.org>
+Chen Chao <cc272309126@gmail.com>
 Chewey <prosto-chewey@users.noreply.github.com>
 Chia-liang Kao <clkao@clkao.org>
 Chris Alfonso <calfonso@redhat.com>
 Chris Armstrong <chris@opdemand.com>
-chrismckinnel <chris.mckinnel@tangentlabs.co.uk>
 Chris Snow <chsnow123@gmail.com>
 Chris St. Pierre <chris.a.st.pierre@gmail.com>
+chrismckinnel <chris.mckinnel@tangentlabs.co.uk>
 Christian Berendt <berendt@b1-systems.de>
+Christian Stefanescu <st.chris@gmail.com>
 ChristoperBiscardi <biscarch@sketcht.com>
+Christophe Troestler <christophe.Troestler@umons.ac.be>
 Christopher Currie <codemonkey+github@gmail.com>
+Christopher Latham <sudosurootdev@gmail.com>
 Christopher Rigor <crigor@gmail.com>
-Christophe Troestler <christophe.Troestler@umons.ac.be>
+Chun Chen <chenchun.feed@gmail.com>
 Ciro S. Costa <ciro.costa@usp.br>
 Clayton Coleman <ccoleman@redhat.com>
 Colin Dunklau <colin.dunklau@gmail.com>
@@ -132,15 +143,20 @@ Colin Rice <colin@daedrum.net>
 Colin Walters <walters@verbum.org>
 Cory Forsyth <cory.forsyth@gmail.com>
 cressie176 <github@stephen-cresswell.net>
+Cristian Staretu <cristian.staretu@gmail.com>
 Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
 Daan van Berkel <daan.v.berkel.1980@gmail.com>
-Daehyeok.Mun <daehyeok@gmail.com>
+Daehyeok Mun <daehyeok@gmail.com>
 Dafydd Crosby <dtcrsby@gmail.com>
 Dan Buch <d.buch@modcloth.com>
 Dan Cotora <dan@bluevision.ro>
 Dan Griffin <dgriffin@peer1.com>
 Dan Hirsch <thequux@upstandinghackers.com>
-Daniel, Dao Quang Minh <dqminh89@gmail.com>
+Dan Keder <dan.keder@gmail.com>
+Dan McPherson <dmcphers@redhat.com>
+Dan Stine <sw@stinemail.com>
+Dan Walsh <dwalsh@redhat.com>
+Dan Williams <me@deedubs.com>
 Daniel Exner <dex@dragonslave.de>
 Daniel Farrell <dfarrell@redhat.com>
 Daniel Garcia <daniel@danielgarcia.info>
@@ -152,29 +168,27 @@ Daniel Nordberg <dnordberg@gmail.com>
 Daniel Robinson <gottagetmac@gmail.com>
 Daniel Von Fange <daniel@leancoder.com>
 Daniel YC Lin <dlin.tw@gmail.com>
-Dan Keder <dan.keder@gmail.com>
-Dan McPherson <dmcphers@redhat.com>
+Daniel, Dao Quang Minh <dqminh89@gmail.com>
 Danny Berger <dpb587@gmail.com>
 Danny Yates <danny@codeaholics.org>
-Dan Stine <sw@stinemail.com>
-Dan Walsh <dwalsh@redhat.com>
-Dan Williams <me@deedubs.com>
 Darren Coxall <darren@darrencoxall.com>
 Darren Shepherd <darren.s.shepherd@gmail.com>
 David Anderson <dave@natulte.net>
 David Calavera <david.calavera@gmail.com>
 David Corking <dmc-source@dcorking.com>
-Davide Ceretti <davide.ceretti@hogarthww.com>
 David Gageot <david@gageot.net>
 David Gebler <davidgebler@gmail.com>
+David Mat <david@davidmat.com>
 David Mcanulty <github@hellspark.com>
 David Pelaez <pelaez89@gmail.com>
 David Röthlisberger <david@rothlis.net>
 David Sissitka <me@dsissitka.com>
+Davide Ceretti <davide.ceretti@hogarthww.com>
 Dawn Chen <dawnchen@google.com>
 decadent <decadent@users.noreply.github.com>
 Deni Bertovic <deni@kset.org>
 Derek <crq@kernel.org>
+Derek <crquan@gmail.com>
 Derek McGowan <derek@mcgstyle.net>
 Deric Crago <deric.crago@gmail.com>
 Deshi Xiao <dxiao@redhat.com>
@@ -182,14 +196,16 @@ Dinesh Subhraveti <dineshs@altiscale.com>
 Djibril Koné <kone.djibril@gmail.com>
 dkumor <daniel@dkumor.com>
 Dmitry Demeshchuk <demeshchuk@gmail.com>
+Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
 Dolph Mathews <dolph.mathews@gmail.com>
 Dominik Honnef <dominik@honnef.co>
+Don Kjer <don.kjer@gmail.com>
 Don Spaulding <donspauldingii@gmail.com>
 Doug Davis <dug@us.ibm.com>
 doug tangren <d.tangren@gmail.com>
+Dr Nic Williams <drnicwilliams@gmail.com>
 dragon788 <dragon788@users.noreply.github.com>
 Dražen Lučanin <kermit666@gmail.com>
-Dr Nic Williams <drnicwilliams@gmail.com>
 Dustin Sallings <dustin@spy.net>
 Edmund Wagner <edmund-wagner@web.de>
 Eiichi Tsukata <devel@etsukata.com>
@@ -197,19 +213,22 @@ Eike Herzbach <eike@herzbach.net>
 Eivind Uggedal <eivind@uggedal.com>
 Elias Probst <mail@eliasprobst.eu>
 Emil Hernvall <emil@quench.at>
+Emily Maier <emily@emilymaier.net>
 Emily Rose <emily@contactvibe.com>
 Eric Hanchrow <ehanchrow@ine.com>
 Eric Lee <thenorthsecedes@gmail.com>
 Eric Myhre <hash@exultant.us>
 Eric Paris <eparis@redhat.com>
 Eric Windisch <ewindisch@docker.com>
+Erik Dubbelboer <erik@dubbelboer.com>
 Erik Hollensbe <github@hollensbe.org>
 Erik Inge Bolsø <knan@redpill-linpro.com>
 Erik Kristensen <erik@erikkristensen.com>
 Erno Hopearuoho <erno.hopearuoho@gmail.com>
+Erwin van der Koogh <info@erronis.nl>
 Eugene Yakubovich <eugene.yakubovich@coreos.com>
 eugenkrizo <eugen.krizo@gmail.com>
-evanderkoogh <info@erronis.nl>
+Evan Carmi <carmi@users.noreply.github.com>
 Evan Hazlett <ejhazlett@gmail.com>
 Evan Krall <krall@yelp.com>
 Evan Phoenix <evan@fallingsnow.net>
@@ -230,9 +249,9 @@ Francisco Carriedo <fcarriedo@gmail.com>
 Francisco Souza <f@souza.cc>
 Frank Macreery <frank@macreery.com>
 Frank Rosquin <frank.rosquin+github@gmail.com>
+Fred Lifton <fred.lifton@docker.com>
 Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
 Frederik Loeffert <frederik@zitrusmedia.de>
-Fred Lifton <fred.lifton@docker.com>
 Freek Kalter <freek@kalteronline.org>
 Gabe Rosenhouse <gabe@missionst.com>
 Gabor Nagy <mail@aigeruth.hu>
@@ -266,13 +285,15 @@ Hector Castro <hectcastro@gmail.com>
 Henning Sprang <henning.sprang@gmail.com>
 Hobofan <goisser94@gmail.com>
 Hollie Teal <hollie@docker.com>
+Hu Keping <hukeping@huawei.com>
+Hu Tao <hutao@cn.fujitsu.com>
 Huayi Zhang <irachex@gmail.com>
 Hugo Duncan <hugo@hugoduncan.org>
 Hunter Blanks <hunter@twilio.com>
-Hu Tao <hutao@cn.fujitsu.com>
 Huu Nguyen <huu@prismskylabs.com>
 hyeongkyu.lee <hyeongkyu.lee@navercorp.com>
 Ian Babrou <ibobrik@gmail.com>
+Ian Bishop <ianbishop@pace7.com>
 Ian Bull <irbull@gmail.com>
 Ian Main <imain@redhat.com>
 Ian Truslove <ian.truslove@gmail.com>
@@ -284,8 +305,10 @@ Isabel Jimenez <contact.isabeljimenez@gmail.com>
 Isao Jonas <isao.jonas@gmail.com>
 Ivan Fraixedes <ifcdev@gmail.com>
 Jack Danger Canty <jackdanger@squareup.com>
-jakedt <jake@devtable.com>
+Jacob Atzen <jacob@jacobatzen.dk>
+Jacob Edelman <edelman.jd@gmail.com>
 Jake Moshenko <jake@devtable.com>
+jakedt <jake@devtable.com>
 James Allen <jamesallen0108@gmail.com>
 James Carr <james.r.carr@gmail.com>
 James DeFelice <james.defelice@ishisystems.com>
@@ -306,49 +329,52 @@ Jason Plum <jplum@devonit.com>
 Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
 Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
 Jean-Paul Calderone <exarkun@twistedmatrix.com>
+Jean-Tiare Le Bigot <jt@yadutaf.fr>
+Jeff Anderson <jeff@docker.com>
 Jeff Lindsay <progrium@gmail.com>
-Jeffrey Bolle <jeffreybolle@gmail.com>
 Jeff Welch <whatthejeff@gmail.com>
+Jeffrey Bolle <jeffreybolle@gmail.com>
 Jeremy Grosser <jeremy@synack.me>
-Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
 Jesse Dubay <jesse@thefortytwo.net>
 Jessica Frazelle <jess@docker.com>
 Jezeniel Zapanta <jpzapanta22@gmail.com>
 Jilles Oldenbeuving <ojilles@gmail.com>
 Jim Alateras <jima@comware.com.au>
-Jimmy Cuadra <jimmy@jimmycuadra.com>
 Jim Perrin <jperrin@centos.org>
+Jimmy Cuadra <jimmy@jimmycuadra.com>
 Jiří Župka <jzupka@redhat.com>
 Joe Beda <joe.github@bedafamily.com>
 Joe Ferguson <joe@infosiftr.com>
-Joel Handwell <joelhandwell@gmail.com>
 Joe Shaw <joe@joeshaw.org>
 Joe Van Dyk <joe@tanga.com>
+Joel Friedly <joelfriedly@gmail.com>
+Joel Handwell <joelhandwell@gmail.com>
 Joffrey F <joffrey@docker.com>
 Johan Euphrosine <proppy@google.com>
-Johannes 'fish' Ziemke <github@freigeist.org>
 Johan Rydberg <johan.rydberg@gmail.com>
+Johannes 'fish' Ziemke <github@freigeist.org>
 John Costa <john.costa@gmail.com>
 John Feminella <jxf@jxf.me>
 John Gardiner Myers <jgmyers@proofpoint.com>
 John Gossman <johngos@microsoft.com>
 John OBrien III <jobrieniii@yahoo.com>
 John Warwick <jwarwick@gmail.com>
+Jon Wedaman <jweede@gmail.com>
 Jonas Pfenniger <jonas@pfenniger.name>
+Jonathan A. Sternberg <jonathansternberg@gmail.com>
 Jonathan Boulle <jonathanboulle@gmail.com>
 Jonathan Camp <jonathan@irondojo.com>
 Jonathan McCrohan <jmccrohan@gmail.com>
 Jonathan Mueller <j.mueller@apoveda.ch>
 Jonathan Pares <jonathanpa@users.noreply.github.com>
 Jonathan Rudenberg <jonathan@titanous.com>
-Jon Wedaman <jweede@gmail.com>
 Joost Cassee <joost@cassee.net>
 Jordan Arentsen <blissdev@gmail.com>
 Jordan Sissel <jls@semicomplete.com>
 Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
 Joseph Hager <ajhager@gmail.com>
-Josh Hawn <josh.hawn@docker.com>
 Josh <jokajak@gmail.com>
+Josh Hawn <josh.hawn@docker.com>
 Josh Poimboeuf <jpoimboe@redhat.com>
 Josiah Kiehl <jkiehl@riotgames.com>
 JP <jpellerin@leapfrogonline.com>
@@ -360,6 +386,9 @@ Justin Force <justin.force@gmail.com>
 Justin Plock <jplock@users.noreply.github.com>
 Justin Simonelis <justin.p.simonelis@gmail.com>
 Jyrki Puttonen <jyrkiput@gmail.com>
+Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
+Jörg Thalheim <joerg@higgsboson.tk>
+Kamil Domanski <kamil@domanski.co>
 Karan Lyons <karan@karanlyons.com>
 Karl Grzeszczak <karlgrz@gmail.com>
 Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
@@ -367,14 +396,13 @@ Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
 Keli Hu <dev@keli.hu>
 Ken Cochrane <kencochrane@gmail.com>
 Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
+Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
 Kevin Clark <kevin.clark@gmail.com>
 Kevin J. Lynagh <kevin@keminglabs.com>
 Kevin Menard <kevin@nirvdrum.com>
-Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
 Kevin Wallace <kevin@pentabarf.net>
 Keyvan Fatehi <keyvanfatehi@gmail.com>
 kies <lleelm@gmail.com>
-kim0 <email.ahmedkamal@googlemail.com>
 Kim BKC Carlbacker <kim.carlbacker@gmail.com>
 Kimbro Staken <kstaken@kstaken.com>
 Kiran Gangadharan <kiran.daredevil@gmail.com>
@@ -382,6 +410,7 @@ knappe <tyler.knappe@gmail.com>
 Kohei Tsuruta <coheyxyz@gmail.com>
 Konrad Kleine <konrad.wilhelm.kleine@gmail.com>
 Konstantin Pelykh <kpelykh@zettaset.com>
+Krasimir Georgiev <support@vip-consult.co.uk>
 krrg <krrgithub@gmail.com>
 Kyle Conroy <kyle.j.conroy@gmail.com>
 kyu <leehk1227@gmail.com>
@@ -397,13 +426,16 @@ Lei Jitang <leijitang@huawei.com>
 Len Weincier <len@cloudafrica.net>
 Leszek Kowalski <github@leszekkowalski.pl>
 Levi Gross <levi@levigross.com>
+Lewis Marshall <lewis@lmars.net>
 Lewis Peckover <lew+github@lew.io>
 Liang-Chi Hsieh <viirya@gmail.com>
 limsy <seongyeol37@gmail.com>
 Lokesh Mandvekar <lsm5@fedoraproject.org>
+Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
 Louis Opter <kalessin@kalessin.fr>
 lukaspustina <lukas.pustina@centerdevice.com>
 lukemarsden <luke@digital-crocus.com>
+Lénaïc Huard <lhuard@amadeus.com>
 Madhu Venugopal <madhu@socketplane.io>
 Mahesh Tiyyagura <tmahesh@gmail.com>
 Malte Janduda <mail@janduda.net>
@@ -412,12 +444,13 @@ Manuel Meurer <manuel@krautcomputing.com>
 Manuel Woelker <github@manuel.woelker.org>
 Marc Abramowitz <marc@marc-abramowitz.com>
 Marc Kuo <kuomarc2@gmail.com>
-Marco Hennings <marco.hennings@freiheit.com>
 Marc Tamsky <mtamsky@gmail.com>
+Marco Hennings <marco.hennings@freiheit.com>
 Marcus Farkas <toothlessgear@finitebox.com>
-marcuslinke <marcus.linke@gmx.de>
+Marcus Linke <marcus.linke@gmx.de>
 Marcus Ramberg <marcus@nordaaker.com>
 Marek Goldmann <marek.goldmann@gmail.com>
+Marianna <mtesselh@gmail.com>
 Marius Voila <marius.voila@gmail.com>
 Mark Allen <mrallen1@yahoo.com>
 Mark McGranaghan <mmcgrana@gmail.com>
@@ -425,7 +458,9 @@ Marko Mikulicic <mmikulicic@gmail.com>
 Marko Tibold <marko@tibold.nl>
 Markus Fix <lispmeister@gmail.com>
 Martijn van Oosterhout <kleptog@svana.org>
+Martin Honermeyer <maze@strahlungsfrei.de>
 Martin Redmond <martin@tinychat.com>
+Mary Anthony <moxieandmore@gmail.com>
 Mason Malone <mason.malone@gmail.com>
 Mateusz Sulima <sulima.mateusz@gmail.com>
 Mathias Monnerville <mathias@monnerville.com>
@@ -435,14 +470,14 @@ Matt Bachmann <bachmann.matt@gmail.com>
 Matt Haggard <haggardii@gmail.com>
 Matthew Heon <mheon@redhat.com>
 Matthew Mueller <mattmuelle@gmail.com>
+Matthew Riley <mattdr@google.com>
 Matthias Klumpp <matthias@tenstral.net>
 Matthias Kühnle <git.nivoc@neverbox.com>
 mattymo <raytrac3r@gmail.com>
 mattyw <mattyw@me.com>
-Maxime Petazzoni <max@signalfuse.com>
-Maxim Treskin <zerthurd@gmail.com>
 Max Shytikov <mshytikov@gmail.com>
-Médi-Rémi Hashim <medimatrix@users.noreply.github.com>
+Maxim Treskin <zerthurd@gmail.com>
+Maxime Petazzoni <max@signalfuse.com>
 meejah <meejah@meejah.ca>
 Mengdi Gao <usrgdd@gmail.com>
 Mert Yazıcıoğlu <merty@users.noreply.github.com>
@@ -451,12 +486,14 @@ Michael Crosby <michael@docker.com>
 Michael Gorsuch <gorsuch@github.com>
 Michael Hudson-Doyle <michael.hudson@linaro.org>
 Michael Neale <michael.neale@gmail.com>
-Michaël Pailloncy <mpapo.dev@gmail.com>
 Michael Prokop <github@michael-prokop.at>
 Michael Scharf <github@scharf.gr>
 Michael Stapelberg <michael+gh@stapelberg.de>
+Michael Steinert <mike.steinert@gmail.com>
 Michael Thies <michaelthies78@gmail.com>
 Michal Jemala <michal.jemala@gmail.com>
+Michal Minar <miminar@redhat.com>
+Michaël Pailloncy <mpapo.dev@gmail.com>
 Michiel@unhosted <michiel@unhosted.org>
 Miguel Angel Fernández <elmendalerenda@gmail.com>
 Mike Chelen <michael.chelen@gmail.com>
@@ -471,12 +508,15 @@ Morten Siebuhr <sbhr@sbhr.dk>
 Mrunal Patel <mrunalp@gmail.com>
 mschurenko <matt.schurenko@gmail.com>
 Mustafa Akın <mustafa91@gmail.com>
+Médi-Rémi Hashim <medimatrix@users.noreply.github.com>
 Nan Monnand Deng <monnand@gmail.com>
 Naoki Orii <norii@cs.cmu.edu>
+Nate Eagleson <nate@nateeag.com>
 Nate Jones <nate@endot.org>
 Nathan Hsieh <hsieh.nathan@gmail.com>
 Nathan Kleyn <nathan@nathankleyn.com>
 Nathan LeClaire <nathan.leclaire@docker.com>
+Neal McBurnett <neal@mcburnett.org>
 Nelson Chen <crazysim@gmail.com>
 Niall O'Higgins <niallo@unworkable.org>
 Nicholas E. Rabenau <nerab@gmx.at>
@@ -491,18 +531,21 @@ NikolaMandic <mn080202@gmail.com>
 noducks <onemannoducks@gmail.com>
 Nolan Darilek <nolan@thewordnerd.info>
 nzwsch <hi@nzwsch.com>
+O.S. Tezer <ostezer@gmail.com>
 OddBloke <daniel@daniel-watkins.co.uk>
 odk- <github@odkurzacz.org>
 Oguz Bilgic <fisyonet@gmail.com>
 Oh Jinkyun <tintypemolly@gmail.com>
 Ole Reifschneider <mail@ole-reifschneider.de>
 Olivier Gambier <dmp42@users.noreply.github.com>
-O.S. Tezer <ostezer@gmail.com>
 pandrew <letters@paulnotcom.se>
+panticz <mail@konczalski.de>
 Pascal Borreli <pascal@borreli.com>
 Pascal Hartig <phartig@rdrei.net>
 Patrick Hemmer <patrick.hemmer@gmail.com>
+Patrick Stapleton <github@gdi2290.com>
 pattichen <craftsbear@gmail.com>
+Paul <paul9869@gmail.com>
 Paul Annesley <paul@annesley.cc>
 Paul Bowsher <pbowsher@globalpersonals.co.uk>
 Paul Hammond <paul@paulhammond.org>
@@ -510,7 +553,6 @@ Paul Jimenez <pj@place.org>
 Paul Lietar <paul@lietar.net>
 Paul Morie <pmorie@gmail.com>
 Paul Nasrat <pnasrat@gmail.com>
-Paul <paul9869@gmail.com>
 Paul Weaver <pauweave@cisco.com>
 Pavlos Ratis <dastergon@gentoo.org>
 Peter Bourgon <peter@bourgon.org>
@@ -518,16 +560,18 @@ Peter Braden <peterbraden@peterbraden.co.uk>
 Peter Ericson <pdericson@gmail.com>
 Peter Salvatore <peter@psftw.com>
 Peter Waller <p@pwaller.net>
+Phil <underscorephil@gmail.com>
 Phil Estes <estesp@linux.vnet.ibm.com>
+Phil Spitler <pspitler@gmail.com>
 Philipp Weissensteiner <mail@philippweissensteiner.com>
 Phillip Alexander <git@phillipalexander.io>
-Phil Spitler <pspitler@gmail.com>
-Phil <underscorephil@gmail.com>
 Piergiuliano Bossi <pgbossi@gmail.com>
-Pierre-Alain RIVIERE <pariviere@ippon.fr>
 Pierre <py@poujade.org>
+Pierre Wacrenier <pierre.wacrenier@gmail.com>
+Pierre-Alain RIVIERE <pariviere@ippon.fr>
 Piotr Bogdan <ppbogdan@gmail.com>
 pixelistik <pixelistik@users.noreply.github.com>
+Porjo <porjo38@yahoo.com.au>
 Prasanna Gautam <prasannagautam@gmail.com>
 Przemek Hejman <przemyslaw.hejman@gmail.com>
 pysqz <randomq@126.com>
@@ -547,6 +591,7 @@ Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
 rgstephens <greg@udon.org>
 Rhys Hiltner <rhys@twitch.tv>
 Richard Harvey <richard@squarecows.com>
+Richard Metzler <richard@paadee.com>
 Richo Healey <richo@psych0tik.net>
 Rick Bradley <rick@users.noreply.github.com>
 Rick van de Loo <rickvandeloo@gmail.com>
@@ -572,25 +617,26 @@ Ryan Fowler <rwfowler@gmail.com>
 Ryan O'Donnell <odonnellryanc@gmail.com>
 Ryan Seto <ryanseto@yak.net>
 Ryan Thomas <rthomas@atlassian.com>
+Rémy Greinhofer <remy.greinhofer@livelovely.com>
 Sam Alba <sam.alba@gmail.com>
 Sam Bailey <cyprix@cyprix.com.au>
 Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
 Sam Reis <sreis@atlassian.com>
 Sam Rijs <srijs@airpost.net>
+Sami Wagiaalla <swagiaal@redhat.com>
 Samuel Andaya <samuel@andaya.net>
 Samuel PHAN <samuel-phan@users.noreply.github.com>
+Satnam Singh <satnam@raintown.org>
 satoru <satorulogic@gmail.com>
 Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
 Scott Bessler <scottbessler@gmail.com>
 Scott Collier <emailscottcollier@gmail.com>
 Scott Johnston <scott@docker.com>
+Scott Stamp <scottstamp851@gmail.com>
 Scott Walls <sawalls@umich.edu>
 Sean Cronin <seancron@gmail.com>
 Sean P. Kane <skane@newrelic.com>
 Sebastiaan van Stijn <github@gone.nl>
-Sébastien Luttringer <seblu@seblu.net>
-Sébastien <sebastien@yoozio.com>
-Sébastien Stormacq <sebsto@users.noreply.github.com>
 Senthil Kumar Selvaraj <senthil.thecoder@gmail.com>
 SeongJae Park <sj38.park@gmail.com>
 Shane Canon <scanon@lbl.gov>
@@ -598,12 +644,12 @@ shaunol <shaunol@gmail.com>
 Shawn Landden <shawn@churchofgit.com>
 Shawn Siefkas <shawn.siefkas@meredith.com>
 Shih-Yuan Lee <fourdollars@gmail.com>
+Shishir Mahajan <shishir.mahajan@redhat.com>
 shuai-z <zs.broccoli@gmail.com>
 Silas Sewell <silas@sewell.org>
 Simon Taranto <simon.taranto@gmail.com>
 Sindhu S <sindhus@live.in>
 Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
-s-ko <aleks@s-ko.net>
 Solomon Hykes <solomon@docker.com>
 Song Gao <song@gao.io>
 Soulou <leo@unbekandt.eu>
@@ -611,18 +657,23 @@ soulshake <amy@gandi.net>
 Sridatta Thatipamala <sthatipamala@gmail.com>
 Sridhar Ratnakumar <sridharr@activestate.com>
 Srini Brahmaroutu <sbrahma@us.ibm.com>
+Srini Brahmaroutu <srbrahma@us.ibm.com>
 Steeve Morin <steeve.morin@gmail.com>
 Stefan Praszalowicz <stefan@greplin.com>
 Stephen Crosby <stevecrozz@gmail.com>
 Steven Burgess <steven.a.burgess@hotmail.com>
 Steven Merrill <steven.merrill@gmail.com>
-sudosurootdev <sudosurootdev@gmail.com>
 Sven Dowideit <SvenDowideit@home.org.au>
 Sylvain Bellemare <sylvain.bellemare@ezeep.com>
+Sébastien <sebastien@yoozio.com>
+Sébastien Luttringer <seblu@seblu.net>
+Sébastien Stormacq <sebsto@users.noreply.github.com>
 tang0th <tang0th@gmx.com>
 Tatsuki Sugiura <sugi@nemui.org>
+Tatsushi Inagaki <e29253@jp.ibm.com>
 Ted M. Young <tedyoung@gmail.com>
 Tehmasp Chaudhri <tehmasp@gmail.com>
+Tejesh Mehta <tejesh.mehta@gmail.com>
 Thatcher Peskens <thatcher@docker.com>
 Thermionix <bond711@gmail.com>
 Thijs Terlouw <thijsterlouw@gmail.com>
@@ -636,11 +687,10 @@ Tianon Gravi <admwiggin@gmail.com>
 Tibor Vass <teabee89@gmail.com>
 Tim Bosse <taim@bosboot.org>
 Tim Hockin <thockin@google.com>
-Timothy Hobbs <timothyhobbs@seznam.cz>
 Tim Ruffles <oi@truffles.me.uk>
 Tim Smith <timbot@google.com>
 Tim Terhorst <mynamewastaken+git@gmail.com>
-tjmehta <tj@init.me>
+Timothy Hobbs <timothyhobbs@seznam.cz>
 tjwebb123 <tjwebb123@users.noreply.github.com>
 tobe <tobegit3hub@gmail.com>
 Tobias Bieniek <Tobias.Bieniek@gmx.de>
@@ -648,10 +698,12 @@ Tobias Gesellchen <tobias@gesellix.de>
 Tobias Schmidt <ts@soundcloud.com>
 Tobias Schwab <tobias.schwab@dynport.de>
 Todd Lunter <tlunter@gmail.com>
-Tomasz Lipinski <tlipinski@users.noreply.github.com>
 Tom Fotherby <tom+github@peopleperhour.com>
 Tom Hulihan <hulihan.tom159@gmail.com>
 Tom Maaswinkel <tom.maaswinkel@12wiki.eu>
+Tomas Tomecek <ttomecek@redhat.com>
+Tomasz Lipinski <tlipinski@users.noreply.github.com>
+Tomasz Nurkiewicz <nurkiewicz@gmail.com>
 Tommaso Visconti <tommaso.visconti@gmail.com>
 Tonis Tiigi <tonistiigi@gmail.com>
 Tony Daws <tony@daws.ca>
@@ -662,7 +714,8 @@ Trent Ogren <tedwardo2@gmail.com>
 Tyler Brock <tyler.brock@gmail.com>
 Tzu-Jung Lee <roylee17@gmail.com>
 Ulysse Carion <ulyssecarion@gmail.com>
-unclejack <unclejacksons@gmail.com>
+unknown <sebastiaan@ws-key-sebas3.dpi1.dpi>
+Vaidas Jablonskis <jablonskis@gmail.com>
 vgeta <gopikannan.venugopalsamy@gmail.com>
 Victor Coisne <victor.coisne@dotcloud.com>
 Victor Lyuboslavsky <victor@victoreda.com>
@@ -691,15 +744,15 @@ Walter Leibbrandt <github@wrl.co.za>
 Walter Stanish <walter@pratyeka.org>
 Ward Vandewege <ward@jhvc.com>
 WarheadsSE <max@warheads.net>
+Wayne Chang <wayne@neverfear.org>
 Wes Morgan <cap10morgan@gmail.com>
 Will Dietz <w@wdtz.org>
+Will Rouesnel <w.rouesnel@gmail.com>
+Will Weaver <monkey@buildingbananas.com>
 William Delanoue <william.delanoue@gmail.com>
 William Henry <whenry@redhat.com>
 William Riancho <wr.wllm@gmail.com>
 William Thurston <thurstw@amazon.com>
-Will Rouesnel <w.rouesnel@gmail.com>
-Will Weaver <monkey@buildingbananas.com>
-wyc <wayne@neverfear.org>
 Xiuming Chen <cc@cxm.cc>
 xuzhaokui <cynicholas@gmail.com>
 Yang Bai <hamo.by@gmail.com>
@@ -715,4 +768,6 @@ Zilin Du <zilin.du@gmail.com>
 zimbatm <zimbatm@zimbatm.com>
 Zoltan Tombol <zoltan.tombol@gmail.com>
 zqh <zqhxuyuan@gmail.com>
+Álex González <agonzalezro@gmail.com>
+Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
 尹吉峰 <jifeng.yin@gmail.com>
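
Most of the churn in `AUTHORS` comes from re-sorting and from names being rewritten through the updated `.mailmap`. The file is generated from git history; a minimal sketch of the kind of pipeline involved (the exact logic lives in the tree's `hack/generate-authors.sh` and may differ):

```bash
# %aN/%aE are the mailmap-aware author name and email; sort -uf
# de-duplicates case-insensitively to produce one line per contributor.
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
```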

+ 39 - 0
CHANGELOG.md

@@ -1,5 +1,44 @@
 # Changelog
 
+## 1.5.0 (2015-02-10)
+
+#### Builder
++ The Dockerfile to use for a given `docker build` can be specified with the `-f` flag
+* The Dockerfile and .dockerignore files can themselves be excluded via the .dockerignore file, preventing modifications to them from invalidating the ADD or COPY instruction cache
+* ADD and COPY instructions accept relative paths
+* Dockerfile `FROM scratch` instruction is now interpreted as a no-base specifier
+* Improve performance when exposing a large number of ports
+
+#### Hack
++ Allow client-side only integration tests for Windows
+* Include docker-py integration tests against Docker daemon as part of our test suites
+
+#### Packaging
++ Support for the new version of the registry HTTP API
+* Speed up `docker push` for images with a majority of already existing layers
+- Fixed contacting a private registry through a proxy
+
+#### Remote API
++ A new endpoint will stream live container resource metrics and can be accessed with the `docker stats` command
++ Containers can be renamed using the new `rename` endpoint and the associated `docker rename` command
+* The container `inspect` endpoint now shows the IDs of `exec` commands running in the container
+* The container `inspect` endpoint now shows the number of times Docker auto-restarted the container
+* New types of events can be streamed by the `events` endpoint: ‘OOM’ (container died from running out of memory), ‘exec_create’, and ‘exec_start’
+- Fixed returned string fields which hold numeric characters incorrectly omitting surrounding double quotes
+
+#### Runtime
++ Docker daemon has full IPv6 support
++ The `docker run` command can take the `--pid=host` flag to use the host PID namespace, which makes it possible, for example, to debug host processes using containerized debugging tools
++ The `docker run` command can take the `--read-only` flag to mount the container’s root filesystem as read-only, which can be used in combination with volumes to force a container’s processes to write only to locations that will be persisted
++ A container's total memory usage can be limited for `docker run` using the `--memory-swap` flag
+* Major stability improvements for devicemapper storage driver
+* Better integration with host system: containers will reflect changes to the host's `/etc/resolv.conf` file when restarted
+* Better integration with host system: per-container iptable rules are moved to the DOCKER chain
+- Fixed containers exiting on out of memory returning an invalid exit code
+
+#### Other
+* The HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables are properly taken into account by the client when connecting to the Docker daemon
+
 ## 1.4.1 (2014-12-15)
 
 #### Runtime
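
The 1.5.0 entries added above are all user-visible, and several can be exercised directly from the CLI. A hedged sketch, with image and container names as placeholders:

```bash
# Builder: pick a Dockerfile explicitly with the new -f flag.
docker build -f Dockerfile.dev -t myimage .

# Runtime: read-only rootfs, host PID namespace, and a total memory cap.
docker run --read-only --pid=host --memory=512m --memory-swap=1g busybox true

# Remote API: live resource metrics and the new rename endpoint.
docker stats mycontainer
docker rename old_name new_name
```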

+ 40 - 13
CONTRIBUTING.md

@@ -64,6 +64,45 @@ Please also include the steps required to reproduce the problem if
 possible and applicable.  This information will help us review and fix
 your issue faster.
 
+### Template
+
+```
+Description of problem:
+
+
+`docker version`:
+
+
+`docker info`:
+
+
+`uname -a`:
+
+
+Environment details (AWS, VirtualBox, physical, etc.):
+
+
+How reproducible:
+
+
+Steps to Reproduce:
+1.
+2.
+3.
+
+
+Actual Results:
+
+
+Expected Results:
+
+
+Additional info:
+
+
+
+```
+
 ## Build Environment
 
 For instructions on setting up your development environment, please
@@ -172,7 +211,7 @@ component affected. For example, if a change affects `docs/` and `registry/`, it
 needs an absolute majority from the maintainers of `docs/` AND, separately, an
 absolute majority of the maintainers of `registry/`.
 
-For more details see [MAINTAINERS.md](project/MAINTAINERS.md)
+For more details see [MAINTAINERS](MAINTAINERS)
 
 ### Sign your work
 
@@ -233,18 +272,6 @@ Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still
 accepted, so there is no need to update outstanding pull requests to the new
 format right away, but please do adjust your processes for future contributions.
 
-#### Small patch exception
-
-There are several exceptions to the signing requirement. Currently these are:
-
-* Your patch fixes spelling or grammar errors.
-* Your patch is a single line change to documentation contained in the
-  `docs` directory.
-* Your patch fixes Markdown formatting or syntax errors in the
-  documentation contained in the `docs` directory.
-
-If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.com)
-
 ### How can I become a maintainer?
 
 * Step 1: Learn the component inside out
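
Since the new issue template above asks for `docker version`, `docker info`, and `uname -a` output, a small shell snippet can collect all three in one go before filing (the output file name is arbitrary):

```bash
# Gather the diagnostics the issue template asks for into one file.
{
	echo '`docker version`:'; docker version; echo
	echo '`docker info`:'; docker info; echo
	echo '`uname -a`:'; uname -a
} > issue-details.txt
```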

+ 85 - 34
Dockerfile

@@ -23,11 +23,12 @@
 # the case. Therefore, you don't have to disable it anymore.
 #
 
-FROM	ubuntu:14.04
-MAINTAINER	Tianon Gravi <admwiggin@gmail.com> (@tianon)
+FROM ubuntu:14.04
+MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)
 
 # Packaged dependencies
-RUN	apt-get update && apt-get install -y \
+RUN apt-get update && apt-get install -y \
+	apparmor \
 	aufs-tools \
 	automake \
 	btrfs-tools \
@@ -39,9 +40,11 @@ RUN	apt-get update && apt-get install -y \
 	libapparmor-dev \
 	libcap-dev \
 	libsqlite3-dev \
-	lxc=1.0* \
 	mercurial \
 	parallel \
+	python-mock \
+	python-pip \
+	python-websocket \
 	reprepro \
 	ruby1.9.1 \
 	ruby1.9.1-dev \
@@ -49,67 +52,115 @@ RUN	apt-get update && apt-get install -y \
 	--no-install-recommends
 
 # Get lvm2 source for compiling statically
-RUN	git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
+RUN git clone -b v2_02_103 https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2
 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
-# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly
 
 # Compile and install lvm2
-RUN	cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
+RUN cd /usr/local/lvm2 \
+	&& ./configure --enable-static_link \
+	&& make device-mapper \
+	&& make install_device-mapper
 # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
 
+# Install lxc
+ENV LXC_VERSION 1.0.7
+RUN mkdir -p /usr/src/lxc \
+	&& curl -sSL https://linuxcontainers.org/downloads/lxc/lxc-${LXC_VERSION}.tar.gz | tar -v -C /usr/src/lxc/ -xz --strip-components=1
+RUN cd /usr/src/lxc \
+	&& ./configure \
+	&& make \
+	&& make install \
+	&& ldconfig
+
 # Install Go
-RUN	curl -sSL https://golang.org/dl/go1.3.3.src.tar.gz | tar -v -C /usr/local -xz
-ENV	PATH	/usr/local/go/bin:$PATH
-ENV	GOPATH	/go:/go/src/github.com/docker/docker/vendor
-ENV PATH /go/bin:$PATH
-RUN	cd /usr/local/go/src && ./make.bash --no-clean 2>&1
+ENV GO_VERSION 1.4.1
+RUN curl -sSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/local -xz \
+	&& mkdir -p /go/bin
+ENV PATH /go/bin:/usr/local/go/bin:$PATH
+ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
+RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1
 
 # Compile Go for cross compilation
-ENV	DOCKER_CROSSPLATFORMS	\
+ENV DOCKER_CROSSPLATFORMS \
 	linux/386 linux/arm \
 	darwin/amd64 darwin/386 \
-	freebsd/amd64 freebsd/386 freebsd/arm 
-#	windows is experimental for now
+	freebsd/amd64 freebsd/386 freebsd/arm
+
+# TODO when https://jenkins.dockerproject.com/job/Windows/ is green, add windows back to the list above
 #	windows/amd64 windows/386
 
 # (set an explicit GOARM of 5 for maximum compatibility)
-ENV	GOARM	5
-RUN	cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
+ENV GOARM 5
+RUN cd /usr/local/go/src \
+	&& set -x \
+	&& for platform in $DOCKER_CROSSPLATFORMS; do \
+		GOOS=${platform%/*} \
+		GOARCH=${platform##*/} \
+			./make.bash --no-clean 2>&1; \
+	done
+
+# We still support compiling with older Go, so we need to grab an older "gofmt"
+ENV GOFMT_VERSION 1.3.3
+RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt
 
 # Grab Go's cover tool for dead-simple code coverage testing
-RUN	go get golang.org/x/tools/cmd/cover
+RUN go get golang.org/x/tools/cmd/cover
 
 # TODO replace FPM with some very minimal debhelper stuff
-RUN	gem install --no-rdoc --no-ri fpm --version 1.3.2
-
-# Install man page generator
-RUN mkdir -p /go/src/github.com/cpuguy83 \
-    && git clone -b v1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \
-    && cd /go/src/github.com/cpuguy83/go-md2man \
-    && go get -v ./...
+RUN gem install --no-rdoc --no-ri fpm --version 1.3.2
 
 # Get the "busybox" image source so we can build locally instead of pulling
-RUN	git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox
+RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox
 
 # Get the "cirros" image source so we can import it instead of fetching it during tests
-RUN	curl -sSL -o /cirros.tar.gz https://github.com/ewindisch/docker-cirros/raw/1cded459668e8b9dbf4ef976c94c05add9bbd8e9/cirros-0.3.0-x86_64-lxc.tar.gz
+RUN curl -sSL -o /cirros.tar.gz https://github.com/ewindisch/docker-cirros/raw/1cded459668e8b9dbf4ef976c94c05add9bbd8e9/cirros-0.3.0-x86_64-lxc.tar.gz
+
+# Install registry
+ENV REGISTRY_COMMIT c448e0416925a9876d5576e412703c9b8b865e19
+RUN set -x \
+	&& git clone https://github.com/docker/distribution.git /go/src/github.com/docker/distribution \
+	&& (cd /go/src/github.com/docker/distribution && git checkout -q $REGISTRY_COMMIT) \
+	&& GOPATH=/go/src/github.com/docker/distribution/Godeps/_workspace:/go \
+		go build -o /go/bin/registry-v2 github.com/docker/distribution/cmd/registry
+
+# Get the "docker-py" source so we can run their integration tests
+ENV DOCKER_PY_COMMIT aa19d7b6609c6676e8258f6b900dea2eda1dbe95
+RUN git clone https://github.com/docker/docker-py.git /docker-py \
+	&& cd /docker-py \
+	&& git checkout -q $DOCKER_PY_COMMIT
 
 # Setup s3cmd config
-RUN	/bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > $HOME/.s3cfg
+RUN { \
+		echo '[default]'; \
+		echo 'access_key=$AWS_ACCESS_KEY'; \
+		echo 'secret_key=$AWS_SECRET_KEY'; \
+	} > ~/.s3cfg
 
 # Set user.email so crosbymichael's in-container merge commits go smoothly
-RUN	git config --global user.email 'docker-dummy@example.com'
+RUN git config --global user.email 'docker-dummy@example.com'
 
 # Add an unprivileged user to be used for tests which need it
 RUN groupadd -r docker
 RUN useradd --create-home --gid docker unprivilegeduser
 
-VOLUME	/var/lib/docker
-WORKDIR	/go/src/github.com/docker/docker
-ENV	DOCKER_BUILDTAGS	apparmor selinux btrfs_noversion
+VOLUME /var/lib/docker
+WORKDIR /go/src/github.com/docker/docker
+ENV DOCKER_BUILDTAGS apparmor selinux btrfs_noversion
+
+# Install man page generator
+COPY vendor /go/src/github.com/docker/docker/vendor
+# (copy vendor/ because go-md2man needs golang.org/x/net)
+RUN set -x \
+	&& git clone -b v1.0.1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \
+	&& git clone -b v1.2 https://github.com/russross/blackfriday.git /go/src/github.com/russross/blackfriday \
+	&& go install -v github.com/cpuguy83/go-md2man
+
+# Install TOML validator
+RUN git clone -b v0.1.0 https://github.com/BurntSushi/toml.git /go/src/github.com/BurntSushi/toml \
+    && go install -v github.com/BurntSushi/toml/cmd/tomlv
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
-ENTRYPOINT	["hack/dind"]
+ENTRYPOINT ["hack/dind"]
 
 # Upload docker source
-COPY	.	/go/src/github.com/docker/docker
+COPY . /go/src/github.com/docker/docker
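
The reworked cross-compilation step above splits each `$DOCKER_CROSSPLATFORMS` entry into a GOOS/GOARCH pair using shell parameter expansion. The same idiom in isolation, assuming a Go source tree under /usr/local/go:

```bash
# ${platform%/*} keeps the part before the last "/" (GOOS, e.g. linux),
# ${platform##*/} keeps the part after it (GOARCH, e.g. arm).
cd /usr/local/go/src
for platform in $DOCKER_CROSSPLATFORMS; do
	GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1
done
```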

+ 1 - 1
LICENSE

@@ -176,7 +176,7 @@
 
    END OF TERMS AND CONDITIONS
 
-   Copyright 2014 Docker, Inc.
+   Copyright 2013-2015 Docker, Inc.
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.

+ 590 - 9
MAINTAINERS

@@ -1,9 +1,590 @@
-Solomon Hykes <solomon@docker.com> (@shykes)
-Victor Vieux <vieux@docker.com> (@vieux)
-Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
-.mailmap: Tianon Gravi <admwiggin@gmail.com> (@tianon)
-.travis.yml: Tianon Gravi <admwiggin@gmail.com> (@tianon)
-AUTHORS: Tianon Gravi <admwiggin@gmail.com> (@tianon)
-Dockerfile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
-Makefile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
-.dockerignore: Tianon Gravi <admwiggin@gmail.com> (@tianon)
+# Docker maintainers file
+#
+# This file describes who runs the Docker project and how.
+# This is a living document - if you see something out of date or missing,
+# speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant
+# parser.
+
+[Rules]
+
+	[Rules.maintainers]
+
+	title = "What is a maintainer?"
+
+	text = """
+There are different types of maintainers, with different responsibilities, but
+all maintainers have 3 things in common:
+
+1) They share responsibility in the project's success.
+2) They have made a long-term, recurring time investment to improve the project.
+3) They spend that time doing whatever needs to be done, not necessarily what
+is the most interesting or fun.
+
+Maintainers are often under-appreciated, because their work is harder to appreciate.
+It's easy to appreciate a really cool and technically advanced feature. It's harder
+to appreciate the absence of bugs, the slow but steady improvement in stability,
+or the reliability of a release process. But those things distinguish a good
+project from a great one.
+"""
+
+	[Rules.bdfl]
+
+		title = "The Benevolent dictator for life (BDFL)"
+
+		text = """
+Docker follows the timeless, highly efficient and totally unfair system
+known as [Benevolent dictator for
+life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with
+yours truly, Solomon Hykes, in the role of BDFL. This means that all
+decisions are made, by default, by Solomon. Since making every decision
+myself would be highly un-scalable, in practice decisions are spread
+across multiple maintainers.
+
+Ideally, the BDFL role is like the Queen of England: awesome crown, but not
+an actual operational role day-to-day. The real job of a BDFL is to NEVER GO AWAY.
+Every other rule can change, perhaps drastically so, but the BDFL will always
+be there, preserving the philosophy and principles of the project, and keeping
+ultimate authority over its fate. This gives us great flexibility in experimenting
+with various governance models, knowing that we can always press the "reset" button
+without fear of fragmentation or deadlock. See the US congress for a counter-example.
+
+BDFL daily routine:
+
+* Is the project governance stuck in a deadlock or irreversibly fragmented?
+	* If yes: refactor the project governance
+* Are there issues or conflicts escalated by core?
+	* If yes: resolve them
+* Go back to polishing that crown.
+"""
+
+	[Rules.decisions]
+
+		title = "How are decisions made?"
+
+		text = """
+Short answer: EVERYTHING IS A PULL REQUEST.
+
+Docker is an open-source project with an open design philosophy. This
+means that the repository is the source of truth for EVERY aspect of the
+project, including its philosophy, design, road map, and APIs. *If it's
+part of the project, it's in the repo. If it's in the repo, it's part of
+the project.*
+
+As a result, all decisions can be expressed as changes to the
+repository. An implementation change is a change to the source code. An
+API change is a change to the API specification. A philosophy change is
+a change to the philosophy manifesto, and so on.
+
+All decisions affecting Docker, big and small, follow the same 3 steps:
+
+* Step 1: Open a pull request. Anyone can do this.
+
+* Step 2: Discuss the pull request. Anyone can do this.
+
+* Step 3: Merge or refuse the pull request. Who does this depends on the nature
+of the pull request and which areas of the project it affects. See *review flow*
+for details.
+
+Because Docker is such a large and active project, it's important for everyone to know
+who is responsible for deciding what. That is determined by a precise set of rules.
+
+* For every *decision* in the project, the rules should designate, in a deterministic way,
+who should *decide*.
+
+* For every *problem* in the project, the rules should designate, in a deterministic way,
+who should be responsible for *fixing* it.
+
+* For every *question* in the project, the rules should designate, in a deterministic way,
+who should be expected to have the *answer*.
+"""
+
+	[Rules.review]
+
+		title = "Review flow"
+
+		text = """
+Pull requests should be processed according to the following flow:
+
+* For each subsystem affected by the change, the maintainers of the subsystem must approve or refuse it.
+It is the responsibility of the subsystem maintainers to process patches affecting them in a timely
+manner.
+
+* If the change affects areas of the code which are not part of a subsystem,
+or if subsystem maintainers are unable to reach a timely decision, it must be approved by 
+the core maintainers.
+
+* If the change affects the UI or public APIs, or if it represents a major change in architecture,
+the architects must approve or refuse it.
+
+* If the change affects the operations of the project, it must be approved or rejected by
+the relevant operators.
+
+* If the change affects the governance, philosophy, goals or principles of the project,
+it must be approved by BDFL.
+
+* A pull request can be in 1 of 5 distinct states, for each of which there is a corresponding label
+that needs to be applied. `Rules.review.states` contains the list of states with possible targets
+for each.
+"""
+
+		# Triage
+		[Rules.review.states.0-triage]
+
+			# Maintainers are expected to triage new incoming pull requests by removing
+			# the `0-triage` label and adding the correct labels (e.g. `1-design-review`)
+			# potentially skipping some steps depending on the kind of pull request.
+			# Use common sense for judging.
+			#
+			# Checking for DCO should be done at this stage.
+			#
+			# If an owner responsible for closing or merging can be assigned to
+			# the PR, so much the better.
+
+			close = "e.g. unresponsive contributor without DCO"
+			3-docs-review = "non-proposal documentation-only change"
+			2-code-review = "e.g. trivial bugfix"
+			1-design-review = "general case"
+
+		# Design review
+		[Rules.review.states.1-design-review]
+
+			# Maintainers are expected to comment on the design of the pull request.
+			# Review of documentation is expected only in the context of design validation,
+			# not for stylistic changes.
+			#
+			# Ideally, documentation should reflect the expected behavior of the code.
+			# No code review should take place in this step.
+			#
+			# Once design is approved, a maintainer should make sure to remove this label
+			# and add the next one.
+
+			close = "design rejected"
+			3-docs-review = "proposals with only documentation changes"
+			2-code-review = "general case"
+
+		# Code review
+		[Rules.review.states.2-code-review]
+
+			# Maintainers are expected to review the code and ensure that it is good
+			# quality and in accordance with the documentation in the PR.
+			#
+			# If documentation is absent but expected, maintainers should ask for documentation.
+			#
+			# All tests should pass.
+			#
+			# Once code is approved according to the rules of the subsystem, a maintainer
+			# should make sure to remove this label and add the next one.
+
+			close = ""
+			1-design-review = "raises design concerns"
+			4-merge = "trivial change not impacting documentation"
+			3-docs-review = "general case"
+
+		# Docs review
+		[Rules.review.states.3-docs-review]
+
+			# Maintainers are expected to review the documentation in its bigger context,
+			# ensuring consistency, completeness, validity, and breadth of coverage across
+			# all existing and new documentation.
+			#
+			# They should ask for any editorial change that makes the documentation more
+			# consistent and easier to understand.
+			#
+			# Once documentation is approved, a maintainer should make sure to remove this
+			# label and add the next one.
+
+			close = ""
+			2-code-review = "requires more code changes"
+			1-design-review = "raises design concerns"
+			4-merge = "general case"
+
+		# Merge
+		[Rules.review.states.4-merge]
+
+			# Maintainers are expected to merge this pull request as soon as possible.
+			# They can ask for a rebase, or carry the pull request themselves.
+			# These should be the easy PRs to merge.
+
+			close = "carry PR"
+			merge = ""
+
+	[Rules.DCO]
+
+	title = "Helping contributors with the DCO"
+
+	text = """
+The [DCO or `Sign your work`](
+https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work)
+requirement is not intended as a roadblock or speed bump.
+
+Some Docker contributors are not as familiar with `git`, or have used a web based
+editor, and thus asking them to `git commit --amend -s` is not the best way forward.
+
+In this case, maintainers can update the commits based on clause (c) of the DCO. The
+most trivial way for a contributor to allow the maintainer to do this, is to add
+a DCO signature in a pull request's comment, or a maintainer can simply note that
+the change is sufficiently trivial that it does not substantively change the existing
+contribution - i.e., a spelling change.
+
+When you add someone's DCO, please also add your own to keep a log.
+"""
+
+	[Rules.holiday]
+
+	title = "I'm a maintainer, and I'm going on holiday"
+
+	text = """
+Please let your co-maintainers and other contributors know by raising a pull
+request that comments out your `MAINTAINERS` file entry using a `#`.
+"""
+
+	[Rules."no direct push"]
+
+	title = "I'm a maintainer. Should I make pull requests too?"
+
+	text = """
+Yes. Nobody should ever push to master directly. All changes should be
+made through a pull request.
+"""
+
+	[Rules.meta]
+
+	title = "How is this process changed?"
+
+	text = "Just like everything else: by making a pull request :)"
+
+# Current project organization
+[Org]
+
+	bdfl = "shykes"
+
+	# The chief architect is responsible for the overall integrity of the technical architecture
+	# across all subsystems, and the consistency of APIs and UI.
+	# 
+	# Changes to UI, public APIs and overall architecture (for example a plugin system) must
+	# be approved by the chief architect.
+	"Chief Architect" = "shykes"
+
+	# The Chief Operator is responsible for the day-to-day operations of the project including:
+	# - facilitating communications amongst all the contributors;
+	# - tracking release schedules;
+	# - managing the relationship with downstream distributions and upstream dependencies;
+	# - helping new contributors to get involved and become successful contributors and maintainers
+	#
+	# The role is also responsible for managing and measuring the success of the overall project
+	# and ensuring it is governed properly working in concert with the Docker Governance Advisory Board (DGAB).
+	"Chief Operator" = "spf13"
+
+	[Org.Operators]
+
+	# The operators make sure the trains run on time. They are responsible for overall operations
+	# of the project. This includes facilitating communication between all the participants; helping
+	# newcomers get involved and become successful contributors and maintainers; tracking the schedule
+	# of releases; managing the relationship with downstream distributions and upstream dependencies;
+	# defining measures of success for the project and measuring progress; and devising and implementing tools and
+	# processes which make contributors and maintainers happier and more efficient.
+
+
+		[Org.Operators.security]
+
+			people = [
+				"erw"
+			]
+
+		[Org.Operators."monthly meetings"]
+
+			people = [
+				"sven",
+				"tianon"
+			]
+
+		[Org.Operators.infrastructure]
+
+			people = [
+				"jfrazelle",
+				"crosbymichael"
+			]
+
+	# The chief maintainer is responsible for all aspects of quality for the project including
+	# code reviews, usability, stability, security, performance, etc. 
+	# The most important function of the chief maintainer is to lead by example. On the first
+	# day of a new maintainer, the best advice should be "follow the C.M.'s example and you'll
+	# be fine".
+	"Chief Maintainer" = "crosbymichael"
+
+	[Org."Core maintainers"]
+
+	# The Core maintainers are the ghostbusters of the project: when there's a problem others
+	# can't solve, they show up and fix it with bizarre devices and weaponry.
+	# They have final say on technical implementation and coding style.
+	# They are ultimately responsible for quality in all its forms: usability polish,
+	# bugfixes, performance, stability, etc. When ownership can cleanly be passed to
+	# a subsystem, they are responsible for doing so and holding the
+	# subsystem maintainers accountable. If ownership is unclear, they are the de facto owners.
+
+	# For each release (including minor releases), a "release captain" is assigned from the
+	# pool of core maintainers. Rotation is encouraged across all maintainers, to ensure
+	# the release process is clear and up-to-date.
+	#
+	# It is common for core maintainers to "branch out" to join or start a subsystem.
+
+		people = [
+			"unclejack",
+			"crosbymichael",
+			"erikh",
+			"icecrime",
+			"jfrazelle",
+			"lk4d4",
+			"tibor",
+			"vbatts",
+			"vieux",
+			"vish"
+		]
+
+
+	[Org.Subsystems]
+
+	# As the project grows, it gets separated into well-defined subsystems. Each subsystem
+	# has a dedicated group of maintainers who are responsible for its quality.
+	# This "cellular division" is the primary mechanism for scaling maintenance of the project as it grows.
+	#
+	# The maintainers of each subsystem are responsible for:
+	#
+	# 1. Exposing a clear road map for improving their subsystem.
+	# 2. Delivering prompt feedback and decisions on pull requests affecting their subsystem.
+	# 3. Being available to anyone with questions, bug reports, criticism etc.
+	#   on their component. This includes IRC, GitHub requests and the mailing
+	#   list.
+	# 4. Making sure their subsystem respects the philosophy, design and
+	#   road map of the project.
+	#
+	# #### How to review patches to your subsystem
+	# 
+	# Accepting pull requests:
+	# 
+	#   - If the pull request appears to be ready to merge, give it a `LGTM`, which
+	#     stands for "Looks Good To Me".
+	#   - If the pull request has some small problems that need to be changed, make
+	#     a comment addressing the issues.
+	#   - If the changes needed to a PR are small, you can add a "LGTM once the
+	#     following comments are addressed..."; this will reduce needless back and
+	#     forth.
+	#   - If the PR only needs a few changes before being merged, any MAINTAINER can
+	#     make a replacement PR that incorporates the existing commits and fixes the
+	#     problems before a fast track merge.
+	# 
+	# Closing pull requests:
+	# 
+	#   - If a PR appears to be abandoned, after having attempted to contact the
+	#     original contributor, then a replacement PR may be made.  Once the
+	#     replacement PR is made, any contributor may close the original one.
+	#   - If you are not sure if the pull request implements a good feature or you
+	#     do not understand the purpose of the PR, ask the contributor to provide
+	#     more documentation.  If the contributor is not able to adequately explain
+	#     the purpose of the PR, the PR may be closed by any MAINTAINER.
+	#   - If a MAINTAINER feels that the pull request is sufficiently architecturally
+	#     flawed, or if the pull request needs significantly more design discussion
+	#     before being considered, the MAINTAINER should close the pull request with
+	#     a short explanation of what discussion still needs to be had.  It is
+	#     important not to leave such pull requests open, as this will waste both the
+	#     MAINTAINER's time and the contributor's time.  It is not good to string a
+	#     contributor along for weeks or months, having them make many changes to a PR
+	#     that will eventually be rejected.
+
+		[Org.Subsystems.Documentation]
+
+			people = [
+				"fredlf",
+				"james",
+				"sven",
+			]
+
+		[Org.Subsystems.libcontainer]
+
+			people = [
+				"crosbymichael",
+				"vmarmol",
+				"mpatel",
+				"jnagal",
+				"lk4d4"
+			]
+
+		[Org.Subsystems.registry]
+
+			people = [
+				"dmp42",
+				"vbatts",
+				"joffrey",
+				"samalba"
+			]
+
+		[Org.Subsystems."build tools"]
+
+			people = [
+				"shykes",
+				"tianon"
+			]
+
+		[Org.Subsystems."remote api"]
+
+			people = [
+				"vieux"
+			]
+
+		[Org.Subsystems.swarm]
+
+			people = [
+				"aluzzardi",
+				"vieux"
+			]
+
+		[Org.Subsystems.machine]
+
+			people = [
+				"bfirsh",
+				"ehazlett"
+			]
+
+		[Org.Subsystems.compose]
+
+			people = [
+				"aanand"
+			]
+
+		[Org.Subsystems.builder]
+
+			people = [
+				"erikh",
+				"tibor",
+				"duglin"
+			]
+
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+	# ADD YOURSELF HERE IN ALPHABETICAL ORDER
+
+	[people.aanand]
+	Name = "Aanand Prasad"
+	Email = "aanand@docker.com"
+	GitHub = "aanand"
+
+	[people.aluzzardi]
+	Name = "Andrea Luzzardi"
+	Email = "aluzzardi@docker.com"
+	GitHub = "aluzzardi"
+
+	[people.bfirsh]
+	Name = "Ben Firshman"
+	Email = "ben@firshman.co.uk"
+	GitHub = "bfirsh"
+
+	[people.crosbymichael]
+	Name = "Michael Crosby"
+	Email = "crosbymichael@gmail.com"
+	GitHub = "crosbymichael"
+
+	[people.duglin]
+	Name = "Doug Davis"
+	Email = "dug@us.ibm.com"
+	GitHub = "duglin"
+
+	[people.ehazlett]
+	Name = "Evan Hazlett"
+	Email = "ejhazlett@gmail.com"
+	GitHub = "ehazlett"
+
+	[people.erikh]
+	Name = "Erik Hollensbe"
+	Email = "erik@docker.com"
+	GitHub = "erikh"
+
+	[people.erw]
+	Name = "Eric Windisch"
+	Email = "eric@windisch.us"
+	GitHub = "ewindisch"
+
+	[people.icecrime]
+	Name = "Arnaud Porterie"
+	Email = "arnaud@docker.com"
+	GitHub = "icecrime"
+
+	[people.jfrazelle]
+	Name = "Jessie Frazelle"
+	Email = "jess@docker.com"
+	GitHub = "jfrazelle"
+
+	[people.lk4d4]
+	Name = "Alexander Morozov"
+	Email = "lk4d4@docker.com"
+	GitHub = "lk4d4"
+
+	[people.shykes]
+	Name = "Solomon Hykes"
+	Email = "solomon@docker.com"
+	GitHub = "shykes"
+
+	[people.spf13]
+	Name = "Steve Francia"
+	Email = "steve.francia@gmail.com"
+	GitHub = "spf13"
+
+	[people.sven]
+	Name = "Sven Dowideit"
+	Email = "SvenDowideit@home.org.au"
+	GitHub = "SvenDowideit"
+
+	[people.tianon]
+	Name = "Tianon Gravi"
+	Email = "admwiggin@gmail.com"
+	GitHub = "tianon"
+
+	[people.tibor]
+	Name = "Tibor Vass"
+	Email = "tibor@docker.com"
+	GitHub = "tiborvass"
+
+	[people.vbatts]
+	Name = "Vincent Batts"
+	Email = "vbatts@redhat.com"
+	GitHub = "vbatts"
+
+	[people.vieux]
+	Name = "Victor Vieux"
+	Email = "vieux@docker.com"
+	GitHub = "vieux"
+
+	[people.vmarmol]
+	Name = "Victor Marmol"
+	Email = "vmarmol@google.com"
+	GitHub = "vmarmol"
+
+	[people.jnagal]
+	Name = "Rohit Jnagal"
+	Email = "jnagal@google.com"
+	GitHub = "rjnagal"
+
+	[people.mpatel]
+	Name = "Mrunal Patel"
+	Email = "mpatel@redhat.com"
+	GitHub = "mrunalp"
+
+	[people.unclejack]
+	Name = "Cristian Staretu"
+	Email = "cristian.staretu@gmail.com"
+	GitHub = "unclejack"
+
+	[people.vish]
+	Name = "Vishnu Kannan"
+	Email = "vishnuk@google.com"
+	GitHub = "vishh"
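
The Makefile change below adds a `validate-toml` step, so this file now has to parse as TOML. A minimal sketch of such a check, assuming the BurntSushi/toml package; the real validator's implementation is not shown in this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	// Decode the file loosely; we only care that it parses as TOML and
	// that the [people] table is present with the expected fields.
	var doc struct {
		People map[string]struct {
			Name   string
			Email  string
			GitHub string
		} `toml:"people"`
	}
	if _, err := toml.DecodeFile("MAINTAINERS", &doc); err != nil {
		log.Fatalf("MAINTAINERS is not valid TOML: %v", err)
	}
	fmt.Printf("parsed %d people entries\n", len(doc.People))
}
```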

+ 12 - 5
Makefile

@@ -1,4 +1,4 @@
-.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate
+.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate
 
 # env vars passed through directly to Docker's build scripts
 # to allow things like `make DOCKER_CLIENTONLY=1 binary` easily
@@ -30,7 +30,7 @@ DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
 
 DOCKER_RUN_DOCKER := docker run --rm -it --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
 
-DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET
+DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE
 
 # for some docs workarounds (see below in "docs-build" target)
 GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
@@ -53,10 +53,13 @@ docs-shell: docs-build
 	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
 
 docs-release: docs-build
-	$(DOCKER_RUN_DOCS) -e OPTIONS -e BUILD_ROOT "$(DOCKER_DOCS_IMAGE)" ./release.sh
+	$(DOCKER_RUN_DOCS) -e OPTIONS -e BUILD_ROOT -e DISTRIBUTION_ID "$(DOCKER_DOCS_IMAGE)" ./release.sh
+
+docs-test: docs-build
+	$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./test.sh
 
 test: build
-	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli
+	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli test-docker-py
 
 test-unit: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh test-unit
@@ -67,8 +70,11 @@ test-integration: build
 test-integration-cli: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli
 
+test-docker-py: build
+	$(DOCKER_RUN_DOCKER) hack/make.sh binary test-docker-py
+
 validate: build
-	$(DOCKER_RUN_DOCKER) hack/make.sh validate-gofmt validate-dco
+	$(DOCKER_RUN_DOCKER) hack/make.sh validate-gofmt validate-dco validate-toml
 
 shell: build
 	$(DOCKER_RUN_DOCKER) bash
@@ -77,6 +83,7 @@ build: bundles
 	docker build -t "$(DOCKER_IMAGE)" .
 
 docs-build:
+	( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs docs/ > docs/changed-files
 	cp ./VERSION docs/VERSION
 	echo "$(GIT_BRANCH)" > docs/GIT_BRANCH
 	echo "$(AWS_S3_BUCKET)" > docs/AWS_S3_BUCKET

+ 55 - 21
README.md

@@ -5,14 +5,14 @@ Docker is an open source project to pack, ship and run any application
 as a lightweight container
 
 Docker containers are both *hardware-agnostic* and *platform-agnostic*.
-This means that they can run anywhere, from your laptop to the largest
+This means they can run anywhere, from your laptop to the largest
 EC2 compute instance and everything in between - and they don't require
-that you use a particular language, framework or packaging system. That
+you to use a particular language, framework or packaging system. That
 makes them great building blocks for deploying and scaling web apps,
-databases and backend services without depending on a particular stack
+databases, and backend services without depending on a particular stack
 or provider.
 
-Docker is an open-source implementation of the deployment engine which
+Docker began as an open-source implementation of the deployment engine which
 powers [dotCloud](http://dotcloud.com), a popular Platform-as-a-Service.
 It benefits directly from the experience accumulated over several years
 of large-scale operation and support of hundreds of thousands of
@@ -22,7 +22,7 @@ applications and databases.
 
 ## Security Disclosure
 
-Security is very important to us.  If you have any issue regarding security, 
+Security is very important to us. If you have any issue regarding security, 
 please disclose the information responsibly by sending an email to 
 security@docker.com and not by creating a github issue.
 
@@ -59,24 +59,24 @@ now support the primitives necessary for containerization, including
 Linux with [openvz](http://openvz.org),
 [vserver](http://linux-vserver.org) and more recently
 [lxc](http://lxc.sourceforge.net), Solaris with
-[zones](http://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc)
+[zones](http://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc),
 and FreeBSD with
 [Jails](http://www.freebsd.org/doc/handbook/jails.html).
 
 Docker builds on top of these low-level primitives to offer developers a
-portable format and runtime environment that solves all 4 problems.
+portable format and runtime environment that solves all four problems.
 Docker containers are small (and their transfer can be optimized with
 layers), they have basically zero memory and cpu overhead, they are
-completely portable and are designed from the ground up with an
+completely portable, and are designed from the ground up with an
 application-centric design.
 
-The best part: because Docker operates at the OS level, it can still be
+Perhaps best of all, because Docker operates at the OS level, it can still be
 run inside a VM!
 
 ## Plays well with others
 
-Docker does not require that you buy into a particular programming
-language, framework, packaging system or configuration language.
+Docker does not require you to buy into a particular programming
+language, framework, packaging system, or configuration language.
 
 Is your application a Unix process? Does it use files, tcp connections,
 environment variables, standard Unix streams and command-line arguments
@@ -100,21 +100,21 @@ This is usually difficult for several reasons:
     typically don't work well with each other, requiring awkward
     custom integrations.
 
-  * Conflicting dependencies. Different applications may depend on
+  * *Conflicting dependencies*. Different applications may depend on
     different versions of the same dependency. Packaging tools handle
     these situations with various degrees of ease - but they all
     handle them in different and incompatible ways, which again forces
     the developer to do extra work.
   
-  * Custom dependencies. A developer may need to prepare a custom
+  * *Custom dependencies*. A developer may need to prepare a custom
     version of their application's dependency. Some packaging systems
     can handle custom versions of a dependency, others can't - and all
     of them handle it differently.
 
 
-Docker solves dependency hell by giving the developer a simple way to
-express *all* their application's dependencies in one place, and
-streamline the process of assembling them. If this makes you think of
+Docker solves the problem of dependency hell by giving the developer a simple
+way to express *all* their application's dependencies in one place, while
+streamlining the process of assembling them. If this makes you think of
 [XKCD 927](http://xkcd.com/927/), don't worry. Docker doesn't
 *replace* your favorite packaging systems. It simply orchestrates
 their use in a simple and repeatable way. How does it do that? With
@@ -178,19 +178,35 @@ Contributing to Docker
 ======================
 
 [![GoDoc](https://godoc.org/github.com/docker/docker?status.png)](https://godoc.org/github.com/docker/docker)
-[![Build Status](https://ci.dockerproject.com/github.com/docker/docker/status.svg?branch=master)](https://ci.dockerproject.com/github.com/docker/docker)
+[![Jenkins Build Status](https://jenkins.dockerproject.com/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.com/job/Docker%20Master/)
 
-Want to hack on Docker? Awesome! There are instructions to get you
-started [here](CONTRIBUTING.md). If you'd like to contribute to the
+Want to hack on Docker? Awesome! We have [instructions to help you get
+started](CONTRIBUTING.md). If you'd like to contribute to the
 documentation, please take a look at this [README.md](https://github.com/docker/docker/blob/master/docs/README.md).
 
 These instructions are probably not perfect, please let us know if anything
-feels wrong or incomplete.
+feels wrong or incomplete. Better yet, submit a PR and improve them yourself.
+
+Want to run Docker from a master build? You can download 
+master builds at [master.dockerproject.com](https://master.dockerproject.com). 
+They are updated with each commit merged into the master branch.
+
+Don't know how to use that super cool new feature in the master build? Check
+out the master docs at
+[docs.master.dockerproject.com](http://docs.master.dockerproject.com).
+
+How the project is run
+======================
+
+Docker is a very, very active project. If you want to learn more about how it is run,
+or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project).
+
+We are always open to suggestions on process improvements, and are always looking for more maintainers.
 
 ### Legal
 
 *Brought to you courtesy of our legal counsel. For more context,
-please see the Notice document.*
+please see the "NOTICE" document in this repo.*
 
 Use and transfer of Docker may be subject to certain restrictions by the
 United States and other governments.  
@@ -206,3 +222,21 @@ Docker is licensed under the Apache License, Version 2.0. See
 [LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
 license text.
 
+Other Docker Related Projects
+=============================
+There are a number of projects under development that are based on Docker's
+core technology. These projects expand the tooling built around the
+Docker platform to broaden its application and utility. 
+
+If you know of another project underway that should be listed here, please help
+us keep this list up-to-date by submitting a PR.
+
+* [Docker Registry](https://github.com/docker/docker-registry): Registry 
+server for Docker (hosting/delivering of repositories and images) 
+* [Docker Machine](https://github.com/docker/machine): Machine management 
+for a container-centric world 
+* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering 
+system 
+* [Docker Compose, aka Fig](https://github.com/docker/fig): 
+Multi-container application management
+

+ 1 - 1
VERSION

@@ -1 +1 @@
-1.4.1
+1.5.0

+ 17 - 10
api/client/cli.go

@@ -17,7 +17,6 @@ import (
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/docker/registry"
-	"github.com/docker/libtrust"
 )
 
 type DockerCli struct {
@@ -27,7 +26,7 @@ type DockerCli struct {
 	in         io.ReadCloser
 	out        io.Writer
 	err        io.Writer
-	key        libtrust.PrivateKey
+	keyFile    string
 	tlsConfig  *tls.Config
 	scheme     string
 	// inFd holds file descriptor of the client's STDIN, if it's a valid file
@@ -75,24 +74,31 @@ func (cli *DockerCli) Cmd(args ...string) error {
 	if len(args) > 0 {
 		method, exists := cli.getMethod(args[0])
 		if !exists {
-			fmt.Println("Error: Command not found:", args[0])
-			return cli.CmdHelp()
+			fmt.Fprintf(cli.err, "docker: '%s' is not a docker command. See 'docker --help'.\n", args[0])
+			os.Exit(1)
 		}
 		return method(args[1:]...)
 	}
 	return cli.CmdHelp()
 }
 
-func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet {
-	flags := flag.NewFlagSet(name, flag.ContinueOnError)
+func (cli *DockerCli) Subcmd(name, signature, description string, exitOnError bool) *flag.FlagSet {
+	var errorHandling flag.ErrorHandling
+	if exitOnError {
+		errorHandling = flag.ExitOnError
+	} else {
+		errorHandling = flag.ContinueOnError
+	}
+	flags := flag.NewFlagSet(name, errorHandling)
 	flags.Usage = func() {
 		options := ""
 		if flags.FlagCountUndeprecated() > 0 {
 			options = "[OPTIONS] "
 		}
-		fmt.Fprintf(cli.err, "\nUsage: docker %s %s%s\n\n%s\n\n", name, options, signature, description)
+		fmt.Fprintf(cli.out, "\nUsage: docker %s %s%s\n\n%s\n\n", name, options, signature, description)
+		flags.SetOutput(cli.out)
 		flags.PrintDefaults()
-		os.Exit(2)
+		os.Exit(0)
 	}
 	return flags
 }
@@ -115,7 +121,7 @@ func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error {
 	return nil
 }
 
-func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, proto, addr string, tlsConfig *tls.Config) *DockerCli {
+func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, addr string, tlsConfig *tls.Config) *DockerCli {
 	var (
 		inFd          uintptr
 		outFd         uintptr
@@ -148,6 +154,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey,
 
 	// The transport is created here for reuse during the client session
 	tr := &http.Transport{
+		Proxy:           http.ProxyFromEnvironment,
 		TLSClientConfig: tlsConfig,
 	}
 
@@ -169,7 +176,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey,
 		in:            in,
 		out:           out,
 		err:           err,
-		key:           key,
+		keyFile:       keyFile,
 		inFd:          inFd,
 		outFd:         outFd,
 		isTerminalIn:  isTerminalIn,

The file diff has been suppressed because it is too large
+ 227 - 294
api/client/commands.go


+ 21 - 1
api/client/hijack.go

@@ -72,6 +72,15 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con
 	if err != nil {
 		return nil, err
 	}
+	// When we set up a TCP connection for hijack, there could be long periods
+	// of inactivity (a long-running command with no output) that in certain
+	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+	// state. Setting TCP KeepAlive on the socket connection will prevent
+	// ECONNTIMEOUT unless the socket connection is truly broken.
+	if tcpConn, ok := rawConn.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
 
 	colonPos := strings.LastIndex(addr, ":")
 	if colonPos == -1 {
@@ -134,10 +143,21 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
 		return err
 	}
 	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
-	req.Header.Set("Content-Type", "plain/text")
+	req.Header.Set("Content-Type", "text/plain")
+	req.Header.Set("Connection", "Upgrade")
+	req.Header.Set("Upgrade", "tcp")
 	req.Host = cli.addr
 
 	dial, err := cli.dial()
+	// When we set up a TCP connection for hijack, there could be long periods
+	// of inactivity (a long-running command with no output) that in certain
+	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+	// state. Setting TCP KeepAlive on the socket connection will prevent
+	// ECONNTIMEOUT unless the socket connection is truly broken.
+	if tcpConn, ok := dial.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
 	if err != nil {
 		if strings.Contains(err.Error(), "connection refused") {
 			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
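
The keepalive comment is duplicated across both dial paths above. As a standalone sketch of the pattern (the 30-second period matches the diff; the address and everything else here is illustrative):

```go
package main

import (
	"log"
	"net"
	"time"
)

func main() {
	// Illustrative address; any long-lived TCP endpoint works the same way.
	conn, err := net.Dial("tcp", "127.0.0.1:2375")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Enable keepalive so that a long-idle hijacked session (for example an
	// attached container that produces no output) is not silently dropped
	// by intermediate network equipment.
	if tcpConn, ok := conn.(*net.TCPConn); ok {
		tcpConn.SetKeepAlive(true)
		tcpConn.SetKeepAlivePeriod(30 * time.Second)
	}
}
```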

+ 3 - 3
api/client/utils.go

@@ -66,7 +66,7 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b
 	if passAuthInfo {
 		cli.LoadConfigFile()
 		// Resolve the Auth config relevant for this server
-		authConfig := cli.configFile.ResolveAuthConfig(registry.IndexServerAddress())
+		authConfig := cli.configFile.Configs[registry.IndexServerAddress()]
 		getHeaders := func(authConfig registry.AuthConfig) (map[string][]string, error) {
 			buf, err := json.Marshal(authConfig)
 			if err != nil {
@@ -89,7 +89,7 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b
 	if data != nil {
 		req.Header.Set("Content-Type", "application/json")
 	} else if method == "POST" {
-		req.Header.Set("Content-Type", "plain/text")
+		req.Header.Set("Content-Type", "text/plain")
 	}
 	resp, err := cli.HTTPClient().Do(req)
 	if err != nil {
@@ -135,7 +135,7 @@ func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in
 	req.URL.Host = cli.addr
 	req.URL.Scheme = cli.scheme
 	if method == "POST" {
-		req.Header.Set("Content-Type", "plain/text")
+		req.Header.Set("Content-Type", "text/plain")
 	}
 
 	if headers != nil {

+ 7 - 6
api/common.go

@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"mime"
 	"os"
-	"path"
+	"path/filepath"
 	"strings"
 
 	log "github.com/Sirupsen/logrus"
@@ -15,9 +15,10 @@ import (
 )
 
 const (
-	APIVERSION        version.Version = "1.16"
-	DEFAULTHTTPHOST                   = "127.0.0.1"
-	DEFAULTUNIXSOCKET                 = "/var/run/docker.sock"
+	APIVERSION            version.Version = "1.17"
+	DEFAULTHTTPHOST                       = "127.0.0.1"
+	DEFAULTUNIXSOCKET                     = "/var/run/docker.sock"
+	DefaultDockerfileName string          = "Dockerfile"
 )
 
 func ValidateHost(val string) (string, error) {
@@ -54,7 +55,7 @@ func MatchesContentType(contentType, expectedType string) bool {
 // LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
 // otherwise generates a new one
 func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
-	err := os.MkdirAll(path.Dir(trustKeyPath), 0700)
+	err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700)
 	if err != nil {
 		return nil, err
 	}
@@ -68,7 +69,7 @@ func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
 			return nil, fmt.Errorf("Error saving key file: %s", err)
 		}
 	} else if err != nil {
-		return nil, fmt.Errorf("Error loading key file: %s", err)
+		return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
 	}
 	return trustKey, nil
 }

+ 1 - 1
api/server/MAINTAINERS

@@ -1,2 +1,2 @@
 Victor Vieux <vieux@docker.com> (@vieux)
-Johan Euphrosine <proppy@google.com> (@proppy)
+# Johan Euphrosine <proppy@google.com> (@proppy)

+ 83 - 22
api/server/server.go

@@ -27,6 +27,7 @@ import (
 
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
+	"github.com/docker/docker/daemon/networkdriver/portallocator"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/pkg/listenbuffer"
 	"github.com/docker/docker/pkg/parsers"
@@ -410,6 +411,19 @@ func getContainersJSON(eng *engine.Engine, version version.Version, w http.Respo
 	return nil
 }
 
+func getContainersStats(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := parseForm(r); err != nil {
+		return err
+	}
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+	name := vars["name"]
+	job := eng.Job("container_stats", name)
+	streamJSON(job, w, true)
+	return job.Run()
+}
+
 func getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
@@ -738,6 +752,24 @@ func postContainersRestart(eng *engine.Engine, version version.Version, w http.R
 	return nil
 }
 
+func postContainerRename(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := parseForm(r); err != nil {
+		return err
+	}
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+
+	newName := r.URL.Query().Get("name")
+	job := eng.Job("container_rename", vars["name"], newName)
+	job.Setenv("t", r.Form.Get("t"))
+	if err := job.Run(); err != nil {
+		return err
+	}
+	w.WriteHeader(http.StatusNoContent)
+	return nil
+}
+
 func deleteContainers(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
@@ -887,7 +919,11 @@ func postContainersAttach(eng *engine.Engine, version version.Version, w http.Re
 
 	var errStream io.Writer
 
-	fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
+	if _, ok := r.Header["Upgrade"]; ok {
+		fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
+	} else {
+		fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
+	}
 
 	if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") {
 		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
@@ -1030,6 +1066,7 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite
 	}
 	job.Stdin.Add(r.Body)
 	job.Setenv("remote", r.FormValue("remote"))
+	job.Setenv("dockerfile", r.FormValue("dockerfile"))
 	job.Setenv("t", r.FormValue("t"))
 	job.Setenv("q", r.FormValue("q"))
 	job.Setenv("nocache", r.FormValue("nocache"))
@@ -1137,7 +1174,12 @@ func postContainerExecStart(eng *engine.Engine, version version.Version, w http.
 
 		var errStream io.Writer
 
-		fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
+		if _, ok := r.Header["Upgrade"]; ok {
+			fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
+		} else {
+			fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
+		}
+
 		if !job.GetenvBool("Tty") && version.GreaterThanOrEqualTo("1.6") {
 			errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
 			outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
@@ -1250,7 +1292,7 @@ func AttachProfiler(router *mux.Router) {
 	router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP)
 }
 
-func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion string) (*mux.Router, error) {
+func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion string) *mux.Router {
 	r := mux.NewRouter()
 	if os.Getenv("DEBUG") != "" {
 		AttachProfiler(r)
@@ -1275,6 +1317,7 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st
 			"/containers/{name:.*}/json":      getContainersByName,
 			"/containers/{name:.*}/top":       getContainersTop,
 			"/containers/{name:.*}/logs":      getContainersLogs,
+			"/containers/{name:.*}/stats":     getContainersStats,
 			"/containers/{name:.*}/attach/ws": wsContainersAttach,
 			"/exec/{id:.*}/json":              getExecByID,
 		},
@@ -1300,6 +1343,7 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st
 			"/containers/{name:.*}/exec":    postContainerExecCreate,
 			"/exec/{name:.*}/start":         postContainerExecStart,
 			"/exec/{name:.*}/resize":        postContainerExecResize,
+			"/containers/{name:.*}/rename":  postContainerRename,
 		},
 		"DELETE": {
 			"/containers/{name:.*}": deleteContainers,
@@ -1331,30 +1375,23 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st
 		}
 	}
 
-	return r, nil
+	return r
 }
 
 // ServeRequest processes a single http request to the docker remote api.
 // FIXME: refactor this to be part of Server and not require re-creating a new
 // router each time. This requires first moving ListenAndServe into Server.
-func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) error {
-	router, err := createRouter(eng, false, true, "")
-	if err != nil {
-		return err
-	}
+func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) {
+	router := createRouter(eng, false, true, "")
 	// Insert APIVERSION into the request as a convenience
 	req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path)
 	router.ServeHTTP(w, req)
-	return nil
 }
 
 // serveFd creates an http.Server and sets it up to serve given a socket activated
 // argument.
 func serveFd(addr string, job *engine.Job) error {
-	r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
-	if err != nil {
-		return err
-	}
+	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
 
 	ls, e := systemd.ListenFD(addr)
 	if e != nil {
@@ -1389,7 +1426,7 @@ func serveFd(addr string, job *engine.Job) error {
 }
 
 func lookupGidByName(nameOrGid string) (int, error) {
-	groupFile, err := user.GetGroupFile()
+	groupFile, err := user.GetGroupPath()
 	if err != nil {
 		return -1, err
 	}
@@ -1466,10 +1503,7 @@ func setSocketGroup(addr, group string) error {
 }
 
 func setupUnixHttp(addr string, job *engine.Job) (*HttpServer, error) {
-	r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
-	if err != nil {
-		return nil, err
-	}
+	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
 
 	if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) {
 		return nil, err
@@ -1493,18 +1527,45 @@ func setupUnixHttp(addr string, job *engine.Job) (*HttpServer, error) {
 	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
 }
 
+func allocateDaemonPort(addr string) error {
+	host, port, err := net.SplitHostPort(addr)
+	if err != nil {
+		return err
+	}
+
+	intPort, err := strconv.Atoi(port)
+	if err != nil {
+		return err
+	}
+
+	var hostIPs []net.IP
+	if parsedIP := net.ParseIP(host); parsedIP != nil {
+		hostIPs = append(hostIPs, parsedIP)
+	} else if hostIPs, err = net.LookupIP(host); err != nil {
+		return fmt.Errorf("failed to lookup %s address in host specification", host)
+	}
+
+	for _, hostIP := range hostIPs {
+		if _, err := portallocator.RequestPort(hostIP, "tcp", intPort); err != nil {
+			return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err)
+		}
+	}
+	return nil
+}
+
 func setupTcpHttp(addr string, job *engine.Job) (*HttpServer, error) {
 	if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") {
 		log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
 	}
 
-	r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
+	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
+
+	l, err := newListener("tcp", addr, job.GetenvBool("BufferRequests"))
 	if err != nil {
 		return nil, err
 	}
 
-	l, err := newListener("tcp", addr, job.GetenvBool("BufferRequests"))
-	if err != nil {
+	if err := allocateDaemonPort(addr); err != nil {
 		return nil, err
 	}
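
The new `allocateDaemonPort` reserves the daemon's own listening port with the port allocator so a container can no longer be mapped onto it. Its host resolution order, literal IP first with a DNS lookup as fallback, is worth seeing on its own; a minimal sketch with an illustrative address:

```go
package main

import (
	"fmt"
	"net"
)

// resolveHostIPs mirrors the lookup order used by allocateDaemonPort above:
// try the host as a literal IP first, and fall back to a DNS lookup.
func resolveHostIPs(host string) ([]net.IP, error) {
	if parsedIP := net.ParseIP(host); parsedIP != nil {
		return []net.IP{parsedIP}, nil
	}
	return net.LookupIP(host)
}

func main() {
	// Illustrative daemon address.
	host, port, err := net.SplitHostPort("127.0.0.1:2375")
	if err != nil {
		panic(err)
	}
	ips, err := resolveHostIPs(host)
	if err != nil {
		panic(err)
	}
	fmt.Printf("would reserve tcp port %s on %v for the daemon\n", port, ips)
}
```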
 

+ 1 - 3
api/server/server_unit_test.go

@@ -484,9 +484,7 @@ func serveRequestUsingVersion(method, target string, version version.Version, bo
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err := ServeRequest(eng, version, r, req); err != nil {
-		t.Fatal(err)
-	}
+	ServeRequest(eng, version, r, req)
 	return r
 }
 

+ 87 - 0
api/stats/stats.go

@@ -0,0 +1,87 @@
+// This package provides stable types for the stats API endpoint, giving
+// consumers of the endpoint a fixed contract to decode its responses against.
+package stats
+
+import "time"
+
+type ThrottlingData struct {
+	// Number of periods with throttling active
+	Periods uint64 `json:"periods"`
+	// Number of periods when the container hit its throttling limit.
+	ThrottledPeriods uint64 `json:"throttled_periods"`
+	// Aggregate time the container was throttled for in nanoseconds.
+	ThrottledTime uint64 `json:"throttled_time"`
+}
+
+// All CPU stats are aggregated since container inception.
+type CpuUsage struct {
+	// Total CPU time consumed.
+	// Units: nanoseconds.
+	TotalUsage uint64 `json:"total_usage"`
+	// Total CPU time consumed per core.
+	// Units: nanoseconds.
+	PercpuUsage []uint64 `json:"percpu_usage"`
+	// Time spent by tasks of the cgroup in kernel mode.
+	// Units: nanoseconds.
+	UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+	// Time spent by tasks of the cgroup in user mode.
+	// Units: nanoseconds.
+	UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+type CpuStats struct {
+	CpuUsage       CpuUsage       `json:"cpu_usage"`
+	SystemUsage    uint64         `json:"system_cpu_usage"`
+	ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+type MemoryStats struct {
+	// current res_counter usage for memory
+	Usage uint64 `json:"usage"`
+	// maximum usage ever recorded.
+	MaxUsage uint64 `json:"max_usage"`
+	// TODO(vishh): Export these as stronger types.
+	// all the stats exported via memory.stat.
+	Stats map[string]uint64 `json:"stats"`
+	// number of times memory usage hits limits.
+	Failcnt uint64 `json:"failcnt"`
+	Limit   uint64 `json:"limit"`
+}
+
+type BlkioStatEntry struct {
+	Major uint64 `json:"major"`
+	Minor uint64 `json:"minor"`
+	Op    string `json:"op"`
+	Value uint64 `json:"value"`
+}
+
+type BlkioStats struct {
+	// number of bytes transferred to and from the block device
+	IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
+	IoServicedRecursive     []BlkioStatEntry `json:"io_serviced_recursive"`
+	IoQueuedRecursive       []BlkioStatEntry `json:"io_queue_recursive"`
+	IoServiceTimeRecursive  []BlkioStatEntry `json:"io_service_time_recursive"`
+	IoWaitTimeRecursive     []BlkioStatEntry `json:"io_wait_time_recursive"`
+	IoMergedRecursive       []BlkioStatEntry `json:"io_merged_recursive"`
+	IoTimeRecursive         []BlkioStatEntry `json:"io_time_recursive"`
+	SectorsRecursive        []BlkioStatEntry `json:"sectors_recursive"`
+}
+
+type Network struct {
+	RxBytes   uint64 `json:"rx_bytes"`
+	RxPackets uint64 `json:"rx_packets"`
+	RxErrors  uint64 `json:"rx_errors"`
+	RxDropped uint64 `json:"rx_dropped"`
+	TxBytes   uint64 `json:"tx_bytes"`
+	TxPackets uint64 `json:"tx_packets"`
+	TxErrors  uint64 `json:"tx_errors"`
+	TxDropped uint64 `json:"tx_dropped"`
+}
+
+type Stats struct {
+	Read        time.Time   `json:"read"`
+	Network     Network     `json:"network,omitempty"`
+	CpuStats    CpuStats    `json:"cpu_stats,omitempty"`
+	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+	BlkioStats  BlkioStats  `json:"blkio_stats,omitempty"`
+}
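
These types describe the JSON documents streamed by the new `GET /containers/{name}/stats` route registered in api/server/server.go above, one document per update. A minimal consumer sketch under stated assumptions (daemon address and container name are illustrative; the JSON tags come from the types in this file):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
)

// Minimal subset of the Stats type above; the JSON tags mirror the diff.
type memoryStats struct {
	Usage uint64 `json:"usage"`
	Limit uint64 `json:"limit"`
}

type stats struct {
	MemoryStats memoryStats `json:"memory_stats"`
}

func main() {
	// Daemon address and container name are assumptions.
	resp, err := http.Get("http://127.0.0.1:2375/v1.17/containers/mycontainer/stats")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The endpoint streams one JSON document per update.
	dec := json.NewDecoder(resp.Body)
	for {
		var s stats
		if err := dec.Decode(&s); err != nil {
			if err != io.EOF {
				log.Print(err)
			}
			return
		}
		fmt.Printf("memory: %d / %d bytes\n", s.MemoryStats.Usage, s.MemoryStats.Limit)
	}
}
```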

+ 1 - 0
builder/MAINTAINERS

@@ -1,2 +1,3 @@
 Tibor Vass <teabee89@gmail.com> (@tiborvass)
 Erik Hollensbe <github@hollensbe.org> (@erikh)
+Doug Davis <dug@us.ibm.com> (@duglin)

+ 27 - 10
builder/dispatchers.go

@@ -12,6 +12,7 @@ import (
 	"io/ioutil"
 	"path/filepath"
 	"regexp"
+	"sort"
 	"strings"
 
 	log "github.com/Sirupsen/logrus"
@@ -20,6 +21,12 @@ import (
 	"github.com/docker/docker/runconfig"
 )
 
+const (
+	// NoBaseImageSpecifier is the symbol used by the FROM
+	// command to specify that no base image is to be used.
+	NoBaseImageSpecifier string = "scratch"
+)
+
 // dispatch with no layer / parsing. This is effectively not a command.
 func nullDispatch(b *Builder, args []string, attributes map[string]bool, original string) error {
 	return nil
@@ -114,6 +121,12 @@ func from(b *Builder, args []string, attributes map[string]bool, original string
 
 	name := args[0]
 
+	if name == NoBaseImageSpecifier {
+		b.image = ""
+		b.noBaseImage = true
+		return nil
+	}
+
 	image, err := b.Daemon.Repositories().LookupImage(name)
 	if b.Pull {
 		image, err = b.pullImage(name)
@@ -171,15 +184,12 @@ func workdir(b *Builder, args []string, attributes map[string]bool, original str
 
 	workdir := args[0]
 
-	if workdir[0] == '/' {
-		b.Config.WorkingDir = workdir
-	} else {
-		if b.Config.WorkingDir == "" {
-			b.Config.WorkingDir = "/"
-		}
-		b.Config.WorkingDir = filepath.Join(b.Config.WorkingDir, workdir)
+	if !filepath.IsAbs(workdir) {
+		workdir = filepath.Join("/", b.Config.WorkingDir, workdir)
 	}
 
+	b.Config.WorkingDir = workdir
+
 	return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
 }
 
@@ -193,7 +203,7 @@ func workdir(b *Builder, args []string, attributes map[string]bool, original str
 // RUN [ "echo", "hi" ] # echo hi
 //
 func run(b *Builder, args []string, attributes map[string]bool, original string) error {
-	if b.image == "" {
+	if b.image == "" && !b.noBaseImage {
 		return fmt.Errorf("Please provide a source image with `from` prior to run")
 	}
 
@@ -326,14 +336,21 @@ func expose(b *Builder, args []string, attributes map[string]bool, original stri
 		return err
 	}
 
+	// instead of using ports directly, we build a list of ports and sort it so
+	// the order is consistent. This prevents cache busts that would otherwise
+	// occur when the map iteration order changes between builds
+	portList := make([]string, len(ports))
+	var i int
 	for port := range ports {
 		if _, exists := b.Config.ExposedPorts[port]; !exists {
 			b.Config.ExposedPorts[port] = struct{}{}
 		}
+		portList[i] = string(port)
+		i++
 	}
+	sort.Strings(portList)
 	b.Config.PortSpecs = nil
-
-	return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")))
 }
 
 // USER foo
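
The sort added to `expose` matters because Go randomizes map iteration order, so the `EXPOSE` string committed to the layer, and with it the build-cache key, could differ from run to run. The pattern in isolation, with illustrative port values:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Illustrative port set; in the builder this is the parsed EXPOSE map.
	ports := map[string]struct{}{"443/tcp": {}, "80/tcp": {}, "8080/tcp": {}}

	// Map iteration order is randomized in Go, so collect the keys and sort
	// them to get a deterministic commit message for the layer.
	portList := make([]string, 0, len(ports))
	for port := range ports {
		portList = append(portList, port)
	}
	sort.Strings(portList)
	fmt.Println("EXPOSE " + strings.Join(portList, " "))
}
```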

+ 82 - 32
builder/evaluator.go

@@ -24,13 +24,15 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"path"
+	"path/filepath"
 	"strings"
 
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/fileutils"
+	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/pkg/tarsum"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
@@ -104,13 +106,14 @@ type Builder struct {
 	// both of these are controlled by the Remove and ForceRemove options in BuildOpts
 	TmpContainers map[string]struct{} // a map of containers used for removes
 
-	dockerfile  *parser.Node  // the syntax tree of the dockerfile
-	image       string        // image name for commit processing
-	maintainer  string        // maintainer name. could probably be removed.
-	cmdSet      bool          // indicates is CMD was set in current Dockerfile
-	context     tarsum.TarSum // the context is a tarball that is uploaded by the client
-	contextPath string        // the path of the temporary directory the local context is unpacked to (server side)
-
+	dockerfileName string        // name of Dockerfile
+	dockerfile     *parser.Node  // the syntax tree of the dockerfile
+	image          string        // image name for commit processing
+	maintainer     string        // maintainer name. could probably be removed.
+	cmdSet         bool          // indicates if CMD was set in current Dockerfile
+	context        tarsum.TarSum // the context is a tarball that is uploaded by the client
+	contextPath    string        // the path of the temporary directory the local context is unpacked to (server side)
+	noBaseImage    bool          // indicates that this build does not start from any base image, but is being built from an empty file system.
 }
 
 // Run the builder with the context. This is the lynchpin of this package. This
@@ -136,30 +139,10 @@ func (b *Builder) Run(context io.Reader) (string, error) {
 		}
 	}()
 
-	filename := path.Join(b.contextPath, "Dockerfile")
-
-	fi, err := os.Stat(filename)
-	if os.IsNotExist(err) {
-		return "", fmt.Errorf("Cannot build a directory without a Dockerfile")
-	}
-	if fi.Size() == 0 {
-		return "", ErrDockerfileEmpty
-	}
-
-	f, err := os.Open(filename)
-	if err != nil {
-		return "", err
-	}
-
-	defer f.Close()
-
-	ast, err := parser.Parse(f)
-	if err != nil {
+	if err := b.readDockerfile(b.dockerfileName); err != nil {
 		return "", err
 	}
 
-	b.dockerfile = ast
-
 	// some initializations that would not have been supplied by the caller.
 	b.Config = &runconfig.Config{}
 	b.TmpContainers = map[string]struct{}{}
@@ -185,6 +168,56 @@ func (b *Builder) Run(context io.Reader) (string, error) {
 	return b.image, nil
 }
 
+// Reads a Dockerfile from the current context. It assumes that the
+// 'filename' is a relative path from the root of the context
+func (b *Builder) readDockerfile(origFile string) error {
+	filename, err := symlink.FollowSymlinkInScope(filepath.Join(b.contextPath, origFile), b.contextPath)
+	if err != nil {
+		return fmt.Errorf("The Dockerfile (%s) must be within the build context", origFile)
+	}
+
+	fi, err := os.Lstat(filename)
+	if os.IsNotExist(err) {
+		return fmt.Errorf("Cannot locate specified Dockerfile: %s", origFile)
+	}
+	if fi.Size() == 0 {
+		return ErrDockerfileEmpty
+	}
+
+	f, err := os.Open(filename)
+	if err != nil {
+		return err
+	}
+
+	b.dockerfile, err = parser.Parse(f)
+	f.Close()
+
+	if err != nil {
+		return err
+	}
+
+	// After the Dockerfile has been parsed, we need to check the .dockerignore
+	// file for either "Dockerfile" or ".dockerignore", and if either are
+	// present then erase them from the build context. These files should never
+	// have been sent from the client but we did send them to make sure that
+	// we had the Dockerfile to actually parse, and then we also need the
+	// .dockerignore file to know whether either file should be removed.
+	// Note that this assumes the Dockerfile has been read into memory and
+	// is now safe to be removed.
+
+	excludes, _ := utils.ReadDockerIgnore(filepath.Join(b.contextPath, ".dockerignore"))
+	if rm, _ := fileutils.Matches(".dockerignore", excludes); rm == true {
+		os.Remove(filepath.Join(b.contextPath, ".dockerignore"))
+		b.context.(tarsum.BuilderContext).Remove(".dockerignore")
+	}
+	if rm, _ := fileutils.Matches(b.dockerfileName, excludes); rm == true {
+		os.Remove(filepath.Join(b.contextPath, b.dockerfileName))
+		b.context.(tarsum.BuilderContext).Remove(b.dockerfileName)
+	}
+
+	return nil
+}
+
 // This method is the entrypoint to all statement handling routines.
 //
 // Almost all nodes will have this structure:
@@ -212,6 +245,21 @@ func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
 		msg += " " + ast.Value
 	}
 
+	// count the number of nodes that we are going to traverse first
+	// so we can pre-create the argument and message arrays. This speeds up
+	// the allocation of those lists a lot when they have many arguments
+	cursor := ast
+	var n int
+	for cursor.Next != nil {
+		cursor = cursor.Next
+		n++
+	}
+	l := len(strs)
+	strList := make([]string, n+l)
+	copy(strList, strs)
+	msgList := make([]string, n)
+
+	var i int
 	for ast.Next != nil {
 		ast = ast.Next
 		var str string
@@ -219,16 +267,18 @@ func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
 		if _, ok := replaceEnvAllowed[cmd]; ok {
 			str = b.replaceEnv(ast.Value)
 		}
-		strs = append(strs, str)
-		msg += " " + ast.Value
+		strList[i+l] = str
+		msgList[i] = ast.Value
+		i++
 	}
 
+	msg += " " + strings.Join(msgList, " ")
 	fmt.Fprintln(b.OutStream, msg)
 
 	// XXX yes, we skip any cmds that are not valid; the parser should have
 	// picked these out already.
 	if f, ok := evaluateTable[cmd]; ok {
-		return f(b, strs, attrs, original)
+		return f(b, strList, attrs, original)
 	}
 
 	fmt.Fprintf(b.ErrStream, "# Skipping unknown instruction %s\n", strings.ToUpper(cmd))

+ 48 - 31
builder/internals.go

@@ -25,6 +25,7 @@ import (
 	imagepkg "github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/pkg/system"
@@ -58,7 +59,7 @@ func (b *Builder) readContext(context io.Reader) error {
 }
 
 func (b *Builder) commit(id string, autoCmd []string, comment string) error {
-	if b.image == "" {
+	if b.image == "" && !b.noBaseImage {
 		return fmt.Errorf("Please provide a source image with `from` prior to commit")
 	}
 	b.Config.Image = b.image
@@ -217,6 +218,18 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri
 	}
 	origPath = strings.TrimPrefix(origPath, "./")
 
+	// Twiddle the destPath when it's a relative path - meaning, make it
+	// relative to the WORKDIR
+	if !filepath.IsAbs(destPath) {
+		hasSlash := strings.HasSuffix(destPath, "/")
+		destPath = filepath.Join("/", b.Config.WorkingDir, destPath)
+
+		// Make sure we preserve any trailing slash
+		if hasSlash {
+			destPath += "/"
+		}
+	}
+
 	// In the remote/URL case, download it and gen its hashcode
 	if urlutil.IsURL(origPath) {
 		if !allowRemote {
@@ -296,22 +309,20 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri
 			ci.destPath = ci.destPath + filename
 		}
 
-		// Calc the checksum, only if we're using the cache
-		if b.UtilizeCache {
-			r, err := archive.Tar(tmpFileName, archive.Uncompressed)
-			if err != nil {
-				return err
-			}
-			tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
-			if err != nil {
-				return err
-			}
-			if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
-				return err
-			}
-			ci.hash = tarSum.Sum(nil)
-			r.Close()
+		// Calc the checksum, even if we're using the cache
+		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
+		if err != nil {
+			return err
+		}
+		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
+		if err != nil {
+			return err
 		}
+		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
+			return err
+		}
+		ci.hash = tarSum.Sum(nil)
+		r.Close()
 
 		return nil
 	}
@@ -346,12 +357,6 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri
 	ci.decompress = allowDecompression
 	*cInfos = append(*cInfos, &ci)
 
-	// If not using cache don't need to do anything else.
-	// If we are using a cache then calc the hash for the src file/dir
-	if !b.UtilizeCache {
-		return nil
-	}
-
 	// Deal with the single file case
 	if !fi.IsDir() {
 		// This will match first file in sums of the archive
@@ -378,7 +383,15 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri
 
 	for _, fileInfo := range b.context.GetSums() {
 		absFile := path.Join(b.contextPath, fileInfo.Name())
-		if strings.HasPrefix(absFile, absOrigPath) || absFile == absOrigPathNoSlash {
+		// Any file in the context that starts with the given path will be
+		// picked up and its hashcode used.  However, we'll exclude the
+		// root dir itself.  We do this for a couple of reasons:
+		// 1 - ADD/COPY will not copy the dir itself, just its children
+		//     so there's no reason to include it in the hash calc
+		// 2 - the metadata on the dir will change when any child file
+		//     changes.  This will lead to a miss in the cache check if that
+		//     child file is in the .dockerignore list.
+		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
 			subfiles = append(subfiles, fileInfo.Sum())
 		}
 	}
@@ -407,21 +420,21 @@ func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
 	if tag == "" {
 		tag = "latest"
 	}
+	job := b.Engine.Job("pull", remote, tag)
 	pullRegistryAuth := b.AuthConfig
 	if len(b.AuthConfigFile.Configs) > 0 {
 		// The request came with a full auth config file, we prefer to use that
-		endpoint, _, err := registry.ResolveRepositoryName(remote)
+		repoInfo, err := registry.ResolveRepositoryInfo(job, remote)
 		if err != nil {
 			return nil, err
 		}
-		resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(endpoint)
+		resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(repoInfo.Index)
 		pullRegistryAuth = &resolvedAuth
 	}
-	job := b.Engine.Job("pull", remote, tag)
 	job.SetenvBool("json", b.StreamFormatter.Json())
 	job.SetenvBool("parallel", true)
 	job.SetenvJson("authConfig", pullRegistryAuth)
-	job.Stdout.Add(b.OutOld)
+	job.Stdout.Add(ioutils.NopWriteCloser(b.OutOld))
 	if err := job.Run(); err != nil {
 		return nil, err
 	}
@@ -501,7 +514,7 @@ func (b *Builder) probeCache() (bool, error) {
 }
 
 func (b *Builder) create() (*daemon.Container, error) {
-	if b.image == "" {
+	if b.image == "" && !b.noBaseImage {
 		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
 	}
 	b.Config.Image = b.image
@@ -520,9 +533,13 @@ func (b *Builder) create() (*daemon.Container, error) {
 	b.TmpContainers[c.ID] = struct{}{}
 	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
 
-	// override the entry point that may have been picked up from the base image
-	c.Path = config.Cmd[0]
-	c.Args = config.Cmd[1:]
+	if len(config.Cmd) > 0 {
+		// override the entry point that may have been picked up from the base image
+		c.Path = config.Cmd[0]
+		c.Args = config.Cmd[1:]
+	} else {
+		config.Cmd = []string{}
+	}
 
 	return c, nil
 }
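
The destPath handling above leans on `filepath.Join` both to anchor relative destinations under the working directory and to clean the result; since Join strips a trailing slash, the code re-appends one to preserve the "destination is a directory" marker. A quick illustration (the working directory value is illustrative):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	workingDir := "/go/src"

	for _, dest := range []string{"app/", "/opt/app", "app"} {
		out := dest
		if !filepath.IsAbs(out) {
			hasSlash := strings.HasSuffix(out, "/")
			out = filepath.Join("/", workingDir, out) // Join drops the trailing "/"
			if hasSlash {
				out += "/"
			}
		}
		fmt.Printf("%-10s -> %s\n", dest, out)
	}
}
```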

+ 10 - 2
builder/job.go

@@ -6,6 +6,7 @@ import (
 	"os"
 	"os/exec"
 
+	"github.com/docker/docker/api"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/graph"
@@ -30,6 +31,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 		return job.Errorf("Usage: %s\n", job.Name)
 	}
 	var (
+		dockerfileName = job.Getenv("dockerfile")
 		remoteURL      = job.Getenv("remote")
 		repoName       = job.Getenv("t")
 		suppressOutput = job.GetenvBool("q")
@@ -42,12 +44,13 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 		tag            string
 		context        io.ReadCloser
 	)
+
 	job.GetenvJson("authConfig", authConfig)
 	job.GetenvJson("configFile", configFile)
 
 	repoName, tag = parsers.ParseRepositoryTag(repoName)
 	if repoName != "" {
-		if _, _, err := registry.ResolveRepositoryName(repoName); err != nil {
+		if err := registry.ValidateRepositoryName(repoName); err != nil {
 			return job.Error(err)
 		}
 		if len(tag) > 0 {
@@ -57,6 +60,10 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 		}
 	}
 
+	if dockerfileName == "" {
+		dockerfileName = api.DefaultDockerfileName
+	}
+
 	if remoteURL == "" {
 		context = ioutil.NopCloser(job.Stdin)
 	} else if urlutil.IsGitURL(remoteURL) {
@@ -88,7 +95,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 		if err != nil {
 			return job.Error(err)
 		}
-		c, err := archive.Generate("Dockerfile", string(dockerFile))
+		c, err := archive.Generate(dockerfileName, string(dockerFile))
 		if err != nil {
 			return job.Error(err)
 		}
@@ -118,6 +125,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 		StreamFormatter: sf,
 		AuthConfig:      authConfig,
 		AuthConfigFile:  configFile,
+		dockerfileName:  dockerfileName,
 	}
 
 	id, err := builder.Run(context)
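
The new "dockerfile" job parameter lets a build name any file in the context rather than the default taken from `api.DefaultDockerfileName`. A sketch of exercising it over the remote API; the daemon address, image tag, and file name are assumptions, while the query parameter is the one read by `postBuild` above:

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Build a one-file context containing a custom-named Dockerfile.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	df := []byte("FROM busybox\nCMD [\"true\"]\n")
	if err := tw.WriteHeader(&tar.Header{Name: "Dockerfile.custom", Mode: 0644, Size: int64(len(df))}); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write(df); err != nil {
		log.Fatal(err)
	}
	tw.Close()

	// The "dockerfile" query parameter selects which file in the context
	// to build from; daemon address and tag are assumptions.
	resp, err := http.Post(
		"http://127.0.0.1:2375/v1.17/build?dockerfile=Dockerfile.custom&t=example",
		"application/tar",
		&buf,
	)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("%s", out)
}
```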

+ 55 - 0
builder/parser/json_test.go

@@ -0,0 +1,55 @@
+package parser
+
+import (
+	"testing"
+)
+
+var invalidJSONArraysOfStrings = []string{
+	`["a",42,"b"]`,
+	`["a",123.456,"b"]`,
+	`["a",{},"b"]`,
+	`["a",{"c": "d"},"b"]`,
+	`["a",["c"],"b"]`,
+	`["a",true,"b"]`,
+	`["a",false,"b"]`,
+	`["a",null,"b"]`,
+}
+
+var validJSONArraysOfStrings = map[string][]string{
+	`[]`:           {},
+	`[""]`:         {""},
+	`["a"]`:        {"a"},
+	`["a","b"]`:    {"a", "b"},
+	`[ "a", "b" ]`: {"a", "b"},
+	`[	"a",	"b"	]`: {"a", "b"},
+	`	[	"a",	"b"	]	`: {"a", "b"},
+	`["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"},
+}
+
+func TestJSONArraysOfStrings(t *testing.T) {
+	for json, expected := range validJSONArraysOfStrings {
+		if node, _, err := parseJSON(json); err != nil {
+			t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err)
+		} else {
+			i := 0
+			for node != nil {
+				if i >= len(expected) {
+					t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json)
+				}
+				if node.Value != expected[i] {
+					t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i)
+				}
+				node = node.Next
+				i++
+			}
+			if i != len(expected) {
+				t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json)
+			}
+		}
+	}
+	for _, json := range invalidJSONArraysOfStrings {
+		if _, _, err := parseJSON(json); err != errDockerfileNotStringArray {
+			t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json)
+		}
+	}
+}

+ 16 - 24
builder/parser/line_parsers.go

@@ -10,13 +10,12 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"strconv"
 	"strings"
 	"unicode"
 )
 
 var (
-	errDockerfileJSONNesting = errors.New("You may not nest arrays in Dockerfile statements.")
+	errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.")
 )
 
 // ignore the current argument. This will still leave a command parsed, but
@@ -209,34 +208,27 @@ func parseString(rest string) (*Node, map[string]bool, error) {
 
 // parseJSON converts JSON arrays to an AST.
 func parseJSON(rest string) (*Node, map[string]bool, error) {
-	var (
-		myJson   []interface{}
-		next     = &Node{}
-		orignext = next
-		prevnode = next
-	)
-
+	var myJson []interface{}
 	if err := json.Unmarshal([]byte(rest), &myJson); err != nil {
 		return nil, nil, err
 	}
 
+	var top, prev *Node
 	for _, str := range myJson {
-		switch str.(type) {
-		case string:
-		case float64:
-			str = strconv.FormatFloat(str.(float64), 'G', -1, 64)
-		default:
-			return nil, nil, errDockerfileJSONNesting
+		if s, ok := str.(string); !ok {
+			return nil, nil, errDockerfileNotStringArray
+		} else {
+			node := &Node{Value: s}
+			if prev == nil {
+				top = node
+			} else {
+				prev.Next = node
+			}
+			prev = node
 		}
-		next.Value = str.(string)
-		next.Next = &Node{}
-		prevnode = next
-		next = next.Next
 	}
 
-	prevnode.Next = nil
-
-	return orignext, map[string]bool{"json": true}, nil
+	return top, map[string]bool{"json": true}, nil
 }
 
 // parseMaybeJSON determines if the argument appears to be a JSON array. If
@@ -250,7 +242,7 @@ func parseMaybeJSON(rest string) (*Node, map[string]bool, error) {
 	if err == nil {
 		return node, attrs, nil
 	}
-	if err == errDockerfileJSONNesting {
+	if err == errDockerfileNotStringArray {
 		return nil, nil, err
 	}
 
@@ -270,7 +262,7 @@ func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) {
 	if err == nil {
 		return node, attrs, nil
 	}
-	if err == errDockerfileJSONNesting {
+	if err == errDockerfileNotStringArray {
 		return nil, nil, err
 	}
 

+ 9 - 7
builder/parser/parser.go

@@ -3,6 +3,7 @@ package parser
 
 import (
 	"bufio"
+	"fmt"
 	"io"
 	"regexp"
 	"strings"
@@ -32,7 +33,7 @@ type Node struct {
 var (
 	dispatch                map[string]func(string) (*Node, map[string]bool, error)
 	TOKEN_WHITESPACE        = regexp.MustCompile(`[\t\v\f\r ]+`)
-	TOKEN_LINE_CONTINUATION = regexp.MustCompile(`\\\s*$`)
+	TOKEN_LINE_CONTINUATION = regexp.MustCompile(`\\[ \t]*$`)
 	TOKEN_COMMENT           = regexp.MustCompile(`^#.*$`)
 )
 
@@ -50,8 +51,8 @@ func init() {
 		"env":        parseEnv,
 		"maintainer": parseString,
 		"from":       parseString,
-		"add":        parseStringsWhitespaceDelimited,
-		"copy":       parseStringsWhitespaceDelimited,
+		"add":        parseMaybeJSONToList,
+		"copy":       parseMaybeJSONToList,
 		"run":        parseMaybeJSON,
 		"cmd":        parseMaybeJSON,
 		"entrypoint": parseMaybeJSON,
@@ -77,6 +78,10 @@ func parseLine(line string) (string, *Node, error) {
 		return "", nil, err
 	}
 
+	if len(args) == 0 {
+		return "", nil, fmt.Errorf("Instruction %q is empty; cannot continue", cmd)
+	}
+
 	node := &Node{}
 	node.Value = cmd
 
@@ -85,10 +90,7 @@ func parseLine(line string) (string, *Node, error) {
 		return "", nil, err
 	}
 
-	if sexp.Value != "" || sexp.Next != nil || sexp.Children != nil {
-		node.Next = sexp
-	}
-
+	node.Next = sexp
 	node.Attributes = attrs
 	node.Original = line
 

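Two fixes land in parser.go: the line-continuation regex now accepts only spaces and tabs after the trailing backslash (where \s also matched \r, \v and \f), and an instruction with no arguments is rejected outright instead of yielding an empty node (exercised by the new negative test fixture below). The regex difference in isolation, as a toy demonstration rather than parser code:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	loose := regexp.MustCompile(`\\\s*$`)     // old: \s matches \r, \v, \f as well
	strict := regexp.MustCompile(`\\[ \t]*$`) // new: only spaces and tabs qualify

	line := "RUN apt-get update \\\v" // a backslash followed by a vertical tab
	fmt.Println(loose.MatchString(line))  // true: treated as a line continuation
	fmt.Println(strict.MatchString(line)) // false: the instruction ends here
}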
+ 2 - 9
builder/parser/parser_test.go

@@ -54,18 +54,14 @@ func TestTestData(t *testing.T) {
 		if err != nil {
 			t.Fatalf("Dockerfile missing for %s: %s", dir.Name(), err.Error())
 		}
-
-		rf, err := os.Open(resultfile)
-		if err != nil {
-			t.Fatalf("Result file missing for %s: %s", dir.Name(), err.Error())
-		}
+		defer df.Close()
 
 		ast, err := Parse(df)
 		if err != nil {
 			t.Fatalf("Error parsing %s's dockerfile: %s", dir.Name(), err.Error())
 		}
 
-		content, err := ioutil.ReadAll(rf)
+		content, err := ioutil.ReadFile(resultfile)
 		if err != nil {
 			t.Fatalf("Error reading %s's result file: %s", dir.Name(), err.Error())
 		}
@@ -75,8 +71,5 @@ func TestTestData(t *testing.T) {
 			fmt.Fprintln(os.Stderr, "Expected:\n"+string(content))
 			t.Fatalf("%s: AST dump of dockerfile does not match result", dir.Name())
 		}
-
-		df.Close()
-		rf.Close()
 	}
 }

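One subtlety in the rewritten test loop: defer df.Close() inside the range loop queues every close until TestTestData itself returns. That is harmless for a small testdata directory, but the usual way to scope a defer per iteration is to move the body into its own function, as in this generic sketch (hypothetical file names, not a patch to the test above):

package main

import (
	"fmt"
	"os"
)

// process opens and handles a single file; its defer runs when process
// returns, so the descriptor is released at the end of each iteration
// instead of accumulating in the caller.
func process(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	// ... read and compare ...
	return nil
}

func main() {
	for _, p := range []string{"a.txt", "b.txt"} {
		if err := process(p); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
}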
+ 8 - 0
builder/parser/testfiles-negative/empty-instruction/Dockerfile

@@ -0,0 +1,8 @@
+FROM dockerfile/rabbitmq
+
+RUN
+  rabbitmq-plugins enable \
+   rabbitmq_shovel \
+   rabbitmq_shovel_management \
+   rabbitmq_federation \
+   rabbitmq_federation_management

+ 9 - 0
builder/parser/testfiles/ADD-COPY-with-JSON/Dockerfile

@@ -0,0 +1,9 @@
+FROM	ubuntu:14.04
+MAINTAINER	Seongyeol Lim <seongyeol37@gmail.com>
+
+COPY	.	/go/src/github.com/docker/docker
+ADD		.	/
+ADD		[ "vimrc", "/tmp" ]
+COPY	[ "bashrc", "/tmp" ]
+COPY	[ "test file", "/tmp" ]
+ADD		[ "test file", "/tmp/test file" ]

+ 8 - 0
builder/parser/testfiles/ADD-COPY-with-JSON/result

@@ -0,0 +1,8 @@
+(from "ubuntu:14.04")
+(maintainer "Seongyeol Lim <seongyeol37@gmail.com>")
+(copy "." "/go/src/github.com/docker/docker")
+(add "." "/")
+(add "vimrc" "/tmp")
+(copy "bashrc" "/tmp")
+(copy "test file" "/tmp")
+(add "test file" "/tmp/test file")

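This fixture also documents why ADD and COPY gained the JSON form: in the whitespace-delimited form a path containing a space is split into separate arguments, while the JSON form keeps "test file" intact. A toy comparison, reusing the same whitespace pattern the parser uses:

package main

import (
	"encoding/json"
	"fmt"
	"regexp"
)

var ws = regexp.MustCompile(`[\t\v\f\r ]+`) // TOKEN_WHITESPACE from parser.go

func main() {
	// Whitespace-delimited: the space inside the filename splits the argument.
	parts := ws.Split("test file /tmp", -1)
	fmt.Println(len(parts), parts) // 3 [test file /tmp]

	// JSON form: the filename survives as a single argument.
	var args []string
	_ = json.Unmarshal([]byte(`[ "test file", "/tmp" ]`), &args)
	fmt.Println(len(args), args) // 2 [test file /tmp]
}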
+ 1 - 1
builder/parser/testfiles/brimstone-consuldock/result

@@ -2,4 +2,4 @@
 (maintainer "brimstone@the.narro.ws")
 (env "GOPATH" "/go")
 (entrypoint "/usr/local/bin/consuldock")
-(run "apt-get update 	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean     && apt-get install -y --no-install-recommends git golang ca-certificates     && apt-get clean     && rm -rf /var/lib/apt/lists 	&& go get -v github.com/brimstone/consuldock     && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock 	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty 	&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') 	&& rm /tmp/dpkg.* 	&& rm -rf $GOPATH")
+(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean     && apt-get install -y --no-install-recommends git golang ca-certificates     && apt-get clean     && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock     && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH")

+ 3 - 3
builder/parser/testfiles/brimstone-docker-consul/result

@@ -2,8 +2,8 @@
 (cmd)
 (entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui")
 (expose "8500" "8600" "8400" "8301" "8302")
-(run "apt-get update     && apt-get install -y unzip wget 	&& apt-get clean 	&& rm -rf /var/lib/apt/lists")
+(run "apt-get update     && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists")
 (run "cd /tmp     && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip        -O web_ui.zip     && unzip web_ui.zip     && mv dist /webui     && rm web_ui.zip")
-(run "apt-get update 	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean     && apt-get install -y --no-install-recommends unzip wget     && apt-get clean     && rm -rf /var/lib/apt/lists     && cd /tmp     && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip        -O web_ui.zip     && unzip web_ui.zip     && mv dist /webui     && rm web_ui.zip 	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty 	&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') 	&& rm /tmp/dpkg.*")
+(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean     && apt-get install -y --no-install-recommends unzip wget     && apt-get clean     && rm -rf /var/lib/apt/lists     && cd /tmp     && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip        -O web_ui.zip     && unzip web_ui.zip     && mv dist /webui     && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*")
 (env "GOPATH" "/go")
-(run "apt-get update 	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean     && apt-get install -y --no-install-recommends git golang ca-certificates build-essential     && apt-get clean     && rm -rf /var/lib/apt/lists 	&& go get -v github.com/hashicorp/consul 	&& mv $GOPATH/bin/consul /usr/bin/consul 	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty 	&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') 	&& rm /tmp/dpkg.* 	&& rm -rf $GOPATH")
+(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean     && apt-get install -y --no-install-recommends git golang ca-certificates build-essential     && apt-get clean     && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH")

+ 2 - 2
builder/parser/testfiles/docker/result

@@ -1,13 +1,13 @@
 (from "ubuntu:14.04")
 (maintainer "Tianon Gravi <admwiggin@gmail.com> (@tianon)")
-(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq 	apt-utils 	aufs-tools 	automake 	btrfs-tools 	build-essential 	curl 	dpkg-sig 	git 	iptables 	libapparmor-dev 	libcap-dev 	libsqlite3-dev 	lxc=1.0* 	mercurial 	pandoc 	parallel 	reprepro 	ruby1.9.1 	ruby1.9.1-dev 	s3cmd=1.1.0* 	--no-install-recommends")
+(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tlibsqlite3-dev \tlxc=1.0* \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends")
 (run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103")
 (run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper")
 (run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz")
 (env "PATH" "/usr/local/go/bin:$PATH")
 (env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor")
 (run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1")
-(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm 	darwin/amd64 darwin/386 	freebsd/amd64 freebsd/386 freebsd/arm")
+(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm")
 (env "GOARM" "5")
 (run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'")
 (run "go get golang.org/x/tools/cmd/cover")

+ 8 - 0
builder/parser/testfiles/json/Dockerfile

@@ -0,0 +1,8 @@
+CMD []
+CMD [""]
+CMD ["a"]
+CMD ["a","b"]
+CMD [ "a", "b" ]
+CMD [	"a",	"b"	]
+CMD	[	"a",	"b"	]	
+CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]

+ 8 - 0
builder/parser/testfiles/json/result

@@ -0,0 +1,8 @@
+(cmd)
+(cmd "")
+(cmd "a")
+(cmd "a" "b")
+(cmd "a" "b")
+(cmd "a" "b")
+(cmd "a" "b")
+(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00")

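The last line of this fixture pins down the escape handling: encoding/json decodes \/ to a plain / and \u0000 to a NUL byte, and the dump step then re-quotes the string, which renders NUL as \x00. A small check of that round trip (a demonstration, not parser code):

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

func main() {
	var args []string
	_ = json.Unmarshal([]byte(`["\" \\ \/ \b \f \n \r \t \u0000"]`), &args)
	// json turns \/ into / and \u0000 into a real NUL; strconv.Quote
	// re-escapes for display, emitting \x00, exactly as in the result file.
	fmt.Println(strconv.Quote(args[0])) // "\" \\ / \b \f \n \r \t \x00"
}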
+ 2 - 22
builder/parser/utils.go

@@ -2,30 +2,10 @@ package parser
 
 import (
 	"fmt"
+	"strconv"
 	"strings"
 )
 
-// QuoteString walks characters (after trimming), escapes any quotes and
-// escapes, then wraps the whole thing in quotes. Very useful for generating
-// argument output in nodes.
-func QuoteString(str string) string {
-	result := ""
-	chars := strings.Split(strings.TrimSpace(str), "")
-
-	for _, char := range chars {
-		switch char {
-		case `"`:
-			result += `\"`
-		case `\`:
-			result += `\\`
-		default:
-			result += char
-		}
-	}
-
-	return `"` + result + `"`
-}
-
 // dumps the AST defined by `node` as a list of sexps. Returns a string
 // suitable for printing.
 func (node *Node) Dump() string {
@@ -41,7 +21,7 @@ func (node *Node) Dump() string {
 			if len(n.Children) > 0 {
 				str += " " + n.Dump()
 			} else {
-				str += " " + QuoteString(n.Value)
+				str += " " + strconv.Quote(n.Value)
 			}
 		}
 	}

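This removal explains the churn in the result files above: QuoteString escaped only double quotes and backslashes, passing control characters such as tabs through raw, whereas strconv.Quote escapes them, so literal tabs in RUN lines now dump as \t. A side-by-side comparison (oldQuote re-implements the removed helper for illustration):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// oldQuote mirrors the removed QuoteString: escape quotes and backslashes,
// leave everything else (including control characters) untouched.
func oldQuote(s string) string {
	var b strings.Builder
	for _, r := range strings.TrimSpace(s) {
		switch r {
		case '"':
			b.WriteString(`\"`)
		case '\\':
			b.WriteString(`\\`)
		default:
			b.WriteRune(r)
		}
	}
	return `"` + b.String() + `"`
}

func main() {
	s := "apt-get update \t&& apt-get clean"
	fmt.Println(oldQuote(s))      // the tab is emitted literally
	fmt.Println(strconv.Quote(s)) // the tab becomes \t, hence the new results
}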
+ 3 - 0
contrib/check-config.sh

@@ -138,6 +138,9 @@ flags=(
 	NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE
 	NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}
 	NF_NAT NF_NAT_NEEDED
+
+	# required for bind-mounting /dev/mqueue into containers
+	POSIX_MQUEUE
 )
 check_flags "${flags[@]}"
 echo

+ 232 - 176
contrib/completion/bash/docker

@@ -20,6 +20,11 @@
 # bound to the default communication port/socket
 # If the docker daemon is using a unix socket for communication your user
 # must have access to the socket for the completions to function correctly
+#
+# Note for developers:
+# Please keep options sorted alphabetically by long name, with each short
+# option immediately following its corresponding long form.
+# This order should be applied to lists, alternatives and code blocks.
 
 __docker_q() {
 	docker 2>/dev/null "$@"
@@ -99,6 +104,22 @@ __docker_pos_first_nonflag() {
 	echo $counter
 }
 
+# Transforms a multiline list of strings into a single line string
+# with the words separated by "|".
+# This is used to prepare arguments to __docker_pos_first_nonflag().
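+# Example: "--attach -a" becomes "--attach|-a".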
+__docker_to_alternatives() {
+	local parts=( $1 )
+	local IFS='|'
+	echo "${parts[*]}"
+}
+
+# Transforms a multiline list of options into an extglob pattern
+# suitable for use in case statements.
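+# Example: "--attach -a" becomes "@(--attach|-a)".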
+__docker_to_extglob() {
+	local extglob=$( __docker_to_alternatives "$1" )
+	echo "@($extglob)"
+}
+
 __docker_resolve_hostname() {
 	command -v host >/dev/null 2>&1 || return
 	COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') )
@@ -149,15 +170,47 @@ __docker_capabilities() {
 }
 
 _docker_docker() {
+	local boolean_options="
+		--api-enable-cors
+		--daemon -d
+		--debug -D
+		--help -h
+		--icc
+		--ip-forward
+		--ip-masq
+		--iptables
+		--ipv6
+		--selinux-enabled
+		--tls
+		--tlsverify
+		--version -v
+	"
+
 	case "$prev" in
-		-H)
+		--graph|-g)
+			_filedir -d
+			return
+			;;
+		--log-level|-l)
+			COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) )
+			return
+			;;
+		--pidfile|-p|--tlscacert|--tlscert|--tlskey)
+			_filedir
+			return
+			;;
+		--storage-driver|-s)
+			COMPREPLY=( $( compgen -W "aufs devicemapper btrfs overlay" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) )
+			return
+			;;
+		$main_options_with_args_glob )
 			return
 			;;
 	esac
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-H" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "$boolean_options $main_options_with_args" -- "$cur" ) )
 			;;
 		*)
 			COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
@@ -181,7 +234,7 @@ _docker_attach() {
 
 _docker_build() {
 	case "$prev" in
-		-t|--tag)
+		--tag|-t)
 			__docker_image_repos_and_tags
 			return
 			;;
@@ -189,10 +242,10 @@ _docker_build() {
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-t --tag -q --quiet --no-cache --rm --force-rm" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--force-rm --no-cache --quiet -q --rm --tag -t" -- "$cur" ) )
 			;;
 		*)
-			local counter="$(__docker_pos_first_nonflag '-t|--tag')"
+			local counter="$(__docker_pos_first_nonflag '--tag|-t')"
 			if [ $cword -eq $counter ]; then
 				_filedir -d
 			fi
@@ -202,17 +255,17 @@ _docker_build() {
 
 _docker_commit() {
 	case "$prev" in
-		-m|--message|-a|--author|--run)
+		--author|-a|--message|-m|--run)
 			return
 			;;
 	esac
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-m --message -a --author --run" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--author -a --message -m --run" -- "$cur" ) )
 			;;
 		*)
-			local counter=$(__docker_pos_first_nonflag '-m|--message|-a|--author|--run')
+			local counter=$(__docker_pos_first_nonflag '--author|-a|--message|-m|--run')
 
 			if [ $cword -eq $counter ]; then
 				__docker_containers_all
@@ -252,124 +305,7 @@ _docker_cp() {
 }
 
 _docker_create() {
-	case "$prev" in
-		-a|--attach)
-			COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) )
-			return
-			;;
-		--cidfile|--env-file)
-			_filedir
-			return
-			;;
-		--volumes-from)
-			__docker_containers_all
-			return
-			;;
-		-v|--volume|--device)
-			case "$cur" in
-				*:*)
-					# TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
-					;;
-				'')
-					COMPREPLY=( $( compgen -W '/' -- "$cur" ) )
-					compopt -o nospace
-					;;
-				/*)
-					_filedir
-					compopt -o nospace
-					;;
-			esac
-			return
-			;;
-		-e|--env)
-			COMPREPLY=( $( compgen -e -- "$cur" ) )
-			compopt -o nospace
-			return
-			;;
-		--link)
-			case "$cur" in
-				*:*)
-					;;
-				*)
-					__docker_containers_running
-					COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) )
-					compopt -o nospace
-					;;
-			esac
-			return
-			;;
-		--add-host)
-			case "$cur" in
-				*:)
-					__docker_resolve_hostname
-					return
-					;;
-			esac
-			;;
-		--cap-add|--cap-drop)
-			__docker_capabilities
-			return
-			;;
-		--net)
-			case "$cur" in
-				container:*)
-					local cur=${cur#*:}
-					__docker_containers_all
-					;;
-				*)
-					COMPREPLY=( $( compgen -W "bridge none container: host" -- "$cur") )
-					if [ "${COMPREPLY[*]}" = "container:" ] ; then
-						compopt -o nospace
-					fi
-					;;
-			esac
-			return
-			;;
-		--restart)
-			case "$cur" in
-				on-failure:*)
-					;;
-				*)
-					COMPREPLY=( $( compgen -W "no on-failure on-failure: always" -- "$cur") )
-					;;
-			esac
-			return
-			;;
-		--security-opt)
-			case "$cur" in
-				label:*:*)
-					;;
-				label:*)
-					local cur=${cur##*:}
-					COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "$cur") )
-					if [ "${COMPREPLY[*]}" != "disable" ] ; then
-						compopt -o nospace
-					fi
-					;;
-				*)
-					COMPREPLY=( $( compgen -W "label apparmor" -S ":" -- "$cur") )
-					compopt -o nospace
-					;;
-			esac
-			return
-			;;
-		--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf|--dns-search)
-			return
-			;;
-	esac
-
-	case "$cur" in
-		-*)
-			COMPREPLY=( $( compgen -W "--privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) )
-			;;
-		*)
-			local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt|--add-host|--cap-add|--cap-drop|--device|--dns-search|--net|--restart')
-
-			if [ $cword -eq $counter ]; then
-				__docker_image_repos_and_tags_and_ids
-			fi
-			;;
-	esac
+	_docker_run
 }
 
 _docker_diff() {
@@ -396,7 +332,7 @@ _docker_events() {
 _docker_exec() {
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-d --detach -i --interactive -t --tty" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--detach -d --interactive -i -t --tty" -- "$cur" ) )
 			;;
 		*)
 			__docker_containers_running
@@ -421,7 +357,7 @@ _docker_help() {
 _docker_history() {
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-q --quiet --no-trunc" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--no-trunc --quiet -q" -- "$cur" ) )
 			;;
 		*)
 			local counter=$(__docker_pos_first_nonflag)
@@ -435,7 +371,7 @@ _docker_history() {
 _docker_images() {
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-q --quiet -a --all --no-trunc -v --viz -t --tree" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--all -a --no-trunc --quiet -q" -- "$cur" ) )
 			;;
 		*)
 			local counter=$(__docker_pos_first_nonflag)
@@ -465,14 +401,14 @@ _docker_info() {
 
 _docker_inspect() {
 	case "$prev" in
-		-f|--format)
+		--format|-f)
 			return
 			;;
 	esac
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-f --format" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--format -f" -- "$cur" ) )
 			;;
 		*)
 			__docker_containers_and_images
@@ -485,19 +421,30 @@ _docker_kill() {
 }
 
 _docker_load() {
-	return
+	case "$prev" in
+		--input|-i)
+			_filedir
+			return
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--input -i" -- "$cur" ) )
+			;;
+	esac
 }
 
 _docker_login() {
 	case "$prev" in
-		-u|--username|-p|--password|-e|--email)
+		--email|-e|--password|-p|--username|-u)
 			return
 			;;
 	esac
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-u --username -p --password -e --email" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--email -e --password -p --username -u" -- "$cur" ) )
 			;;
 	esac
 }
@@ -505,7 +452,7 @@ _docker_login() {
 _docker_logs() {
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-f --follow" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--follow -f" -- "$cur" ) )
 			;;
 		*)
 			local counter=$(__docker_pos_first_nonflag)
@@ -532,7 +479,7 @@ _docker_port() {
 
 _docker_ps() {
 	case "$prev" in
-		--since|--before)
+		--before|--since)
 			__docker_containers_all
 			;;
 		-n)
@@ -542,24 +489,24 @@ _docker_ps() {
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since --before -n" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--all -a --before --latest -l --no-trunc -n --quiet -q --size -s --since" -- "$cur" ) )
 			;;
 	esac
 }
 
 _docker_pull() {
 	case "$prev" in
-		-t|--tag)
+		--tag|-t)
 			return
 			;;
 	esac
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-t --tag" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--tag -t" -- "$cur" ) )
 			;;
 		*)
-			local counter=$(__docker_pos_first_nonflag '-t|--tag')
+			local counter=$(__docker_pos_first_nonflag '--tag|-t')
 			if [ $cword -eq $counter ]; then
 				__docker_image_repos_and_tags
 			fi
@@ -576,14 +523,14 @@ _docker_push() {
 
 _docker_restart() {
 	case "$prev" in
-		-t|--time)
+		--time|-t)
 			return
 			;;
 	esac
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-t --time" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--time -t" -- "$cur" ) )
 			;;
 		*)
 			__docker_containers_all
@@ -594,13 +541,13 @@ _docker_restart() {
 _docker_rm() {
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-f --force -l --link -v --volumes" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--force -f --link -l --volumes -v" -- "$cur" ) )
 			return
 			;;
 		*)
 			for arg in "${COMP_WORDS[@]}"; do
 				case "$arg" in
-					-f|--force)
+					--force|-f)
 						__docker_containers_all
 						return
 						;;
@@ -617,20 +564,75 @@ _docker_rmi() {
 }
 
 _docker_run() {
+	local options_with_args="
+		--add-host
+		--attach -a
+		--cap-add
+		--cap-drop
+		--cidfile
+		--cpuset
+		--cpu-shares -c
+		--device
+		--dns
+		--dns-search
+		--entrypoint
+		--env -e
+		--env-file
+		--expose
+		--hostname -h
+		--ipc
+		--link
+		--lxc-conf
+		--mac-address
+		--memory -m
+		--name
+		--net
+		--publish -p
+		--restart
+		--security-opt
+		--user -u
+		--volumes-from
+		--volume -v
+		--workdir -w
+	"
+
+	local all_options="$options_with_args
+		--interactive -i
+		--privileged
+		--publish-all -P
+		--tty -t
+	"
+
+	[ "$command" = "run" ] && all_options="$all_options
+		--detach -d
+		--rm
+		--sig-proxy
+	"
+
+	local options_with_args_glob=$(__docker_to_extglob "$options_with_args")
+
 	case "$prev" in
-		-a|--attach)
+		--add-host)
+			case "$cur" in
+				*:)
+					__docker_resolve_hostname
+					return
+					;;
+			esac
+			;;
+		--attach|-a)
 			COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) )
 			return
 			;;
-		--cidfile|--env-file)
-			_filedir
+		--cap-add|--cap-drop)
+			__docker_capabilities
 			return
 			;;
-		--volumes-from)
-			__docker_containers_all
+		--cidfile|--env-file)
+			_filedir
 			return
 			;;
-		-v|--volume|--device)
+		--device|--volume|-v)
 			case "$cur" in
 				*:*)
 					# TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
@@ -646,33 +648,36 @@ _docker_run() {
 			esac
 			return
 			;;
-		-e|--env)
+		--env|-e)
 			COMPREPLY=( $( compgen -e -- "$cur" ) )
 			compopt -o nospace
 			return
 			;;
-		--link)
+		--ipc)
 			case "$cur" in
 				*:*)
+					cur="${cur#*:}"
+					__docker_containers_running
 					;;
 				*)
-					__docker_containers_running
-					COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) )
-					compopt -o nospace
+					COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) )
+					if [ "$COMPREPLY" = "container:" ]; then
+						compopt -o nospace
+					fi
 					;;
 			esac
 			return
 			;;
-		--add-host)
+		--link)
 			case "$cur" in
-				*:)
-					__docker_resolve_hostname
-					return
+				*:*)
+					;;
+				*)
+					__docker_containers_running
+					COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) )
+					compopt -o nospace
 					;;
 			esac
-			;;
-		--cap-add|--cap-drop)
-			__docker_capabilities
 			return
 			;;
 		--net)
@@ -718,17 +723,21 @@ _docker_run() {
 			esac
 			return
 			;;
-		--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf|--dns-search)
+		--volumes-from)
+			__docker_containers_all
+			return
+			;;
+		$options_with_args_glob )
 			return
 			;;
 	esac
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--rm -d --detach --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) )
 			;;
 		*)
-			local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt|--add-host|--cap-add|--cap-drop|--device|--dns-search|--net|--restart')
+			local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) )
 
 			if [ $cword -eq $counter ]; then
 				__docker_image_repos_and_tags_and_ids
@@ -738,22 +747,33 @@ _docker_run() {
 }
 
 _docker_save() {
-	local counter=$(__docker_pos_first_nonflag)
-	if [ $cword -eq $counter ]; then
-		__docker_image_repos_and_tags_and_ids
-	fi
+	case "$prev" in
+		--output|-o)
+			_filedir
+			return
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "-o --output" -- "$cur" ) )
+			;;
+		*)
+			__docker_image_repos_and_tags_and_ids
+			;;
+	esac
 }
 
 _docker_search() {
 	case "$prev" in
-		-s|--stars)
+		--stars|-s)
 			return
 			;;
 	esac
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--no-trunc --automated -s --stars" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--automated --no-trunc --stars -s" -- "$cur" ) )
 			;;
 	esac
 }
@@ -761,7 +781,7 @@ _docker_search() {
 _docker_start() {
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-a --attach -i --interactive" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--attach -a --interactive -i" -- "$cur" ) )
 			;;
 		*)
 			__docker_containers_stopped
@@ -769,16 +789,20 @@ _docker_start() {
 	esac
 }
 
+_docker_stats() {
+	__docker_containers_running
+}
+
 _docker_stop() {
 	case "$prev" in
-		-t|--time)
+		--time|-t)
 			return
 			;;
 	esac
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-t --time" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--time -t" -- "$cur" ) )
 			;;
 		*)
 			__docker_containers_running
@@ -789,7 +813,7 @@ _docker_stop() {
 _docker_tag() {
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-f --force" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--force -f" -- "$cur" ) )
 			;;
 		*)
 			local counter=$(__docker_pos_first_nonflag)
@@ -831,6 +855,9 @@ _docker_wait() {
 }
 
 _docker() {
+	local previous_extglob_setting=$(shopt -p extglob)
+	shopt -s extglob
+
 	local commands=(
 		attach
 		build
@@ -863,6 +890,7 @@ _docker() {
 		save
 		search
 		start
+		stats
 		stop
 		tag
 		top
@@ -871,6 +899,33 @@ _docker() {
 		wait
 	)
 
+	local main_options_with_args="
+		--bip
+		--bridge -b
+		--dns
+		--dns-search
+		--exec-driver -e
+		--fixed-cidr
+		--fixed-cidr-v6
+		--graph -g
+		--group -G
+		--host -H
+		--insecure-registry
+		--ip
+		--label
+		--log-level -l
+		--mtu
+		--pidfile -p
+		--registry-mirror
+		--storage-driver -s
+		--storage-opt
+		--tlscacert
+		--tlscert
+		--tlskey
+	"
+
+	local main_options_with_args_glob=$(__docker_to_extglob "$main_options_with_args")
+
 	COMPREPLY=()
 	local cur prev words cword
 	_get_comp_words_by_ref -n : cur prev words cword
@@ -879,7 +934,7 @@ _docker() {
 	local counter=1
 	while [ $counter -lt $cword ]; do
 		case "${words[$counter]}" in
-			-H)
+			$main_options_with_args_glob )
 				(( counter++ ))
 				;;
 			-*)
@@ -897,6 +952,7 @@ _docker() {
 	local completions_func=_docker_${command}
 	declare -F $completions_func >/dev/null && $completions_func
 
+	eval "$previous_extglob_setting"
 	return 0
 }
 

+ 186 - 77
contrib/completion/fish/docker.fish

@@ -16,7 +16,7 @@
 
 function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand'
     for i in (commandline -opc)
-        if contains -- $i attach build commit cp create diff events export history images import info insert inspect kill load login logs port ps pull push restart rm rmi run save search start stop tag top version wait
+        if contains -- $i attach build commit cp create diff events exec export history images import info insert inspect kill load login logout logs pause port ps pull push restart rm rmi run save search start stop tag top unpause version wait
             return 1
         end
     end
@@ -43,92 +43,142 @@ function __fish_print_docker_repositories --description 'Print a list of docker
 end
 
 # common options
-complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode'
-complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group"
-complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. Multiple sockets can be specified'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l api-enable-cors -d 'Enable CORS headers in the remote API'
-complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d "Attach containers to a pre-existing network bridge; use 'none' to disable container networking"
+complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d 'Attach containers to a pre-existing network bridge'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b"
+complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode'
-complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force docker to use specific DNS servers'
-complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the docker runtime to use a specific exec driver'
-complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the docker runtime'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the Docker runtime to use a specific exec driver'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g. 10.20.0.0/16)'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the Docker runtime'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s h -l help -d 'Print usage'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l insecure-registry -d 'Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports'
-complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Disable enabling of net.ipv4.ip_forward'
-complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Disable docker's addition of iptables rules"
-complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Enable net.ipv4.ip_forward and IPv6 forwarding if --fixed-cidr-v6 is defined. IPv6 forwarding may interfere with your existing IPv6 configuration when using Router Advertisement.'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-masq -d "Enable IP masquerading for bridge's IP range"
+complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Enable Docker's addition of iptables rules"
+complete -c docker -f -n '__fish_docker_no_subcommand' -l ipv6 -d 'Enable IPv6 networking'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s l -l log-level -d 'Set the logging level (debug, info, warn, error, fatal)'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l label -d 'Set key=value labels to the daemon (displayed in `docker info`)'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file'
-complete -c docker -f -n '__fish_docker_no_subcommand' -s r -l restart -d 'Restart previously running containers'
-complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the docker runtime to use a specific storage driver'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l registry-mirror -d 'Specify a preferred Docker registry mirror'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the Docker runtime to use a specific storage driver'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l selinux-enabled -d 'Enable selinux support. SELinux does not presently support the BTRFS storage driver'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l storage-opt -d 'Set storage driver options'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by --tlsverify flag'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscacert -d 'Trust only remotes providing a certificate signed by the CA given here'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscert -d 'Path to TLS certificate file'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l tlskey -d 'Path to TLS key file'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l tlsverify -d 'Use TLS and verify the remote (daemon: verify client, client: verify daemon)'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit'
 
 # subcommands
 # attach
 complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container'
-complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach stdin'
-complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxify all received signal to the process (non-TTY mode only)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach STDIN'
+complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.'
 complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container"
 
 # build
 complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile'
+complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s f -l file -d "Name of the Dockerfile (default is 'Dockerfile' at context root)"
 complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l force-rm -d 'Always remove intermediate containers, even after unsuccessful builds'
+complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image'
+complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l pull -d 'Always attempt to pull a newer version of the image'
 complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the verbose output generated by the containers'
 complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build'
 complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success'
 
 # commit
 complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes"
-complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith <hannibal@a-team.com>"'
+complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")'
+complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message'
-complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Config automatically applied when the image is run. (ex: -run=\'{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}\')'
+complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s p -l pause -d 'Pause container during commit'
 complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container"
 
 # cp
 complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders from a container's filesystem to the host path"
+complete -c docker -A -f -n '__fish_seen_subcommand_from cp' -l help -d 'Print usage'
 
 # create
-complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to the host interfaces'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to stdin, stdout or stderr.'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom dns servers'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default entrypoint of the image'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port from the container without publishing it to your host'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep stdin open even if not attached'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container (name:alias)'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d 'Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: <number><optional unit>, where unit = b, k, m or g)'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s n -l networking -d 'Enable networking for this container'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)"
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-tty'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image"
-
+complete -c docker -f -n '__fish_docker_no_subcommand' -a create -d 'Create a new container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s c -l cpu-shares -d 'CPU shares (relative weight)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-add -d 'Add Linux capabilities'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-drop -d 'Drop Linux capabilities'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cidfile -d 'Write the container ID to the file'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns -d 'Set custom DNS servers'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)"
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s e -l env -d 'Set environment variables'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l env-file -d 'Read in a line delimited file of environment variables'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s h -l hostname -d 'Container host name'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s i -l interactive -d 'Keep STDIN open even if not attached'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l link -d 'Add link to another container in the form of <name|id>:alias'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l lxc-conf -d '(lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s m -l memory -d 'Memory limit (format: <number><optional unit>, where unit = b, k, m or g)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mac-address -d 'Container MAC address (e.g. 92:d0:c6:0a:29:33)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: <number><optional unit>, where unit = b, k, m or g)"
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l name -d 'Assign a name to the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l net -d 'Set the Network mode for the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s p -l publish -d "Publish a container's port to the host"
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l pid -d 'Default is to create a private PID namespace for the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l privileged -d 'Give extended privileges to this container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l read-only -d "Mount the container's root filesystem as read only"
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l security-opt -d 'Security Options'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s t -l tty -d 'Allocate a pseudo-TTY'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s u -l user -d 'Username or UID'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l volumes-from -d 'Mount volumes from the specified container(s)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s w -l workdir -d 'Working directory inside the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -a '(__fish_print_docker_images)' -d "Image"
 
 # diff
 complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem"
+complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container"
 
 # events
 complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server'
-complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show previously created events and then stream.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from events' -s f -l filter -d "Provide filter values (i.e., 'event=stop')"
+complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show all events created since timestamp'
+complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l until -d 'Stream events until this timestamp'
+
+# exec
+complete -c docker -f -n '__fish_docker_no_subcommand' -a exec -d 'Run a command in a running container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s d -l detach -d 'Detached mode: run command in the background'
+complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s i -l interactive -d 'Keep STDIN open even if not attached'
+complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s t -l tty -d 'Allocate a pseudo-TTY'
+complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -a '(__fish_print_docker_containers running)' -d "Container"
 
 # export
 complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar archive'
+complete -c docker -A -f -n '__fish_seen_subcommand_from export' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_print_docker_containers all)' -d "Container"
 
 # history
 complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image'
+complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output"
 complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs'
 complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image"
@@ -136,51 +186,70 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_pr
 # images
 complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images'
 complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s f -l filter -d "Provide filter values (i.e., 'dangling=true')"
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output"
 complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs'
-complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s t -l tree -d 'Output graph in tree format'
-complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s v -l viz -d 'Output graph in graphviz format'
 complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository"
 
 # import
 complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball'
+complete -c docker -A -f -n '__fish_seen_subcommand_from import' -l help -d 'Print usage'
 
 # info
 complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information'
 
 # inspect
-complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container'
+complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container or image'
 complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image"
 complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container"
 
 # kill
 complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container'
 complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container"
 
 # load
 complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive'
+complete -c docker -A -f -n '__fish_seen_subcommand_from load' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from load' -s i -l input -d 'Read from a tar archive file, instead of STDIN'
 
 # login
-complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Register or Login to the docker registry server'
+complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Register or log in to a Docker registry server'
 complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'Email'
+complete -c docker -A -f -n '__fish_seen_subcommand_from login' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password'
 complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username'
 
+# logout
+complete -c docker -f -n '__fish_docker_no_subcommand' -a logout -d 'Log out from a Docker registry server'
+
 # logs
 complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container'
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output'
+complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps'
+complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)'
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container"
 
 # port
-complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port which is NAT-ed to PRIVATE_PORT'
+complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port that is NAT-ed to PRIVATE_PORT'
+complete -c docker -A -f -n '__fish_seen_subcommand_from port' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container"
 
+# pause
+complete -c docker -f -n '__fish_docker_no_subcommand' -a pause -d 'Pause all processes within a container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from pause' -a '(__fish_print_docker_containers running)' -d "Container"
+
 # ps
 complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers'
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.'
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only container created before Id or Name, include non-running ones.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s f -l filter -d 'Provide filter values. Valid filters:'
+complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.'
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.'
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output"
@@ -189,97 +258,137 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Di
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.'
 
 # pull
-complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from the docker registry server'
-complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s t -l tag -d 'Download tagged image in repository'
+complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from a Docker registry server'
+complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s a -l all-tags -d 'Download all tagged images in the repository'
+complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image"
 complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository"
 
 # push
-complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to the docker registry server'
+complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to a Docker registry server'
+complete -c docker -A -f -n '__fish_seen_subcommand_from push' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image"
 complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository"
 
+# rename
+complete -c docker -f -n '__fish_docker_no_subcommand' -a rename -d 'Rename an existing container'
+
 # restart
 complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a running container'
-complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10'
+complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.'
 complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container"
 
 # rm
 complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers'
-complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force removal of running container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force the removal of a running container (uses SIGKILL)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container'
-complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated to the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated with the container'
 complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container"
 
 # rmi
 complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images'
-complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force'
+complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force removal of the image'
+complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l no-prune -d 'Do not delete untagged parents'
 complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image"
 
 # run
 complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to the host interfaces'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to stdin, stdout or stderr.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)'
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-add -d 'Add Linux capabilities'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-drop -d 'Drop Linux capabilities'
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: Run container in the background, print new container id'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom dns servers'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: run the container in the background and print the new container ID'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom DNS servers'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)"
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default entrypoint of the image'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port from the container without publishing it to your host'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l env-file -d 'Read in a line delimited file of environment variables'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host'
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep stdin open even if not attached'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container (name:alias)'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d 'Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep STDIN open even if not attached'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container in the form of <name|id>:alias'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d '(lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"'
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: <number><optional unit>, where unit = b, k, m or g)'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s n -l networking -d 'Enable networking for this container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mac-address -d 'Container MAC address (e.g. 92:d0:c6:0a:29:33)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: <number><optional unit>, where unit = b, k, m or g)"
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)"
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l net -d 'Set the Network mode for the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host"
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l pid -d 'Default is to create a private PID namespace for the container'
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l read-only -d "Mount the container's root filesystem as read only"
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)'
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxify all received signal to the process (non-TTY mode only)'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-tty'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l security-opt -d 'Security Options'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-TTY'
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)'
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)'
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container'
 complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image"
 
 # save
 complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive'
+complete -c docker -A -f -n '__fish_seen_subcommand_from save' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from save' -s o -l output -d 'Write to a file, instead of STDOUT'
 complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image"
 
 # search
-complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image in the docker index'
-complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output"
-complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only displays with at least xxx stars'
+complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image on the Docker Hub'
 complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds'
+complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output"
+complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only display results with at least x stars'
 
 # start
 complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a stopped container'
-complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's stdout/stderr and forward all signals to the process"
-complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's stdin"
+complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's STDOUT and STDERR and forward all signals to the process"
+complete -c docker -A -f -n '__fish_seen_subcommand_from start' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's STDIN"
 complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container"
 
+# stats
+complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics"
+complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container"
+
 # stop
 complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a running container'
-complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.'
 complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container"
 
 # tag
 complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository'
 complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force'
-complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -a '(__fish_print_docker_images)' -d "Image"
+complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -l help -d 'Print usage'
 
 # top
 complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Look up the running processes of a container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from top' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container"
 
+# unpause
+complete -c docker -f -n '__fish_docker_no_subcommand' -a unpause -d 'Unpause a paused container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from unpause' -a '(__fish_print_docker_containers running)' -d "Container"
+
 # version
-complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the docker version information'
+complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the Docker version information'
 
 # wait
 complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code'
+complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container"
 
 

+ 1 - 0
contrib/init/systemd/docker.service

@@ -6,6 +6,7 @@ Requires=docker.socket
 
 [Service]
 ExecStart=/usr/bin/docker -d -H fd://
+MountFlags=slave
 LimitNOFILE=1048576
 LimitNPROC=1048576
 

+ 2 - 1
contrib/init/sysvinit-redhat/docker

@@ -23,6 +23,7 @@
 . /etc/rc.d/init.d/functions
 
 prog="docker"
+unshare=/usr/bin/unshare
 exec="/usr/bin/$prog"
 pidfile="/var/run/$prog.pid"
 lockfile="/var/lock/subsys/$prog"
@@ -46,7 +47,7 @@ start() {
         prestart
         printf "Starting $prog:\t"
         echo "\n$(date)\n" >> $logfile
-        $exec -d $other_args &>> $logfile &
+        "$unshare" -m -- $exec -d $other_args &>> $logfile &
         pid=$!
         touch $lockfile
         # wait up to 10 seconds for the pidfile to exist.  see

+ 17 - 0
contrib/init/upstart/docker.conf

@@ -39,3 +39,20 @@ script
 	fi
 	exec "$DOCKER" -d $DOCKER_OPTS
 end script
+
+# Don't emit "started" event until docker.sock is ready.
+# See https://github.com/docker/docker/issues/6647
+post-start script
+	DOCKER_OPTS=
+	if [ -f /etc/default/$UPSTART_JOB ]; then
+		. /etc/default/$UPSTART_JOB
+	fi
+	if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then
+		while ! [ -e /var/run/docker.sock ]; do
+			initctl status $UPSTART_JOB | grep -q "stop/" && exit 1
+			echo "Waiting for /var/run/docker.sock"
+			sleep 0.1
+		done
+		echo "/var/run/docker.sock is up"
+	fi
+end script
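
The post-start stanza above keeps Upstart from emitting "started" until /var/run/docker.sock exists. Below is a rough Go sketch of an equivalent readiness probe; it is slightly stricter than the script (it dials the socket rather than only testing for its existence), and the path and timeout are illustrative assumptions.

package main

import (
	"fmt"
	"net"
	"time"
)

// waitForSocket polls the unix socket until it accepts a connection or the
// timeout elapses, mirroring the 0.1s poll loop in the post-start script.
func waitForSocket(path string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		conn, err := net.DialTimeout("unix", path, time.Second)
		if err == nil {
			conn.Close()
			return nil
		}
		fmt.Println("Waiting for", path)
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("timed out waiting for %s", path)
}

func main() {
	if err := waitForSocket("/var/run/docker.sock", 10*time.Second); err != nil {
		fmt.Println(err)
	}
}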

+ 28 - 2
contrib/mkimage-arch.sh

@@ -18,7 +18,32 @@ ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX)
 chmod 755 $ROOTFS
 
 # packages to ignore for space savings
-PKGIGNORE=linux,jfsutils,lvm2,cryptsetup,groff,man-db,man-pages,mdadm,pciutils,pcmciautils,reiserfsprogs,s-nail,xfsprogs
+PKGIGNORE=(
+    cryptsetup
+    device-mapper
+    dhcpcd
+    iproute2
+    jfsutils
+    linux
+    lvm2
+    man-db
+    man-pages
+    mdadm
+    nano
+    netctl
+    openresolv
+    pciutils
+    pcmciautils
+    reiserfsprogs
+    s-nail
+    systemd-sysvcompat
+    usbutils
+    vi
+    xfsprogs
+)
+IFS=','
+PKGIGNORE="${PKGIGNORE[*]}"
+unset IFS
 
 expect <<EOF
 	set send_slow {1 .1}
@@ -36,7 +61,8 @@ expect <<EOF
 	}
 EOF
 
-arch-chroot $ROOTFS /bin/sh -c "haveged -w 1024; pacman-key --init; pkill haveged; pacman -Rs --noconfirm haveged; pacman-key --populate archlinux"
+arch-chroot $ROOTFS /bin/sh -c 'rm -r /usr/share/man/*'
+arch-chroot $ROOTFS /bin/sh -c "haveged -w 1024; pacman-key --init; pkill haveged; pacman -Rs --noconfirm haveged; pacman-key --populate archlinux; pkill gpg-agent"
 arch-chroot $ROOTFS /bin/sh -c "ln -s /usr/share/zoneinfo/UTC /etc/localtime"
 echo 'en_US.UTF-8 UTF-8' > $ROOTFS/etc/locale.gen
 arch-chroot $ROOTFS locale-gen

+ 5 - 0
contrib/mkimage/debootstrap

@@ -49,6 +49,11 @@ chmod +x "$rootfsDir/usr/sbin/policy-rc.d"
 # shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB)
 ( set -x; chroot "$rootfsDir" apt-get clean )
 
+# APT creates this file to make sure we don't "autoremove" the currently
+# in-use kernel, which doesn't really apply to debootstraps/Docker images
+# that don't even have kernels installed
+rm -f "$rootfsDir/etc/apt/apt.conf.d/01autoremove-kernels"
+
 # Ubuntu 10.04 sucks... :)
 if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then
 	# force dpkg not to call sync() after package extraction (speeding up installs)

+ 3 - 2
contrib/nuke-graph-directory.sh

@@ -50,9 +50,10 @@ for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do
 done
 
 # now, let's go destroy individual btrfs subvolumes, if any exist
-if command -v btrfs &> /dev/null; then
+if command -v btrfs > /dev/null 2>&1; then
 	root="$(df "$dir" | awk 'NR>1 { print $NF }')"
-	for subvol in $(btrfs subvolume list -o "$root" 2>/dev/null | awk -F' path ' '{ print $2 }'); do
+	root="${root#/}" # if root is "/", we want it to become ""
+	for subvol in $(btrfs subvolume list -o "$root/" 2>/dev/null | awk -F' path ' '{ print $2 }' | sort -r); do
 		subvolDir="$root/$subvol"
 		if dir_in_dir "$subvolDir" "$dir"; then
 			( set -x; btrfs subvolume delete "$subvolDir" )

+ 4 - 1
contrib/syntax/vim/README.md

@@ -5,8 +5,11 @@ Syntax highlighting for Dockerfiles
 
 Installation
 ------------
+With [pathogen](https://github.com/tpope/vim-pathogen), the usual way...
 
-Via pathogen, the usual way...
+With [Vundle](https://github.com/gmarik/Vundle.vim)
+  
+    Plugin 'docker/docker' , {'rtp': '/contrib/syntax/vim/'}
 
 Features
 --------

+ 76 - 112
daemon/attach.go

@@ -4,11 +4,11 @@ import (
 	"encoding/json"
 	"io"
 	"os"
+	"sync"
 	"time"
 
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/engine"
-	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/jsonlog"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/utils"
@@ -114,137 +114,101 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 func (daemon *Daemon) attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
 	var (
 		cStdout, cStderr io.ReadCloser
-		nJobs            int
+		cStdin           io.WriteCloser
+		wg               sync.WaitGroup
 		errors           = make(chan error, 3)
 	)
 
-	// Connect stdin of container to the http conn.
 	if stdin != nil && openStdin {
-		nJobs++
-		// Get the stdin pipe.
-		if cStdin, err := streamConfig.StdinPipe(); err != nil {
-			errors <- err
-		} else {
-			go func() {
-				log.Debugf("attach: stdin: begin")
-				defer log.Debugf("attach: stdin: end")
-				if stdinOnce && !tty {
-					defer cStdin.Close()
-				} else {
-					// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
-					defer func() {
-						if cStdout != nil {
-							cStdout.Close()
-						}
-						if cStderr != nil {
-							cStderr.Close()
-						}
-					}()
-				}
-				if tty {
-					_, err = utils.CopyEscapable(cStdin, stdin)
-				} else {
-					_, err = io.Copy(cStdin, stdin)
-
-				}
-				if err == io.ErrClosedPipe {
-					err = nil
-				}
-				if err != nil {
-					log.Errorf("attach: stdin: %s", err)
-				}
-				errors <- err
-			}()
-		}
+		cStdin = streamConfig.StdinPipe()
+		wg.Add(1)
 	}
+
 	if stdout != nil {
-		nJobs++
-		// Get a reader end of a pipe that is attached as stdout to the container.
-		if p, err := streamConfig.StdoutPipe(); err != nil {
-			errors <- err
-		} else {
-			cStdout = p
-			go func() {
-				log.Debugf("attach: stdout: begin")
-				defer log.Debugf("attach: stdout: end")
-				// If we are in StdinOnce mode, then close stdin
-				if stdinOnce && stdin != nil {
-					defer stdin.Close()
-				}
-				_, err := io.Copy(stdout, cStdout)
-				if err == io.ErrClosedPipe {
-					err = nil
-				}
-				if err != nil {
-					log.Errorf("attach: stdout: %s", err)
-				}
-				errors <- err
-			}()
-		}
-	} else {
-		// Point stdout of container to a no-op writer.
-		go func() {
-			if cStdout, err := streamConfig.StdoutPipe(); err != nil {
-				log.Errorf("attach: stdout pipe: %s", err)
-			} else {
-				io.Copy(&ioutils.NopWriter{}, cStdout)
-			}
-		}()
+		cStdout = streamConfig.StdoutPipe()
+		wg.Add(1)
 	}
+
 	if stderr != nil {
-		nJobs++
-		if p, err := streamConfig.StderrPipe(); err != nil {
-			errors <- err
-		} else {
-			cStderr = p
-			go func() {
-				log.Debugf("attach: stderr: begin")
-				defer log.Debugf("attach: stderr: end")
-				// If we are in StdinOnce mode, then close stdin
-				// Why are we closing stdin here and above while handling stdout?
-				if stdinOnce && stdin != nil {
-					defer stdin.Close()
-				}
-				_, err := io.Copy(stderr, cStderr)
-				if err == io.ErrClosedPipe {
-					err = nil
-				}
-				if err != nil {
-					log.Errorf("attach: stderr: %s", err)
-				}
-				errors <- err
-			}()
+		cStderr = streamConfig.StderrPipe()
+		wg.Add(1)
+	}
+
+	// Connect stdin of container to the http conn.
+	go func() {
+		if stdin == nil || !openStdin {
+			return
 		}
-	} else {
-		// Point stderr at a no-op writer.
-		go func() {
-			if cStderr, err := streamConfig.StderrPipe(); err != nil {
-				log.Errorf("attach: stdout pipe: %s", err)
+		log.Debugf("attach: stdin: begin")
+		defer func() {
+			if stdinOnce && !tty {
+				cStdin.Close()
 			} else {
-				io.Copy(&ioutils.NopWriter{}, cStderr)
+				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
+				if cStdout != nil {
+					cStdout.Close()
+				}
+				if cStderr != nil {
+					cStderr.Close()
+				}
 			}
+			wg.Done()
+			log.Debugf("attach: stdin: end")
 		}()
-	}
 
-	return promise.Go(func() error {
+		var err error
+		if tty {
+			_, err = utils.CopyEscapable(cStdin, stdin)
+		} else {
+			_, err = io.Copy(cStdin, stdin)
+
+		}
+		if err == io.ErrClosedPipe {
+			err = nil
+		}
+		if err != nil {
+			log.Errorf("attach: stdin: %s", err)
+			errors <- err
+			return
+		}
+	}()
+
+	attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) {
+		if stream == nil {
+			return
+		}
 		defer func() {
-			if cStdout != nil {
-				cStdout.Close()
-			}
-			if cStderr != nil {
-				cStderr.Close()
+			// Make sure stdin gets closed
+			if stdin != nil {
+				stdin.Close()
 			}
+			streamPipe.Close()
+			wg.Done()
+			log.Debugf("attach: %s: end", name)
 		}()
 
-		for i := 0; i < nJobs; i++ {
-			log.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
-			if err := <-errors; err != nil {
-				log.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err)
+		log.Debugf("attach: %s: begin", name)
+		_, err := io.Copy(stream, streamPipe)
+		if err == io.ErrClosedPipe {
+			err = nil
+		}
+		if err != nil {
+			log.Errorf("attach: %s: %v", name, err)
+			errors <- err
+		}
+	}
+
+	go attachStream("stdout", stdout, cStdout)
+	go attachStream("stderr", stderr, cStderr)
+
+	return promise.Go(func() error {
+		wg.Wait()
+		close(errors)
+		for err := range errors {
+			if err != nil {
 				return err
 			}
-			log.Debugf("attach: job %d completed successfully", i+1)
 		}
-		log.Debugf("attach: all jobs completed successfully")
 		return nil
 	})
 }
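
The rewritten attach flow above replaces the hand-counted nJobs loop with a sync.WaitGroup plus a buffered error channel that is closed and drained only once every copier has finished. A stripped-down sketch of that pattern follows; the stream names and in-memory readers are hypothetical stand-ins for the daemon's StreamConfig plumbing.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

// copyAll starts one goroutine per stream, waits for all of them, then
// drains the buffered error channel. Buffering to len(streams) means no
// copier can block while reporting an error; closing after Wait is safe
// because no goroutine writes to the channel afterwards.
func copyAll(streams map[string]io.Reader) error {
	var (
		wg     sync.WaitGroup
		errors = make(chan error, len(streams))
	)
	for name, src := range streams {
		wg.Add(1)
		go func(name string, src io.Reader) {
			defer wg.Done()
			var sink bytes.Buffer
			if _, err := io.Copy(&sink, src); err != nil && err != io.ErrClosedPipe {
				errors <- fmt.Errorf("%s: %v", name, err)
			}
		}(name, src)
	}
	wg.Wait()
	close(errors)
	for err := range errors {
		return err // first buffered error wins, as in the daemon's version
	}
	return nil
}

func main() {
	err := copyAll(map[string]io.Reader{
		"stdout": strings.NewReader("hello"),
		"stderr": strings.NewReader("world"),
	})
	fmt.Println("err:", err)
}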

+ 4 - 4
daemon/commit.go

@@ -59,17 +59,17 @@ func (daemon *Daemon) Commit(container *Container, repository, tag, comment, aut
 
 	// Create a new image from the container's base layers + a new layer from container changes
 	var (
-		containerID, containerImage string
-		containerConfig             *runconfig.Config
+		containerID, parentImageID string
+		containerConfig            *runconfig.Config
 	)
 
 	if container != nil {
 		containerID = container.ID
-		containerImage = container.Image
+		parentImageID = container.ImageID
 		containerConfig = container.Config
 	}
 
-	img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
+	img, err := daemon.graph.Create(rwTar, containerID, parentImageID, comment, author, containerConfig, config)
 	if err != nil {
 		return nil, err
 	}

+ 6 - 14
daemon/config.go

@@ -23,7 +23,7 @@ type Config struct {
 	AutoRestart                 bool
 	Dns                         []string
 	DnsSearch                   []string
-	Mirrors                     []string
+	EnableIPv6                  bool
 	EnableIptables              bool
 	EnableIpForward             bool
 	EnableIpMasq                bool
@@ -31,7 +31,7 @@ type Config struct {
 	BridgeIface                 string
 	BridgeIP                    string
 	FixedCIDR                   string
-	InsecureRegistries          []string
+	FixedCIDRv6                 string
 	InterContainerCommunication bool
 	GraphDriver                 string
 	GraphOptions                []string
@@ -53,12 +53,13 @@ func (config *Config) InstallFlags() {
 	flag.StringVar(&config.Root, []string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the Docker runtime")
 	flag.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run")
 	flag.BoolVar(&config.EnableIptables, []string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules")
-	flag.BoolVar(&config.EnableIpForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
+	flag.BoolVar(&config.EnableIpForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward and IPv6 forwarding if --fixed-cidr-v6 is defined. IPv6 forwarding may interfere with your existing IPv6 configuration when using Router Advertisement.")
 	flag.BoolVar(&config.EnableIpMasq, []string{"-ip-masq"}, true, "Enable IP masquerading for bridge's IP range")
+	flag.BoolVar(&config.EnableIPv6, []string{"-ipv6"}, false, "Enable IPv6 networking")
 	flag.StringVar(&config.BridgeIP, []string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
 	flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking")
-	flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs (ex: 10.20.0.0/16)\nthis subnet must be nested in the bridge subnet (which is defined by -b or --bip)")
-	opts.ListVar(&config.InsecureRegistries, []string{"-insecure-registry"}, "Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)")
+	flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs (e.g. 10.20.0.0/16)\nthis subnet must be nested in the bridge subnet (which is defined by -b or --bip)")
+	flag.StringVar(&config.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", "IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)")
 	flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Allow unrestricted inter-container and Docker daemon host communication")
 	flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver")
 	flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver")
@@ -69,16 +70,7 @@ func (config *Config) InstallFlags() {
 	// FIXME: why the inconsistency between "hosts" and "sockets"?
 	opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers")
 	opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
-	opts.MirrorListVar(&config.Mirrors, []string{"-registry-mirror"}, "Specify a preferred Docker registry mirror")
 	opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon (displayed in `docker info`)")
-
-	// Localhost is by default considered as an insecure registry
-	// This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker).
-	//
-	// TODO: should we deprecate this once it is easier for people to set up a TLS registry or change
-	// daemon flags on boot2docker?
-	// If so, do not forget to check the TODO in TestIsSecure
-	config.InsecureRegistries = append(config.InsecureRegistries, "127.0.0.0/8")
 }
 
 func getDefaultNetworkMtu() int {

+ 155 - 41
daemon/container.go

@@ -62,8 +62,8 @@ type Container struct {
 	Path string
 	Args []string
 
-	Config *runconfig.Config
-	Image  string
+	Config  *runconfig.Config
+	ImageID string `json:"Image"`
 
 	NetworkSettings *NetworkSettings
 
@@ -81,6 +81,7 @@ type Container struct {
 	MountLabel, ProcessLabel string
 	AppArmorProfile          string
 	RestartCount             int
+	UpdateDns                bool
 
 	// Maps container paths to volume paths.  The key in this is the path to which
 	// the volume is being mounted inside the container.  Value is the path of the
@@ -91,9 +92,10 @@ type Container struct {
 	VolumesRW  map[string]bool
 	hostConfig *runconfig.HostConfig
 
-	activeLinks  map[string]*links.Link
-	monitor      *containerMonitor
-	execCommands *execStore
+	activeLinks        map[string]*links.Link
+	monitor            *containerMonitor
+	execCommands       *execStore
+	AppliedVolumesFrom map[string]struct{}
 }
 
 func (container *Container) FromDisk() error {
@@ -186,7 +188,7 @@ func (container *Container) WriteHostConfig() error {
 
 func (container *Container) LogEvent(action string) {
 	d := container.daemon
-	if err := d.eng.Job("log", action, container.ID, d.Repositories().ImageName(container.Image)).Run(); err != nil {
+	if err := d.eng.Job("log", action, container.ID, d.Repositories().ImageName(container.ImageID)).Run(); err != nil {
 		log.Errorf("Error logging event %s for %s: %s", action, container.ID, err)
 	}
 }
@@ -216,11 +218,15 @@ func populateCommand(c *Container, env []string) error {
 		if !c.Config.NetworkDisabled {
 			network := c.NetworkSettings
 			en.Interface = &execdriver.NetworkInterface{
-				Gateway:     network.Gateway,
-				Bridge:      network.Bridge,
-				IPAddress:   network.IPAddress,
-				IPPrefixLen: network.IPPrefixLen,
-				MacAddress:  network.MacAddress,
+				Gateway:              network.Gateway,
+				Bridge:               network.Bridge,
+				IPAddress:            network.IPAddress,
+				IPPrefixLen:          network.IPPrefixLen,
+				MacAddress:           network.MacAddress,
+				LinkLocalIPv6Address: network.LinkLocalIPv6Address,
+				GlobalIPv6Address:    network.GlobalIPv6Address,
+				GlobalIPv6PrefixLen:  network.GlobalIPv6PrefixLen,
+				IPv6Gateway:          network.IPv6Gateway,
 			}
 		}
 	case "container":
@@ -245,6 +251,9 @@ func populateCommand(c *Container, env []string) error {
 		ipc.HostIpc = c.hostConfig.IpcMode.IsHost()
 	}
 
+	pid := &execdriver.Pid{}
+	pid.HostPid = c.hostConfig.PidMode.IsHost()
+
 	// Build lists of devices allowed and created within the container.
 	userSpecifiedDevices := make([]*devices.Device, len(c.hostConfig.Devices))
 	for i, deviceMapping := range c.hostConfig.Devices {
@@ -286,10 +295,12 @@ func populateCommand(c *Container, env []string) error {
 	c.command = &execdriver.Command{
 		ID:                 c.ID,
 		Rootfs:             c.RootfsPath(),
+		ReadonlyRootfs:     c.hostConfig.ReadonlyRootfs,
 		InitPath:           "/.dockerinit",
 		WorkingDir:         c.Config.WorkingDir,
 		Network:            en,
 		Ipc:                ipc,
+		Pid:                pid,
 		Resources:          resources,
 		AllowedDevices:     allowedDevices,
 		AutoCreatedDevices: autoCreatedDevices,
@@ -370,10 +381,7 @@ func (container *Container) Run() error {
 }
 
 func (container *Container) Output() (output []byte, err error) {
-	pipe, err := container.StdoutPipe()
-	if err != nil {
-		return nil, err
-	}
+	pipe := container.StdoutPipe()
 	defer pipe.Close()
 	if err := container.Start(); err != nil {
 		return nil, err
@@ -391,20 +399,20 @@ func (container *Container) Output() (output []byte, err error) {
 // copied and delivered to all StdoutPipe and StderrPipe consumers, using
 // a kind of "broadcaster".
 
-func (streamConfig *StreamConfig) StdinPipe() (io.WriteCloser, error) {
-	return streamConfig.stdinPipe, nil
+func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser {
+	return streamConfig.stdinPipe
 }
 
-func (streamConfig *StreamConfig) StdoutPipe() (io.ReadCloser, error) {
+func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser {
 	reader, writer := io.Pipe()
 	streamConfig.stdout.AddWriter(writer, "")
-	return ioutils.NewBufReader(reader), nil
+	return ioutils.NewBufReader(reader)
 }
 
-func (streamConfig *StreamConfig) StderrPipe() (io.ReadCloser, error) {
+func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser {
 	reader, writer := io.Pipe()
 	streamConfig.stderr.AddWriter(writer, "")
-	return ioutils.NewBufReader(reader), nil
+	return ioutils.NewBufReader(reader)
 }
 
 func (streamConfig *StreamConfig) StdoutLogPipe() io.ReadCloser {
@@ -542,12 +550,17 @@ func (container *Container) AllocateNetwork() error {
 	container.NetworkSettings.IPPrefixLen = env.GetInt("IPPrefixLen")
 	container.NetworkSettings.MacAddress = env.Get("MacAddress")
 	container.NetworkSettings.Gateway = env.Get("Gateway")
+	container.NetworkSettings.LinkLocalIPv6Address = env.Get("LinkLocalIPv6")
+	container.NetworkSettings.LinkLocalIPv6PrefixLen = 64
+	container.NetworkSettings.GlobalIPv6Address = env.Get("GlobalIPv6")
+	container.NetworkSettings.GlobalIPv6PrefixLen = env.GetInt("GlobalIPv6PrefixLen")
+	container.NetworkSettings.IPv6Gateway = env.Get("IPv6Gateway")
 
 	return nil
 }
 
 func (container *Container) ReleaseNetwork() {
-	if container.Config.NetworkDisabled {
+	if container.Config.NetworkDisabled || !container.hostConfig.NetworkMode.IsPrivate() {
 		return
 	}
 	eng := container.daemon.eng
@@ -786,7 +799,7 @@ func (container *Container) GetImage() (*image.Image, error) {
 	if container.daemon == nil {
 		return nil, fmt.Errorf("Can't get image of unregistered container")
 	}
-	return container.daemon.graph.Get(container.Image)
+	return container.daemon.graph.Get(container.ImageID)
 }
 
 func (container *Container) Unmount() error {
@@ -891,8 +904,8 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
 	}
 
 	archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
-		Compression: archive.Uncompressed,
-		Includes:    filter,
+		Compression:  archive.Uncompressed,
+		IncludeFiles: filter,
 	})
 	if err != nil {
 		container.Unmount()
@@ -945,6 +958,29 @@ func (container *Container) DisableLink(name string) {
 
 func (container *Container) setupContainerDns() error {
 	if container.ResolvConfPath != "" {
+		// check if this is an existing container that needs DNS update:
+		if container.UpdateDns {
+			// read the host's resolv.conf, get the hash and call updateResolvConf
+			log.Debugf("Check container (%s) for update to resolv.conf - UpdateDns flag was set", container.ID)
+			latestResolvConf, latestHash := resolvconf.GetLastModified()
+
+			// clean container resolv.conf re: localhost nameservers and IPv6 NS (if IPv6 disabled)
+			updatedResolvConf, modified := resolvconf.FilterResolvDns(latestResolvConf, container.daemon.config.EnableIPv6)
+			if modified {
+				// changes have occurred during resolv.conf localhost cleanup: generate an updated hash
+				newHash, err := utils.HashData(bytes.NewReader(updatedResolvConf))
+				if err != nil {
+					return err
+				}
+				latestHash = newHash
+			}
+
+			if err := container.updateResolvConf(updatedResolvConf, latestHash); err != nil {
+				return err
+			}
+			// successful update of the restarting container; set the flag off
+			container.UpdateDns = false
+		}
 		return nil
 	}
 
@@ -982,32 +1018,106 @@ func (container *Container) setupContainerDns() error {
 			return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch)
 		}
 
-		// replace any localhost/127.* nameservers
-		resolvConf = utils.RemoveLocalDns(resolvConf)
-		// if the resulting resolvConf is empty, use DefaultDns
-		if !bytes.Contains(resolvConf, []byte("nameserver")) {
-			log.Infof("No non localhost DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v", DefaultDns)
-			// prefix the default dns options with nameserver
-			resolvConf = append(resolvConf, []byte("\nnameserver "+strings.Join(DefaultDns, "\nnameserver "))...)
-		}
+		// replace any localhost/127.*, and remove IPv6 nameservers if IPv6 disabled in daemon
+		resolvConf, _ = resolvconf.FilterResolvDns(resolvConf, daemon.config.EnableIPv6)
+	}
+	//get a sha256 hash of the resolv.conf at this point so we can check
+	//for changes when the host resolv.conf changes (e.g. network update)
+	resolvHash, err := utils.HashData(bytes.NewReader(resolvConf))
+	if err != nil {
+		return err
+	}
+	resolvHashFile := container.ResolvConfPath + ".hash"
+	if err = ioutil.WriteFile(resolvHashFile, []byte(resolvHash), 0644); err != nil {
+		return err
 	}
 	return ioutil.WriteFile(container.ResolvConfPath, resolvConf, 0644)
 }
 
-func (container *Container) updateParentsHosts() error {
-	parents, err := container.daemon.Parents(container.Name)
+// called when the host's resolv.conf changes to check whether the container's resolv.conf
+// is unchanged by the container "user" since container start: if unchanged, the
+// container's resolv.conf will be updated to match the host's new resolv.conf
+func (container *Container) updateResolvConf(updatedResolvConf []byte, newResolvHash string) error {
+
+	if container.ResolvConfPath == "" {
+		return nil
+	}
+	if container.Running {
+		//set a marker in the hostConfig to update on next start/restart
+		container.UpdateDns = true
+		return nil
+	}
+
+	resolvHashFile := container.ResolvConfPath + ".hash"
+
+	//read the container's current resolv.conf and compute the hash
+	resolvBytes, err := ioutil.ReadFile(container.ResolvConfPath)
 	if err != nil {
 		return err
 	}
-	for _, cid := range parents {
-		if cid == "0" {
-			continue
+	curHash, err := utils.HashData(bytes.NewReader(resolvBytes))
+	if err != nil {
+		return err
+	}
+
+	//read the hash from the last time we wrote resolv.conf in the container
+	hashBytes, err := ioutil.ReadFile(resolvHashFile)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+		// backwards compat: if no hash file exists, this container pre-existed from
+		// a Docker daemon that didn't contain this update feature. Given we can't know
+		// if the user has modified the resolv.conf since container start time, safer
+		// to just never update the container's resolv.conf during its lifetime, which
+		// we can control by setting hashBytes to an empty string
+		hashBytes = []byte("")
+	}
+
+	//if the user has not modified the resolv.conf of the container since we wrote it last
+	//we will replace it with the updated resolv.conf from the host
+	if string(hashBytes) == curHash {
+		log.Debugf("replacing %q with updated host resolv.conf", container.ResolvConfPath)
+
+		// for atomic updates to these files, use temporary files with os.Rename:
+		dir := path.Dir(container.ResolvConfPath)
+		tmpHashFile, err := ioutil.TempFile(dir, "hash")
+		if err != nil {
+			return err
+		}
+		tmpResolvFile, err := ioutil.TempFile(dir, "resolv")
+		if err != nil {
+			return err
 		}
 
-		c := container.daemon.Get(cid)
+		// write the updates to the temp files
+		if err = ioutil.WriteFile(tmpHashFile.Name(), []byte(newResolvHash), 0644); err != nil {
+			return err
+		}
+		if err = ioutil.WriteFile(tmpResolvFile.Name(), updatedResolvConf, 0644); err != nil {
+			return err
+		}
+
+		// rename the temp files for atomic replace
+		if err = os.Rename(tmpHashFile.Name(), resolvHashFile); err != nil {
+			return err
+		}
+		return os.Rename(tmpResolvFile.Name(), container.ResolvConfPath)
+	}
+	return nil
+}
+
+func (container *Container) updateParentsHosts() error {
+	refs := container.daemon.ContainerGraph().RefPaths(container.ID)
+	for _, ref := range refs {
+		if ref.ParentID == "0" {
+			continue
+		}
+		c := container.daemon.Get(ref.ParentID)
 		if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() {
-			if err := etchosts.Update(c.HostsPath, container.NetworkSettings.IPAddress, container.Name[1:]); err != nil {
-				log.Errorf("Failed to update /etc/hosts in parent container: %v", err)
+			log.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress)
+			if err := etchosts.Update(c.HostsPath, container.NetworkSettings.IPAddress, ref.Name); err != nil {
+				log.Errorf("Failed to update /etc/hosts in parent container %s for alias %s: %v", c.ID, ref.Name, err)
 			}
 		}
 	}
@@ -1301,3 +1411,7 @@ func (container *Container) getNetworkedContainer() (*Container, error) {
 		return nil, fmt.Errorf("network mode not set to container")
 	}
 }
+
+func (container *Container) Stats() (*execdriver.ResourceStats, error) {
+	return container.daemon.Stats(container)
+}
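
updateResolvConf above only replaces a container's resolv.conf when the file's current sha256 hash still matches the hash recorded at the last daemon write, and it performs the swap through temp files plus os.Rename so readers never observe a half-written file. Here is a self-contained sketch of that check-hash-then-rename idiom; hashData stands in for utils.HashData, and the demo paths are illustrative.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"os"
	"path"
)

// hashData returns a hex-encoded sha256 digest of b.
func hashData(b []byte) string {
	sum := sha256.Sum256(b)
	return hex.EncodeToString(sum[:])
}

// replaceIfUnmodified overwrites target with updated only if the file still
// matches lastWrittenHash, i.e. nobody has edited it since we last wrote it.
func replaceIfUnmodified(target, lastWrittenHash string, updated []byte) error {
	current, err := ioutil.ReadFile(target)
	if err != nil {
		return err
	}
	if hashData(current) != lastWrittenHash {
		return nil // the user modified the file; leave it alone
	}
	tmp, err := ioutil.TempFile(path.Dir(target), "resolv")
	if err != nil {
		return err
	}
	if err := ioutil.WriteFile(tmp.Name(), updated, 0644); err != nil {
		os.Remove(tmp.Name())
		return err
	}
	// atomic on the same filesystem, which TempFile in the same dir ensures
	return os.Rename(tmp.Name(), target)
}

func main() {
	f, _ := ioutil.TempFile("", "demo")
	old := []byte("nameserver 127.0.0.1\n")
	ioutil.WriteFile(f.Name(), old, 0644)
	err := replaceIfUnmodified(f.Name(), hashData(old), []byte("nameserver 8.8.8.8\n"))
	fmt.Println("replaced:", err)
}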

+ 22 - 11
daemon/create.go

@@ -5,6 +5,7 @@ import (
 
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/graph"
+	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/libcontainer/label"
@@ -29,6 +30,9 @@ func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
 		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
 		config.MemorySwap = -1
 	}
+	if config.Memory > 0 && config.MemorySwap > 0 && config.MemorySwap < config.Memory {
+		return job.Errorf("The memory-swap limit should be larger than the memory limit, see usage.\n")
+	}
 
 	var hostConfig *runconfig.HostConfig
 	if job.EnvExists("HostConfig") {
@@ -68,31 +72,38 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
 	var (
 		container *Container
 		warnings  []string
+		img       *image.Image
+		imgID     string
+		err       error
 	)
 
-	img, err := daemon.repositories.LookupImage(config.Image)
-	if err != nil {
-		return nil, nil, err
-	}
-	if err := img.CheckDepth(); err != nil {
-		return nil, nil, err
+	if config.Image != "" {
+		img, err = daemon.repositories.LookupImage(config.Image)
+		if err != nil {
+			return nil, nil, err
+		}
+		if err = img.CheckDepth(); err != nil {
+			return nil, nil, err
+		}
+		imgID = img.ID
 	}
+
 	if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
 		return nil, nil, err
 	}
 	if hostConfig != nil && hostConfig.SecurityOpt == nil {
-		hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode)
+		hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
 		if err != nil {
 			return nil, nil, err
 		}
 	}
-	if container, err = daemon.newContainer(name, config, img); err != nil {
+	if container, err = daemon.newContainer(name, config, imgID); err != nil {
 		return nil, nil, err
 	}
 	if err := daemon.Register(container); err != nil {
 		return nil, nil, err
 	}
-	if err := daemon.createRootfs(container, img); err != nil {
+	if err := daemon.createRootfs(container); err != nil {
 		return nil, nil, err
 	}
 	if hostConfig != nil {
@@ -113,8 +124,8 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
 	return container, warnings, nil
 }
 
-func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode) ([]string, error) {
-	if ipcMode.IsHost() {
+func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) {
+	if ipcMode.IsHost() || pidMode.IsHost() {
 		return label.DisableSecOpt(), nil
 	}
 	if ipcContainer := ipcMode.Container(); ipcContainer != "" {
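
The new create-time check above rejects a memory-swap limit smaller than the memory limit, since MemorySwap represents memory plus swap combined (with -1 disabling swap and 0 leaving it unset). A minimal sketch of the same validation, with a hypothetical error message:

package main

import "fmt"

// validateMemory mirrors the check in ContainerCreate: a positive memorySwap
// must be at least the memory limit because it covers memory + swap together.
func validateMemory(memory, memorySwap int64) error {
	if memory > 0 && memorySwap > 0 && memorySwap < memory {
		return fmt.Errorf("memory-swap (%d) must be larger than memory (%d)", memorySwap, memory)
	}
	return nil
}

func main() {
	fmt.Println(validateMemory(64<<20, 32<<20)) // rejected
	fmt.Println(validateMemory(64<<20, -1))     // nil: swap limit disabled
}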

+ 125 - 36
daemon/daemon.go

@@ -1,11 +1,13 @@
 package daemon
 
 import (
+	"bytes"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
 	"path"
+	"path/filepath"
 	"regexp"
 	"runtime"
 	"strings"
@@ -32,6 +34,7 @@ import (
 	"github.com/docker/docker/pkg/graphdb"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/namesgenerator"
+	"github.com/docker/docker/pkg/networkfs/resolvconf"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers/kernel"
 	"github.com/docker/docker/pkg/sysinfo"
@@ -40,10 +43,11 @@ import (
 	"github.com/docker/docker/trust"
 	"github.com/docker/docker/utils"
 	"github.com/docker/docker/volumes"
+
+	"github.com/go-fsnotify/fsnotify"
 )
 
 var (
-	DefaultDns                = []string{"8.8.8.8", "8.8.4.4"}
 	validContainerNameChars   = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
 	validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
 )
@@ -100,6 +104,7 @@ type Daemon struct {
 	driver         graphdriver.Driver
 	execDriver     execdriver.Driver
 	trustStore     *trust.TrustStore
+	statsCollector *statsCollector
 }
 
 // Install installs daemon capabilities to eng.
@@ -110,7 +115,9 @@ func (daemon *Daemon) Install(eng *engine.Engine) error {
 		"commit":            daemon.ContainerCommit,
 		"container_changes": daemon.ContainerChanges,
 		"container_copy":    daemon.ContainerCopy,
+		"container_rename":  daemon.ContainerRename,
 		"container_inspect": daemon.ContainerInspect,
+		"container_stats":   daemon.ContainerStats,
 		"containers":        daemon.Containers,
 		"create":            daemon.ContainerCreate,
 		"rm":                daemon.ContainerRm,
@@ -151,12 +158,18 @@ func (daemon *Daemon) Install(eng *engine.Engine) error {
 // Get looks for a container by the specified ID or name, and returns it.
 // If the container is not found, or if an error occurs, nil is returned.
 func (daemon *Daemon) Get(name string) *Container {
-	if id, err := daemon.idIndex.Get(name); err == nil {
+	id, err := daemon.idIndex.Get(name)
+	if err == nil {
 		return daemon.containers.Get(id)
 	}
+
 	if c, _ := daemon.GetByName(name); c != nil {
 		return c
 	}
+
+	if err == truncindex.ErrDuplicateID {
+		log.Errorf("Short ID %s is ambiguous: please retry with more characters or use the full ID.\n", name)
+	}
 	return nil
 }
 
@@ -227,6 +240,8 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
 	// we'll waste time if we update it for every container
 	daemon.idIndex.Add(container.ID)
 
+	container.registerVolumes()
+
 	// FIXME: if the container is supposed to be running but is not, auto restart it?
 	//        if so, then we need to restart monitor and init a new lock
 	// If the container is supposed to be running, make sure of it
@@ -234,7 +249,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
 		log.Debugf("killing old running container %s", container.ID)
 
 		existingPid := container.Pid
-		container.SetStopped(&execdriver.ExitStatus{0, false})
+		container.SetStopped(&execdriver.ExitStatus{ExitCode: 0})
 
 		// We only have to handle this for lxc because the other drivers will ensure that
 		// no processes are left when docker dies
@@ -266,7 +281,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
 
 			log.Debugf("Marking as stopped")
 
-			container.SetStopped(&execdriver.ExitStatus{-127, false})
+			container.SetStopped(&execdriver.ExitStatus{ExitCode: -127})
 			if err := container.ToDisk(); err != nil {
 				return err
 			}
@@ -390,10 +405,6 @@ func (daemon *Daemon) restore() error {
 		}
 	}
 
-	for _, c := range registeredContainers {
-		c.registerVolumes()
-	}
-
 	if !debug {
 		fmt.Println()
 		log.Infof("Loading containers: done.")
@@ -402,6 +413,60 @@ func (daemon *Daemon) restore() error {
 	return nil
 }
 
+// set up the watch on the host's /etc/resolv.conf so that we can update each container's
+// live resolv.conf when the network changes on the host
+func (daemon *Daemon) setupResolvconfWatcher() error {
+
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		return err
+	}
+
+	// this goroutine listens for events on the watch we add
+	// on the host's resolv.conf file
+	go func() {
+		for {
+			select {
+			case event := <-watcher.Events:
+				if event.Op&fsnotify.Write == fsnotify.Write {
+					// verify a real change happened before we go further--a file write may have happened
+					// without an actual change to the file
+					updatedResolvConf, newResolvConfHash, err := resolvconf.GetIfChanged()
+					if err != nil {
+						log.Debugf("Error retrieving updated host resolv.conf: %v", err)
+					} else if updatedResolvConf != nil {
+						// because the new host resolv.conf might have localhost nameservers..
+						updatedResolvConf, modified := resolvconf.FilterResolvDns(updatedResolvConf, daemon.config.EnableIPv6)
+						if modified {
+							// changes have occurred during localhost cleanup: generate an updated hash
+							newHash, err := utils.HashData(bytes.NewReader(updatedResolvConf))
+							if err != nil {
+								log.Debugf("Error generating hash of new resolv.conf: %v", err)
+							} else {
+								newResolvConfHash = newHash
+							}
+						}
+						log.Debugf("host network resolv.conf changed--walking container list for updates")
+						contList := daemon.containers.List()
+						for _, container := range contList {
+							if err := container.updateResolvConf(updatedResolvConf, newResolvConfHash); err != nil {
+								log.Debugf("Error on resolv.conf update check for container ID: %s: %v", container.ID, err)
+							}
+						}
+					}
+				}
+			case err := <-watcher.Errors:
+				log.Debugf("host resolv.conf notify error: %v", err)
+			}
+		}
+	}()
+
+	if err := watcher.Add("/etc/resolv.conf"); err != nil {
+		return err
+	}
+	return nil
+}
+
 func (daemon *Daemon) checkDeprecatedExpose(config *runconfig.Config) bool {
 	if config != nil {
 		if config.PortSpecs != nil {
@@ -417,10 +482,10 @@ func (daemon *Daemon) checkDeprecatedExpose(config *runconfig.Config) bool {
 
 func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.Image) ([]string, error) {
 	warnings := []string{}
-	if daemon.checkDeprecatedExpose(img.Config) || daemon.checkDeprecatedExpose(config) {
+	if (img != nil && daemon.checkDeprecatedExpose(img.Config)) || daemon.checkDeprecatedExpose(config) {
 		warnings = append(warnings, "The mapping to public ports on your host via Dockerfile EXPOSE (host:port:port) has been deprecated. Use -p to publish the ports.")
 	}
-	if img.Config != nil {
+	if img != nil && img.Config != nil {
 		if err := runconfig.Merge(config, img.Config); err != nil {
 			return nil, err
 		}
@@ -478,8 +543,8 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) {
 		} else {
 			nameAsKnownByUser := strings.TrimPrefix(name, "/")
 			return "", fmt.Errorf(
-				"Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", nameAsKnownByUser,
-				utils.TruncateID(conflictingContainer.ID), nameAsKnownByUser)
+				"Conflict. The name %q is already in use by container %s. You have to delete (or rename) that container to be able to reuse that name.", nameAsKnownByUser,
+				utils.TruncateID(conflictingContainer.ID))
 		}
 	}
 	return name, nil
@@ -557,7 +622,7 @@ func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error
 	return err
 }
 
-func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *image.Image) (*Container, error) {
+func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID string) (*Container, error) {
 	var (
 		id  string
 		err error
@@ -578,7 +643,7 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *i
 		Args:            args, //FIXME: de-duplicate from config
 		Config:          config,
 		hostConfig:      &runconfig.HostConfig{},
-		Image:           img.ID, // Always use the resolved image id
+		ImageID:         imgID,
 		NetworkSettings: &NetworkSettings{},
 		Name:            name,
 		Driver:          daemon.driver.String(),
@@ -590,14 +655,14 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *i
 	return container, err
 }
 
-func (daemon *Daemon) createRootfs(container *Container, img *image.Image) error {
+func (daemon *Daemon) createRootfs(container *Container) error {
 	// Step 1: create the container directory.
 	// This doubles as a barrier to avoid race conditions.
 	if err := os.Mkdir(container.root, 0700); err != nil {
 		return err
 	}
 	initID := fmt.Sprintf("%s-init", container.ID)
-	if err := daemon.driver.Create(initID, img.ID); err != nil {
+	if err := daemon.driver.Create(initID, container.ImageID); err != nil {
 		return err
 	}
 	initPath, err := daemon.driver.Get(initID, "")
@@ -689,10 +754,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
 			if err != nil {
 				return err
 			}
-			child, err := daemon.GetByName(parts["name"])
-			if err != nil {
-				return err
-			}
+			child := daemon.Get(parts["name"])
 			if child == nil {
 				return fmt.Errorf("Could not get container for %s", parts["name"])
 			}
@@ -758,7 +820,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
 	if os.Geteuid() != 0 {
 		return nil, fmt.Errorf("The Docker daemon needs to be run as root")
 	}
-	if err := checkKernelAndArch(); err != nil {
+	if err := checkKernel(); err != nil {
 		return nil, err
 	}
 
@@ -829,13 +891,18 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
 		return nil, err
 	}
 
-	volumes, err := volumes.NewRepository(path.Join(config.Root, "volumes"), volumesDriver)
+	volumes, err := volumes.NewRepository(filepath.Join(config.Root, "volumes"), volumesDriver)
+	if err != nil {
+		return nil, err
+	}
+
+	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
 	if err != nil {
 		return nil, err
 	}
 
 	log.Debugf("Creating repository list")
-	repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g, config.Mirrors, config.InsecureRegistries)
+	repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g, trustKey)
 	if err != nil {
 		return nil, fmt.Errorf("Couldn't create Tag store: %s", err)
 	}
@@ -856,9 +923,11 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
 		job.SetenvBool("InterContainerCommunication", config.InterContainerCommunication)
 		job.SetenvBool("EnableIpForward", config.EnableIpForward)
 		job.SetenvBool("EnableIpMasq", config.EnableIpMasq)
+		job.SetenvBool("EnableIPv6", config.EnableIPv6)
 		job.Setenv("BridgeIface", config.BridgeIface)
 		job.Setenv("BridgeIP", config.BridgeIP)
 		job.Setenv("FixedCIDR", config.FixedCIDR)
+		job.Setenv("FixedCIDRv6", config.FixedCIDRv6)
 		job.Setenv("DefaultBindingIP", config.DefaultIp.String())
 
 		if err := job.Run(); err != nil {
@@ -898,11 +967,6 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
 		return nil, err
 	}
 
-	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
-	if err != nil {
-		return nil, err
-	}
-
 	daemon := &Daemon{
 		ID:             trustKey.PublicKey().KeyID(),
 		repository:     daemonRepo,
@@ -920,10 +984,17 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
 		execDriver:     ed,
 		eng:            eng,
 		trustStore:     t,
+		statsCollector: newStatsCollector(1 * time.Second),
 	}
 	if err := daemon.restore(); err != nil {
 		return nil, err
 	}
+
+	// set up filesystem watch on resolv.conf for network changes
+	if err := daemon.setupResolvconfWatcher(); err != nil {
+		return nil, err
+	}
+
 	// Setup shutdown handlers
 	// FIXME: can these shutdown handlers be registered closer to their source?
 	eng.OnShutdown(func() {
@@ -1024,6 +1095,28 @@ func (daemon *Daemon) Kill(c *Container, sig int) error {
 	return daemon.execDriver.Kill(c.command, sig)
 }
 
+func (daemon *Daemon) Stats(c *Container) (*execdriver.ResourceStats, error) {
+	return daemon.execDriver.Stats(c.ID)
+}
+
+func (daemon *Daemon) SubscribeToContainerStats(name string) (chan interface{}, error) {
+	c := daemon.Get(name)
+	if c == nil {
+		return nil, fmt.Errorf("no such container: %s", name)
+	}
+	ch := daemon.statsCollector.collect(c)
+	return ch, nil
+}
+
+func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface{}) error {
+	c := daemon.Get(name)
+	if c == nil {
+		return fmt.Errorf("no such container: %s", name)
+	}
+	daemon.statsCollector.unsubscribe(c, ch)
+	return nil
+}
+
 // Nuke kills all containers then removes all content
 // from the content root, including images, volumes and
 // container filesystems.
@@ -1099,9 +1192,9 @@ func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*i
 	// Loop on the children of the given image and check the config
 	var match *image.Image
 	for elem := range imageMap[imgID] {
-		img, err := daemon.Graph().Get(elem)
-		if err != nil {
-			return nil, err
+		img, ok := images[elem]
+		if !ok {
+			return nil, fmt.Errorf("unable to find image %q", elem)
 		}
 		if runconfig.Compare(&img.ContainerConfig, config) {
 			if match == nil || match.Created.Before(img.Created) {
@@ -1112,11 +1205,7 @@ func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*i
 	return match, nil
 }
 
-func checkKernelAndArch() error {
-	// Check for unsupported architectures
-	if runtime.GOARCH != "amd64" {
-		return fmt.Errorf("The Docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
-	}
+func checkKernel() error {
 	// Check for unsupported kernel versions
 	// FIXME: it would be cleaner to not test for specific versions, but rather
 	// test for specific functionalities.
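
The subscribe/unsubscribe pair added above is the daemon-side half of the new `docker stats` plumbing. A minimal consumer sketch, assuming the channel carries `*execdriver.ResourceStats` values (the payload type is not pinned down by this diff):

	package stats

	import (
		"fmt"

		"github.com/docker/docker/daemon/execdriver"
	)

	// statsSource captures just the two methods added above; *Daemon
	// satisfies it.
	type statsSource interface {
		SubscribeToContainerStats(name string) (chan interface{}, error)
		UnsubscribeToContainerStats(name string, ch chan interface{}) error
	}

	func streamStats(d statsSource, name string) error {
		ch, err := d.SubscribeToContainerStats(name)
		if err != nil {
			return err
		}
		// Always unsubscribe so the collector can drop this channel.
		defer d.UnsubscribeToContainerStats(name, ch)

		for update := range ch {
			// Assumed payload type; ignore anything else.
			if s, ok := update.(*execdriver.ResourceStats); ok {
				fmt.Printf("read at %s, memory limit %d bytes\n", s.Read, s.MemoryLimit)
			}
		}
		return nil
	}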

+ 4 - 1
daemon/delete.go

@@ -49,13 +49,16 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
 	}
 
 	if container != nil {
+		// stop collection of stats for the container regardless
+		// if stats are currently getting collected.
+		daemon.statsCollector.stopCollection(container)
 		if container.IsRunning() {
 			if forceRemove {
 				if err := container.Kill(); err != nil {
 					return job.Errorf("Could not kill running container, cannot remove - %v", err)
 				}
 			} else {
-				return job.Errorf("You cannot remove a running container. Stop the container before attempting removal or use -f")
+				return job.Errorf("Conflict: you cannot remove a running container. Stop the container before attempting removal, or use -f")
 			}
 		}
 		if err := daemon.Destroy(container); err != nil {

+ 21 - 5
daemon/exec.go

@@ -1,5 +1,3 @@
-// build linux
-
 package daemon
 
 import (
@@ -35,7 +33,7 @@ type execConfig struct {
 
 type execStore struct {
 	s map[string]*execConfig
-	sync.Mutex
+	sync.RWMutex
 }
 
 func newExecStore() *execStore {
@@ -49,9 +47,9 @@ func (e *execStore) Add(id string, execConfig *execConfig) {
 }
 
 func (e *execStore) Get(id string) *execConfig {
-	e.Lock()
+	e.RLock()
 	res := e.s[id]
-	e.Unlock()
+	e.RUnlock()
 	return res
 }
 
@@ -61,6 +59,16 @@ func (e *execStore) Delete(id string) {
 	e.Unlock()
 }
 
+func (e *execStore) List() []string {
+	var IDs []string
+	e.RLock()
+	for id := range e.s {
+		IDs = append(IDs, id)
+	}
+	e.RUnlock()
+	return IDs
+}
+
 func (execConfig *execConfig) Resize(h, w int) error {
 	return execConfig.ProcessConfig.Terminal.Resize(h, w)
 }
@@ -144,6 +152,8 @@ func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status {
 		Running:       false,
 	}
 
+	container.LogEvent("exec_create: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " "))
+
 	d.registerExecCommand(execConfig)
 
 	job.Printf("%s\n", execConfig.ID)
@@ -182,6 +192,8 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
 	log.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID)
 	container := execConfig.Container
 
+	container.LogEvent("exec_start: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " "))
+
 	if execConfig.OpenStdin {
 		r, w := io.Pipe()
 		go func() {
@@ -249,6 +261,10 @@ func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pi
 	return exitStatus, err
 }
 
+func (container *Container) GetExecIDs() []string {
+	return container.execCommands.List()
+}
+
 func (container *Container) Exec(execConfig *execConfig) error {
 	container.Lock()
 	defer container.Unlock()
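
Switching execStore from `sync.Mutex` to `sync.RWMutex` above lets `Get` and the new `List` proceed concurrently across API requests, while `Add` and `Delete` still take the exclusive lock. The pattern in isolation, as a runnable sketch:

	package main

	import (
		"fmt"
		"sync"
	)

	// store mirrors execStore: reads take the shared lock, writes the
	// exclusive one, so concurrent readers never block each other.
	type store struct {
		s map[string]string
		sync.RWMutex
	}

	func (e *store) Add(id, v string) {
		e.Lock()
		e.s[id] = v
		e.Unlock()
	}

	func (e *store) Get(id string) string {
		e.RLock()
		v := e.s[id]
		e.RUnlock()
		return v
	}

	func (e *store) List() []string {
		var ids []string
		e.RLock()
		for id := range e.s {
			ids = append(ids, id)
		}
		e.RUnlock()
		return ids
	}

	func main() {
		st := &store{s: make(map[string]string)}
		st.Add("d3adb33f", "top")
		fmt.Println(st.Get("d3adb33f"), st.List())
	}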

+ 28 - 7
daemon/execdriver/driver.go

@@ -5,7 +5,9 @@ import (
 	"io"
 	"os"
 	"os/exec"
+	"time"
 
+	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/devices"
 )
 
@@ -14,7 +16,7 @@ import (
 type Context map[string]string
 
 var (
-	ErrNotRunning              = errors.New("Process could not be started")
+	ErrNotRunning              = errors.New("Container is not running")
 	ErrWaitTimeoutReached      = errors.New("Wait timeout reached")
 	ErrDriverAlreadyRegistered = errors.New("A driver already registered this docker init function")
 	ErrDriverNotFound          = errors.New("The requested docker init has not been found")
@@ -61,6 +63,7 @@ type Driver interface {
 	GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container.
 	Terminate(c *Command) error                   // kill it with fire
 	Clean(id string) error                        // clean all traces of container exec
+	Stats(id string) (*ResourceStats, error)      // Get resource stats for a running container
 }
 
 // Network settings of the container
@@ -77,12 +80,21 @@ type Ipc struct {
 	HostIpc     bool   `json:"host_ipc"`
 }
 
+// PID settings of the container
+type Pid struct {
+	HostPid bool `json:"host_pid"`
+}
+
 type NetworkInterface struct {
-	Gateway     string `json:"gateway"`
-	IPAddress   string `json:"ip"`
-	IPPrefixLen int    `json:"ip_prefix_len"`
-	MacAddress  string `json:"mac_address"`
-	Bridge      string `json:"bridge"`
+	Gateway              string `json:"gateway"`
+	IPAddress            string `json:"ip"`
+	IPPrefixLen          int    `json:"ip_prefix_len"`
+	MacAddress           string `json:"mac"`
+	Bridge               string `json:"bridge"`
+	GlobalIPv6Address    string `json:"global_ipv6"`
+	LinkLocalIPv6Address string `json:"link_local_ipv6"`
+	GlobalIPv6PrefixLen  int    `json:"global_ipv6_prefix_len"`
+	IPv6Gateway          string `json:"ipv6_gateway"`
 }
 
 type Resources struct {
@@ -92,6 +104,13 @@ type Resources struct {
 	Cpuset     string `json:"cpuset"`
 }
 
+type ResourceStats struct {
+	*libcontainer.ContainerStats
+	Read        time.Time `json:"read"`
+	MemoryLimit int64     `json:"memory_limit"`
+	SystemUsage uint64    `json:"system_usage"`
+}
+
 type Mount struct {
 	Source      string `json:"source"`
 	Destination string `json:"destination"`
@@ -116,12 +135,14 @@ type ProcessConfig struct {
 // Command wraps an os/exec.Cmd to add more metadata
 type Command struct {
 	ID                 string            `json:"id"`
-	Rootfs             string            `json:"rootfs"`   // root fs of the container
+	Rootfs             string            `json:"rootfs"` // root fs of the container
+	ReadonlyRootfs     bool              `json:"readonly_rootfs"`
 	InitPath           string            `json:"initpath"` // dockerinit
 	WorkingDir         string            `json:"working_dir"`
 	ConfigPath         string            `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver
 	Network            *Network          `json:"network"`
 	Ipc                *Ipc              `json:"ipc"`
+	Pid                *Pid              `json:"pid"`
 	Resources          *Resources        `json:"resources"`
 	Mounts             []Mount           `json:"mounts"`
 	AllowedDevices     []*devices.Device `json:"allowed_devices"`
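
`ResourceStats` pairs each cgroup sample with a read timestamp and the host's total CPU usage (`SystemUsage`), which is what a client needs to turn two samples into a CPU percentage. A sketch of that arithmetic; the scaling formula is the conventional cgroup one and is an assumption, since this diff does not spell it out:

	// cpuPercent turns two ResourceStats samples into a CPU percentage.
	// containerDelta is the growth of the container's total CPU time and
	// systemDelta the growth of SystemUsage over the same window, both in
	// nanoseconds; ncpus is the number of online CPUs.
	func cpuPercent(containerDelta, systemDelta uint64, ncpus int) float64 {
		if systemDelta == 0 {
			return 0.0
		}
		return (float64(containerDelta) / float64(systemDelta)) * float64(ncpus) * 100.0
	}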

+ 2 - 1
daemon/execdriver/execdrivers/execdrivers.go

@@ -2,11 +2,12 @@ package execdrivers
 
 import (
 	"fmt"
+	"path"
+
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/execdriver/lxc"
 	"github.com/docker/docker/daemon/execdriver/native"
 	"github.com/docker/docker/pkg/sysinfo"
-	"path"
 )
 
 func NewDriver(name, root, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) {

+ 11 - 6
daemon/execdriver/lxc/driver.go

@@ -76,11 +76,11 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	})
 
 	if err := d.generateEnvConfig(c); err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 	configPath, err := d.generateLXCConfig(c)
 	if err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 	params := []string{
 		"lxc-start",
@@ -154,11 +154,11 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	c.ProcessConfig.Args = append([]string{name}, arg...)
 
 	if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 
 	if err := c.ProcessConfig.Start(); err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 
 	var (
@@ -182,7 +182,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 			c.ProcessConfig.Process.Kill()
 			c.ProcessConfig.Wait()
 		}
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 
 	c.ContainerPid = pid
@@ -193,7 +193,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 
 	<-waitLock
 
-	return execdriver.ExitStatus{getExitCode(c), false}, waitErr
+	return execdriver.ExitStatus{ExitCode: getExitCode(c)}, waitErr
 }
 
 // Return the exit code of the process
@@ -524,3 +524,8 @@ func (t *TtyConsole) Close() error {
 func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
 	return -1, ErrExec
 }
+
+func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
+	return nil, fmt.Errorf("container stats are not supported with LXC")
+}
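
The repeated rewrite of `ExitStatus{-1, false}` to `ExitStatus{ExitCode: -1}` here (and in the native driver below) is not cosmetic: named fields keep every construction site compiling when the struct grows, and `OOMKilled` is exactly such a late addition. In miniature:

	// With positional literals, every construction site breaks when the
	// struct grows; with named fields, omitted members take their zero
	// values.
	type ExitStatus struct {
		ExitCode  int
		OOMKilled bool // added in this release without touching old literals
	}

	func failure() ExitStatus {
		return ExitStatus{ExitCode: -1} // OOMKilled defaults to false
	}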

+ 5 - 4
daemon/execdriver/lxc/lxc_init_linux.go

@@ -2,6 +2,8 @@ package lxc
 
 import (
 	"fmt"
+
+	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/namespaces"
 	"github.com/docker/libcontainer/utils"
 )
@@ -10,14 +12,13 @@ func finalizeNamespace(args *InitArgs) error {
 	if err := utils.CloseExecFrom(3); err != nil {
 		return err
 	}
-
-	if err := namespaces.SetupUser(args.User); err != nil {
+	if err := namespaces.SetupUser(&libcontainer.Config{
+		User: args.User,
+	}); err != nil {
 		return fmt.Errorf("setup user %s", err)
 	}
-
 	if err := setupWorkingDirectory(args); err != nil {
 		return err
 	}
-
 	return nil
 }

+ 77 - 35
daemon/execdriver/lxc/lxc_template.go

@@ -1,12 +1,17 @@
 package lxc
 
 import (
-	"github.com/docker/docker/daemon/execdriver"
-	nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template"
-	"github.com/docker/libcontainer/label"
+	"fmt"
 	"os"
 	"strings"
 	"text/template"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/execdriver"
+	nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template"
+	"github.com/docker/docker/utils"
+	"github.com/docker/libcontainer/label"
+	"github.com/docker/libcontainer/security/capabilities"
 )
 
 const LxcTemplate = `
@@ -16,12 +21,6 @@ lxc.network.type = veth
 lxc.network.link = {{.Network.Interface.Bridge}}
 lxc.network.name = eth0
 lxc.network.mtu = {{.Network.Mtu}}
-{{if .Network.Interface.IPAddress}}
-lxc.network.ipv4 = {{.Network.Interface.IPAddress}}/{{.Network.Interface.IPPrefixLen}}
-{{end}}
-{{if .Network.Interface.Gateway}}
-lxc.network.ipv4.gateway = {{.Network.Interface.Gateway}}
-{{end}}
 lxc.network.flags = up
 {{else if .Network.HostNetworking}}
 lxc.network.type = none
@@ -62,13 +61,24 @@ lxc.cgroup.devices.allow = {{$allowedDevice.GetCgroupAllowString}}
 lxc.pivotdir = lxc_putold
 
 # NOTICE: These mounts must be applied within the namespace
-
+{{if .ProcessConfig.Privileged}}
 # WARNING: mounting procfs and/or sysfs read-write is a known attack vector.
 # See e.g. http://blog.zx2c4.com/749 and http://bit.ly/T9CkqJ
 # We mount them read-write here, but later, dockerinit will call the Restrict() function to remount them read-only.
 # We cannot mount them directly read-only, because that would prevent loading AppArmor profiles.
 lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noexec 0 0
 lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0
+	{{if .AppArmor}}
+lxc.aa_profile = unconfined
+	{{end}}
+{{else}}
+# In non-privileged mode, lxc will automatically mount /proc and /sys in readonly mode
+# for security. See: http://man7.org/linux/man-pages/man5/lxc.container.conf.5.html
+lxc.mount.auto = proc sys
+	{{if .AppArmorProfile}}
+lxc.aa_profile = {{.AppArmorProfile}}
+	{{end}}
+{{end}}
 
 {{if .ProcessConfig.Tty}}
 lxc.mount.entry = {{.ProcessConfig.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0
@@ -86,26 +96,6 @@ lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabS
 {{end}}
 {{end}}
 
-{{if .ProcessConfig.Env}}
-lxc.utsname = {{getHostname .ProcessConfig.Env}}
-{{end}}
-
-{{if .ProcessConfig.Privileged}}
-# No cap values are needed, as lxc is starting in privileged mode
-{{else}}
-{{range $value := keepCapabilities .CapAdd .CapDrop}}
-lxc.cap.keep = {{$value}}
-{{end}}
-{{end}}
-
-{{if .ProcessConfig.Privileged}}
-{{if .AppArmor}}
-lxc.aa_profile = unconfined
-{{else}}
-# Let AppArmor normal confinement take place (i.e., not unconfined)
-{{end}}
-{{end}}
-
 # limits
 {{if .Resources}}
 {{if .Resources.Memory}}
@@ -128,6 +118,35 @@ lxc.cgroup.cpuset.cpus = {{.Resources.Cpuset}}
 lxc.{{$value}}
 {{end}}
 {{end}}
+
+{{if .Network.Interface}}
+{{if .Network.Interface.IPAddress}}
+lxc.network.ipv4 = {{.Network.Interface.IPAddress}}/{{.Network.Interface.IPPrefixLen}}
+{{end}}
+{{if .Network.Interface.Gateway}}
+lxc.network.ipv4.gateway = {{.Network.Interface.Gateway}}
+{{end}}
+
+{{if .ProcessConfig.Env}}
+lxc.utsname = {{getHostname .ProcessConfig.Env}}
+{{end}}
+
+{{if .ProcessConfig.Privileged}}
+# No cap values are needed, as lxc is starting in privileged mode
+{{else}}
+	{{ with keepCapabilities .CapAdd .CapDrop }}
+		{{range .}}
+lxc.cap.keep = {{.}}
+		{{end}}
+	{{else}}
+		{{ with dropList .CapDrop }}
+		{{range .}}
+lxc.cap.drop = {{.}}
+		{{end}}
+		{{end}}
+	{{end}}
+{{end}}
+{{end}}
 `
 
 var LxcTemplateCompiled *template.Template
@@ -138,17 +157,39 @@ func escapeFstabSpaces(field string) string {
 	return strings.Replace(field, " ", "\\040", -1)
 }
 
-func keepCapabilities(adds []string, drops []string) []string {
+func keepCapabilities(adds []string, drops []string) ([]string, error) {
 	container := nativeTemplate.New()
+	log.Debugf("adds %s drops %s\n", adds, drops)
 	caps, err := execdriver.TweakCapabilities(container.Capabilities, adds, drops)
+	if err != nil {
+		return nil, err
+	}
 	var newCaps []string
 	for _, cap := range caps {
-		newCaps = append(newCaps, strings.ToLower(cap))
+		log.Debugf("cap %s\n", cap)
+		realCap := capabilities.GetCapability(cap)
+		numCap := fmt.Sprintf("%d", realCap.Value)
+		newCaps = append(newCaps, numCap)
 	}
-	if err != nil {
-		return []string{}
+
+	return newCaps, nil
+}
+
+func dropList(drops []string) ([]string, error) {
+	if utils.StringsContainsNoCase(drops, "all") {
+		var newCaps []string
+		for _, cap := range capabilities.GetAllCapabilities() {
+			log.Debugf("drop cap %s\n", cap)
+			realCap := capabilities.GetCapability(cap)
+			if realCap == nil {
+				return nil, fmt.Errorf("Invalid capability '%s'", cap)
+			}
+			numCap := fmt.Sprintf("%d", realCap.Value)
+			newCaps = append(newCaps, numCap)
+		}
+		return newCaps, nil
 	}
-	return newCaps
+	return []string{}, nil
 }
 
 func isDirectory(source string) string {
@@ -203,6 +244,7 @@ func init() {
 		"formatMountLabel":  label.FormatMountLabel,
 		"isDirectory":       isDirectory,
 		"keepCapabilities":  keepCapabilities,
+		"dropList":          dropList,
 		"getHostname":       getHostname,
 	}
 	LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate)
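
`keepCapabilities` and `dropList` now return `([]string, error)`, which `text/template` supports directly: a non-nil error aborts `Execute`, and an empty slice sends `{{with}}` into its `{{else}}` branch, which is how the template above falls back from `lxc.cap.keep` to `lxc.cap.drop`. A self-contained sketch of that mechanism (the names here are illustrative only):

	package main

	import (
		"os"
		"text/template"
	)

	// keep mimics keepCapabilities: (value, error) returns are legal for
	// template functions, and a non-nil error aborts Execute.
	func keep(adds, drops []string) ([]string, error) {
		if len(adds) == 0 && len(drops) == 0 {
			return nil, nil
		}
		return adds, nil
	}

	const src = "{{with keep .Adds .Drops}}{{range .}}keep = {{.}}\n{{end}}{{else}}nothing kept\n{{end}}"

	func main() {
		t := template.Must(template.New("caps").
			Funcs(template.FuncMap{"keep": keep}).
			Parse(src))
		data := struct{ Adds, Drops []string }{Adds: []string{"NET_ADMIN"}}
		// Prints "keep = NET_ADMIN"; with both lists empty it prints
		// "nothing kept", the cap.drop-style fallback.
		if err := t.Execute(os.Stdout, data); err != nil {
			panic(err)
		}
	}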

+ 80 - 12
daemon/execdriver/lxc/lxc_template_unit_test.go

@@ -5,6 +5,11 @@ package lxc
 import (
 	"bufio"
 	"fmt"
+	"github.com/docker/docker/daemon/execdriver"
+	nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template"
+	"github.com/docker/libcontainer/devices"
+	"github.com/docker/libcontainer/security/capabilities"
+	"github.com/syndtr/gocapability/capability"
 	"io/ioutil"
 	"math/rand"
 	"os"
@@ -12,10 +17,6 @@ import (
 	"strings"
 	"testing"
 	"time"
-
-	"github.com/docker/docker/daemon/execdriver"
-	nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template"
-	"github.com/docker/libcontainer/devices"
 )
 
 func TestLXCConfig(t *testing.T) {
@@ -241,6 +242,71 @@ func TestCustomLxcConfigMounts(t *testing.T) {
 }
 
 func TestCustomLxcConfigMisc(t *testing.T) {
+	root, err := ioutil.TempDir("", "TestCustomLxcConfig")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(root)
+	os.MkdirAll(path.Join(root, "containers", "1"), 0777)
+	driver, err := NewDriver(root, "", true)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	processConfig := execdriver.ProcessConfig{
+		Privileged: false,
+	}
+
+	processConfig.Env = []string{"HOSTNAME=testhost"}
+	command := &execdriver.Command{
+		ID: "1",
+		LxcConfig: []string{
+			"lxc.cgroup.cpuset.cpus = 0,1",
+		},
+		Network: &execdriver.Network{
+			Mtu: 1500,
+			Interface: &execdriver.NetworkInterface{
+				Gateway:     "10.10.10.1",
+				IPAddress:   "10.10.10.10",
+				IPPrefixLen: 24,
+				Bridge:      "docker0",
+			},
+		},
+		ProcessConfig:   processConfig,
+		CapAdd:          []string{"net_admin", "syslog"},
+		CapDrop:         []string{"kill", "mknod"},
+		AppArmorProfile: "lxc-container-default-with-nesting",
+	}
+
+	p, err := driver.generateLXCConfig(command)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// network
+	grepFile(t, p, "lxc.network.type = veth")
+	grepFile(t, p, "lxc.network.link = docker0")
+	grepFile(t, p, "lxc.network.name = eth0")
+	grepFile(t, p, "lxc.network.ipv4 = 10.10.10.10/24")
+	grepFile(t, p, "lxc.network.ipv4.gateway = 10.10.10.1")
+	grepFile(t, p, "lxc.network.flags = up")
+	grepFile(t, p, "lxc.aa_profile = lxc-container-default-with-nesting")
+	// hostname
+	grepFile(t, p, "lxc.utsname = testhost")
+	grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1")
+	container := nativeTemplate.New()
+	for _, cap := range container.Capabilities {
+		realCap := capabilities.GetCapability(cap)
+		numCap := fmt.Sprintf("%d", realCap.Value)
+		if cap != "MKNOD" && cap != "KILL" {
+			grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", numCap))
+		}
+	}
+
+	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_KILL), true)
+	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_MKNOD), true)
+}
+
+func TestCustomLxcConfigMiscOverride(t *testing.T) {
 	root, err := ioutil.TempDir("", "TestCustomLxcConfig")
 	if err != nil {
 		t.Fatal(err)
@@ -260,6 +326,7 @@ func TestCustomLxcConfigMisc(t *testing.T) {
 		ID: "1",
 		LxcConfig: []string{
 			"lxc.cgroup.cpuset.cpus = 0,1",
+			"lxc.network.ipv4 = 172.0.0.1",
 		},
 		Network: &execdriver.Network{
 			Mtu: 1500,
@@ -271,8 +338,8 @@ func TestCustomLxcConfigMisc(t *testing.T) {
 			},
 		},
 		ProcessConfig: processConfig,
-		CapAdd:        []string{"net_admin", "syslog"},
-		CapDrop:       []string{"kill", "mknod"},
+		CapAdd:        []string{"NET_ADMIN", "SYSLOG"},
+		CapDrop:       []string{"KILL", "MKNOD"},
 	}
 
 	p, err := driver.generateLXCConfig(command)
@@ -283,7 +350,7 @@ func TestCustomLxcConfigMisc(t *testing.T) {
 	grepFile(t, p, "lxc.network.type = veth")
 	grepFile(t, p, "lxc.network.link = docker0")
 	grepFile(t, p, "lxc.network.name = eth0")
-	grepFile(t, p, "lxc.network.ipv4 = 10.10.10.10/24")
+	grepFile(t, p, "lxc.network.ipv4 = 172.0.0.1")
 	grepFile(t, p, "lxc.network.ipv4.gateway = 10.10.10.1")
 	grepFile(t, p, "lxc.network.flags = up")
 
@@ -292,11 +359,12 @@ func TestCustomLxcConfigMisc(t *testing.T) {
 	grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1")
 	container := nativeTemplate.New()
 	for _, cap := range container.Capabilities {
-		cap = strings.ToLower(cap)
-		if cap != "mknod" && cap != "kill" {
-			grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", cap))
+		realCap := capabilities.GetCapability(cap)
+		numCap := fmt.Sprintf("%d", realCap.Value)
+		if cap != "MKNOD" && cap != "KILL" {
+			grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", numCap))
 		}
 	}
-	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = kill"), true)
-	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = mknod"), true)
+	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_KILL), true)
+	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_MKNOD), true)
 }

+ 22 - 7
daemon/execdriver/native/create.go

@@ -31,6 +31,7 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, e
 	container.Cgroups.AllowedDevices = c.AllowedDevices
 	container.MountConfig.DeviceNodes = c.AutoCreatedDevices
 	container.RootFs = c.Rootfs
+	container.MountConfig.ReadonlyFs = c.ReadonlyRootfs
 
 	// check to see if we are running in ramdisk to disable pivot root
 	container.MountConfig.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
@@ -40,6 +41,10 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, e
 		return nil, err
 	}
 
+	if err := d.createPid(container, c); err != nil {
+		return nil, err
+	}
+
 	if err := d.createNetwork(container, c); err != nil {
 		return nil, err
 	}
@@ -82,7 +87,7 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, e
 
 func (d *driver) createNetwork(container *libcontainer.Config, c *execdriver.Command) error {
 	if c.Network.HostNetworking {
-		container.Namespaces["NEWNET"] = false
+		container.Namespaces.Remove(libcontainer.NEWNET)
 		return nil
 	}
 
@@ -105,6 +110,10 @@ func (d *driver) createNetwork(container *libcontainer.Config, c *execdriver.Com
 			Bridge:     c.Network.Interface.Bridge,
 			VethPrefix: "veth",
 		}
+		if c.Network.Interface.GlobalIPv6Address != "" {
+			vethNetwork.IPv6Address = fmt.Sprintf("%s/%d", c.Network.Interface.GlobalIPv6Address, c.Network.Interface.GlobalIPv6PrefixLen)
+			vethNetwork.IPv6Gateway = c.Network.Interface.IPv6Gateway
+		}
 		container.Networks = append(container.Networks, &vethNetwork)
 	}
 
@@ -119,10 +128,7 @@ func (d *driver) createNetwork(container *libcontainer.Config, c *execdriver.Com
 		cmd := active.cmd
 
 		nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net")
-		container.Networks = append(container.Networks, &libcontainer.Network{
-			Type:   "netns",
-			NsPath: nspath,
-		})
+		container.Namespaces.Add(libcontainer.NEWNET, nspath)
 	}
 
 	return nil
@@ -130,7 +136,7 @@ func (d *driver) createNetwork(container *libcontainer.Config, c *execdriver.Com
 
 func (d *driver) createIpc(container *libcontainer.Config, c *execdriver.Command) error {
 	if c.Ipc.HostIpc {
-		container.Namespaces["NEWIPC"] = false
+		container.Namespaces.Remove(libcontainer.NEWIPC)
 		return nil
 	}
 
@@ -144,7 +150,16 @@ func (d *driver) createIpc(container *libcontainer.Config, c *execdriver.Command
 		}
 		cmd := active.cmd
 
-		container.IpcNsPath = filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "ipc")
+		container.Namespaces.Add(libcontainer.NEWIPC, filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "ipc"))
+	}
+
+	return nil
+}
+
+func (d *driver) createPid(container *libcontainer.Config, c *execdriver.Command) error {
+	if c.Pid.HostPid {
+		container.Namespaces.Remove(libcontainer.NEWPID)
+		return nil
 	}
 
 	return nil
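
Namespace configuration has moved from a `map[string]bool` to a typed `Namespaces` collection, so disabling a namespace and joining an existing one go through the same `Remove`/`Add` calls. Condensed from the call sites above (the libcontainer signatures are assumed from those uses):

	// Sketch built from this hunk's call sites: host-mode sharing is
	// expressed by removing the corresponding namespace entry.
	func shareHostNamespaces(container *libcontainer.Config, hostNet, hostPid bool) {
		if hostNet {
			// Without NEWNET the container sees the host's network stack.
			container.Namespaces.Remove(libcontainer.NEWNET)
		}
		if hostPid {
			container.Namespaces.Remove(libcontainer.NEWPID)
		}
	}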

+ 50 - 15
daemon/execdriver/native/driver.go

@@ -13,9 +13,11 @@ import (
 	"strings"
 	"sync"
 	"syscall"
+	"time"
 
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
+	sysinfo "github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/apparmor"
@@ -41,30 +43,31 @@ type driver struct {
 	root             string
 	initPath         string
 	activeContainers map[string]*activeContainer
+	machineMemory    int64
 	sync.Mutex
 }
 
 func NewDriver(root, initPath string) (*driver, error) {
-	if err := os.MkdirAll(root, 0700); err != nil {
+	meminfo, err := sysinfo.ReadMemInfo()
+	if err != nil {
 		return nil, err
 	}
 
+	if err := os.MkdirAll(root, 0700); err != nil {
+		return nil, err
+	}
 	// native driver root is at docker_root/execdriver/native. Put apparmor at docker_root
 	if err := apparmor.InstallDefaultProfile(); err != nil {
 		return nil, err
 	}
-
 	return &driver{
 		root:             root,
 		initPath:         initPath,
 		activeContainers: make(map[string]*activeContainer),
+		machineMemory:    meminfo.MemTotal,
 	}, nil
 }
 
-func (d *driver) notifyOnOOM(config *libcontainer.Config) (<-chan struct{}, error) {
-	return fs.NotifyOnOOM(config.Cgroups)
-}
-
 type execOutput struct {
 	exitCode int
 	err      error
@@ -74,7 +77,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	// take the Command and populate the libcontainer.Config from it
 	container, err := d.createContainer(c)
 	if err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 
 	var term execdriver.Terminal
@@ -85,7 +88,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
 	}
 	if err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 	c.ProcessConfig.Terminal = term
 
@@ -102,12 +105,12 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	)
 
 	if err := d.createContainerRoot(c.ID); err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 	defer d.cleanContainer(c.ID)
 
 	if err := d.writeContainerFile(container, c.ID); err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 
 	execOutputChan := make(chan execOutput, 1)
@@ -146,22 +149,27 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 
 	select {
 	case execOutput := <-execOutputChan:
-		return execdriver.ExitStatus{execOutput.exitCode, false}, execOutput.err
+		return execdriver.ExitStatus{ExitCode: execOutput.exitCode}, execOutput.err
 	case <-waitForStart:
 		break
 	}
 
 	oomKill := false
-	oomKillNotification, err := d.notifyOnOOM(container)
+	state, err := libcontainer.GetState(filepath.Join(d.root, c.ID))
 	if err == nil {
-		_, oomKill = <-oomKillNotification
+		oomKillNotification, err := libcontainer.NotifyOnOOM(state)
+		if err == nil {
+			_, oomKill = <-oomKillNotification
+		} else {
+			log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err)
+		}
 	} else {
-		log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err)
+		log.Warnf("Failed to get container state, oom notify will not work: %s", err)
 	}
 	// wait for the container to exit.
 	execOutput := <-execOutputChan
 
-	return execdriver.ExitStatus{execOutput.exitCode, oomKill}, execOutput.err
+	return execdriver.ExitStatus{ExitCode: execOutput.exitCode, OOMKilled: oomKill}, execOutput.err
 }
 
 func (d *driver) Kill(p *execdriver.Command, sig int) error {
@@ -278,6 +286,33 @@ func (d *driver) Clean(id string) error {
 	return os.RemoveAll(filepath.Join(d.root, id))
 }
 
+func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
+	c := d.activeContainers[id]
+	state, err := libcontainer.GetState(filepath.Join(d.root, id))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, execdriver.ErrNotRunning
+		}
+		return nil, err
+	}
+	now := time.Now()
+	stats, err := libcontainer.GetStats(nil, state)
+	if err != nil {
+		return nil, err
+	}
+	memoryLimit := c.container.Cgroups.Memory
+	// If the container does not have a memory limit specified, set the
+	// limit to the machine's total memory.
+	if memoryLimit == 0 {
+		memoryLimit = d.machineMemory
+	}
+	return &execdriver.ResourceStats{
+		Read:           now,
+		ContainerStats: stats,
+		MemoryLimit:    memoryLimit,
+	}, nil
+}
+
 func getEnv(key string, env []string) string {
 	for _, pair := range env {
 		parts := strings.Split(pair, "=")
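
One subtlety in the OOM handling above: `_, oomKill = <-oomKillNotification` uses the two-value receive form, where the boolean is true only if a value was actually received and false once the channel is closed. The notifier sends on a genuine OOM and simply closes the channel on a normal exit, so the boolean doubles as the kill flag:

	package main

	import "fmt"

	func main() {
		// Normal exit: the channel is closed without a send.
		exited := make(chan struct{}, 1)
		close(exited)
		_, oomKill := <-exited
		fmt.Println(oomKill) // false

		// OOM: the notifier sends an event before closing.
		oomed := make(chan struct{}, 1)
		oomed <- struct{}{}
		close(oomed)
		_, oomKill = <-oomed
		fmt.Println(oomKill) // true
	}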

+ 7 - 7
daemon/execdriver/native/template/default_template.go

@@ -25,13 +25,13 @@ func New() *libcontainer.Config {
 			"KILL",
 			"AUDIT_WRITE",
 		},
-		Namespaces: map[string]bool{
-			"NEWNS":  true,
-			"NEWUTS": true,
-			"NEWIPC": true,
-			"NEWPID": true,
-			"NEWNET": true,
-		},
+		Namespaces: libcontainer.Namespaces([]libcontainer.Namespace{
+			{Type: "NEWNS"},
+			{Type: "NEWUTS"},
+			{Type: "NEWIPC"},
+			{Type: "NEWPID"},
+			{Type: "NEWNET"},
+		}),
 		Cgroups: &cgroups.Cgroup{
 			Parent:          "docker",
 			AllowAllDevices: false,

+ 20 - 15
daemon/graphdriver/aufs/aufs.go

@@ -45,6 +45,7 @@ var (
 		graphdriver.FsMagicBtrfs,
 		graphdriver.FsMagicAufs,
 	}
+	backingFs = "<unknown>"
 )
 
 func init() {
@@ -60,20 +61,22 @@ type Driver struct {
 // New returns a new AUFS driver.
 // An error is returned if AUFS is not supported.
 func Init(root string, options []string) (graphdriver.Driver, error) {
+
 	// Try to load the aufs kernel module
 	if err := supportsAufs(); err != nil {
 		return nil, graphdriver.ErrNotSupported
 	}
 
-	rootdir := path.Dir(root)
-
-	var buf syscall.Statfs_t
-	if err := syscall.Statfs(rootdir, &buf); err != nil {
-		return nil, fmt.Errorf("Couldn't stat the root directory: %s", err)
+	fsMagic, err := graphdriver.GetFSMagic(root)
+	if err != nil {
+		return nil, err
+	}
+	if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
+		backingFs = fsName
 	}
 
 	for _, magic := range incompatibleFsMagic {
-		if graphdriver.FsMagic(buf.Type) == magic {
+		if fsMagic == magic {
 			return nil, graphdriver.ErrIncompatibleFS
 		}
 	}
@@ -134,25 +137,26 @@ func supportsAufs() error {
 	return ErrAufsNotSupported
 }
 
-func (a Driver) rootPath() string {
+func (a *Driver) rootPath() string {
 	return a.root
 }
 
-func (Driver) String() string {
+func (*Driver) String() string {
 	return "aufs"
 }
 
-func (a Driver) Status() [][2]string {
+func (a *Driver) Status() [][2]string {
 	ids, _ := loadIds(path.Join(a.rootPath(), "layers"))
 	return [][2]string{
 		{"Root Dir", a.rootPath()},
+		{"Backing Filesystem", backingFs},
 		{"Dirs", fmt.Sprintf("%d", len(ids))},
 	}
 }
 
 // Exists returns true if the given id is registered with
 // this driver
-func (a Driver) Exists(id string) bool {
+func (a *Driver) Exists(id string) bool {
 	if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil {
 		return false
 	}
@@ -278,7 +282,7 @@ func (a *Driver) Get(id, mountLabel string) (string, error) {
 	return out, nil
 }
 
-func (a *Driver) Put(id string) {
+func (a *Driver) Put(id string) error {
 	// Protect the a.active from concurrent access
 	a.Lock()
 	defer a.Unlock()
@@ -293,6 +297,7 @@ func (a *Driver) Put(id string) {
 		}
 		delete(a.active, id)
 	}
+	return nil
 }
 
 // Diff produces an archive of the changes between the specified
@@ -300,8 +305,8 @@ func (a *Driver) Put(id string) {
 func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
 	// AUFS doesn't need the parent layer to produce a diff.
 	return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
-		Compression: archive.Uncompressed,
-		Excludes:    []string{".wh..wh.*"},
+		Compression:     archive.Uncompressed,
+		ExcludePatterns: []string{".wh..wh.*"},
 	})
 }
 
@@ -312,7 +317,7 @@ func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error {
 // DiffSize calculates the changes between the specified id
 // and its parent and returns the size in bytes of the changes
 // relative to its base filesystem directory.
-func (a *Driver) DiffSize(id, parent string) (bytes int64, err error) {
+func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
 	// AUFS doesn't need the parent layer to calculate the diff size.
 	return utils.TreeSize(path.Join(a.rootPath(), "diff", id))
 }
@@ -320,7 +325,7 @@ func (a *Driver) DiffSize(id, parent string) (bytes int64, err error) {
 // ApplyDiff extracts the changeset from the given diff into the
 // layer with the specified id and parent, returning the size of the
 // new layer in bytes.
-func (a *Driver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) {
+func (a *Driver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error) {
 	// AUFS doesn't need the parent id to apply the diff.
 	if err = a.applyDiff(id, diff); err != nil {
 		return

+ 1 - 1
daemon/graphdriver/aufs/aufs_test.go

@@ -568,7 +568,7 @@ func TestStatus(t *testing.T) {
 		t.Fatal("Status should not be nil or empty")
 	}
 	rootDir := status[0]
-	dirs := status[1]
+	dirs := status[2]
 	if rootDir[0] != "Root Dir" {
 		t.Fatalf("Expected Root Dir got %s", rootDir[0])
 	}

+ 2 - 1
daemon/graphdriver/btrfs/btrfs.go

@@ -220,9 +220,10 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 	return dir, nil
 }
 
-func (d *Driver) Put(id string) {
+func (d *Driver) Put(id string) error {
 	// Get() creates no runtime resources (like e.g. mounts)
 	// so this doesn't need to do anything.
+	return nil
 }
 
 func (d *Driver) Exists(id string) bool {

+ 40 - 1
daemon/graphdriver/devmapper/README.md

@@ -28,6 +28,45 @@ containers. All base images are snapshots of this device and those
 images are then in turn used as snapshots for other images and
 eventually containers.
 
+### Information on `docker info`
+
+As of docker-1.4.1, `docker info` will display something like the following
+when the `devicemapper` storage driver is in use:
+
+	$ sudo docker info
+	[...]
+	Storage Driver: devicemapper
+	 Pool Name: docker-253:1-17538953-pool
+	 Pool Blocksize: 65.54 kB
+	 Data file: /dev/loop4
+	 Metadata file: /dev/loop4
+	 Data Space Used: 2.536 GB
+	 Data Space Total: 107.4 GB
+	 Metadata Space Used: 7.93 MB
+	 Metadata Space Total: 2.147 GB
+	 Udev Sync Supported: true
+	 Data loop file: /home/docker/devicemapper/devicemapper/data
+	 Metadata loop file: /home/docker/devicemapper/devicemapper/metadata
+	 Library Version: 1.02.82-git (2013-10-04)
+	[...]
+
+#### status items
+
+Each item in the indented section under `Storage Driver: devicemapper` is
+a piece of status information about the driver.
+ *  `Pool Name` the name of the devicemapper pool for this driver.
+ *  `Pool Blocksize` the blocksize the thin pool was initialized with. This only changes on creation.
+ *  `Data file` the block device file used for the devicemapper data
+ *  `Metadata file` the block device file used for the devicemapper metadata
+ *  `Data Space Used` how much of the `Data file` is currently used
+ *  `Data Space Total` the maximum size of the `Data file`
+ *  `Metadata Space Used` how much of the `Metadata file` is currently used
+ *  `Metadata Space Total` the maximum size of the `Metadata file`
+ *  `Udev Sync Supported` whether devicemapper is able to sync with udev. Should be `true`.
+ *  `Data loop file` the loopback file attached to the `Data file`, if a loopback device is used
+ *  `Metadata loop file` the loopback file attached to the `Metadata file`, if a loopback device is used
+ *  `Library Version` the version of the libdevmapper library in use
+
 ### options
 
 The devicemapper backend supports some options that you can specify
@@ -162,7 +201,7 @@ Here is the list of supported options:
 
     Enables or disables the use of blkdiscard when removing
     devicemapper devices. This is enabled by default (only) if using
-    loopback devices and is required to res-parsify the loopback file
+    loopback devices and is required to re-sparsify the loopback file
     on image/container removal.
 
     Disabling this on loopback can lead to *much* faster container

+ 65 - 35
daemon/graphdriver/devmapper/deviceset.go

@@ -45,15 +45,15 @@ type Transaction struct {
 }
 
 type DevInfo struct {
-	Hash          string     `json:"-"`
-	DeviceId      int        `json:"device_id"`
-	Size          uint64     `json:"size"`
-	TransactionId uint64     `json:"transaction_id"`
-	Initialized   bool       `json:"initialized"`
-	devices       *DeviceSet `json:"-"`
+	Hash          string `json:"-"`
+	DeviceId      int    `json:"device_id"`
+	Size          uint64 `json:"size"`
+	TransactionId uint64 `json:"transaction_id"`
+	Initialized   bool   `json:"initialized"`
+	devices       *DeviceSet
 
-	mountCount int    `json:"-"`
-	mountPath  string `json:"-"`
+	mountCount int
+	mountPath  string
 
 	// The global DeviceSet lock guarantees that we serialize all
 	// the calls to libdevmapper (which is not threadsafe), but we
@@ -65,12 +65,12 @@ type DevInfo struct {
 	// the global lock while holding the per-device locks all
 	// device locks must be acquired *before* the device lock, and
 	// multiple device locks should be acquired in parent-before-child order.
-	lock sync.Mutex `json:"-"`
+	lock sync.Mutex
 }
 
 type MetaData struct {
 	Devices     map[string]*DevInfo `json:"Devices"`
-	devicesLock sync.Mutex          `json:"-"` // Protects all read/writes to Devices map
+	devicesLock sync.Mutex          // Protects all read/writes to Devices map
 }
 
 type DeviceSet struct {
@@ -89,8 +89,10 @@ type DeviceSet struct {
 	filesystem           string
 	mountOptions         string
 	mkfsArgs             []string
-	dataDevice           string
-	metadataDevice       string
+	dataDevice           string // block or loop dev
+	dataLoopFile         string // loopback file, if used
+	metadataDevice       string // block or loop dev
+	metadataLoopFile     string // loopback file, if used
 	doBlkDiscard         bool
 	thinpBlockSize       uint32
 	thinPoolDevice       string
@@ -103,12 +105,15 @@ type DiskUsage struct {
 }
 
 type Status struct {
-	PoolName         string
-	DataLoopback     string
-	MetadataLoopback string
-	Data             DiskUsage
-	Metadata         DiskUsage
-	SectorSize       uint64
+	PoolName          string
+	DataFile          string // actual block device for data
+	DataLoopback      string // loopback file, if used
+	MetadataFile      string // actual block device for metadata
+	MetadataLoopback  string // loopback file, if used
+	Data              DiskUsage
+	Metadata          DiskUsage
+	SectorSize        uint64
+	UdevSyncSupported bool
 }
 
 type DevStatus struct {
@@ -712,8 +717,10 @@ func setCloseOnExec(name string) {
 }
 
 func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) {
-	if level >= 7 {
-		return // Ignore _LOG_DEBUG
+	if level >= devicemapper.LogLevelDebug {
+		// (vbatts) libdm debug is very verbose. If you're debugging libdm, you can
+		// comment out this check yourself
+		level = devicemapper.LogLevelInfo
 	}
 
 	// FIXME(vbatts) push this back into ./pkg/devicemapper/
@@ -934,6 +941,11 @@ func (devices *DeviceSet) closeTransaction() error {
 }
 
 func (devices *DeviceSet) initDevmapper(doInit bool) error {
+	if os.Getenv("DEBUG") != "" {
+		devicemapper.LogInitVerbose(devicemapper.LogLevelDebug)
+	} else {
+		devicemapper.LogInitVerbose(devicemapper.LogLevelWarn)
+	}
 	// give ourselves to libdm as a log handler
 	devicemapper.LogInit(devices)
 
@@ -943,6 +955,12 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 		return graphdriver.ErrNotSupported
 	}
 
+	// https://github.com/docker/docker/issues/4036
+	if supported := devicemapper.UdevSetSyncSupport(true); !supported {
+		log.Warnf("WARNING: Udev sync is not supported. This will lead to unexpected behavior, data loss and errors")
+	}
+	log.Debugf("devicemapper: udev sync support: %v", devicemapper.UdevSyncSupported())
+
 	if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) {
 		return err
 	}
@@ -1013,6 +1031,8 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 			if err != nil {
 				return err
 			}
+			devices.dataLoopFile = data
+			devices.dataDevice = dataFile.Name()
 		} else {
 			dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600)
 			if err != nil {
@@ -1044,6 +1064,8 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 			if err != nil {
 				return err
 			}
+			devices.metadataLoopFile = metadata
+			devices.metadataDevice = metadataFile.Name()
 		} else {
 			metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600)
 			if err != nil {
@@ -1084,7 +1106,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 
 func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
 	log.Debugf("[deviceset] AddDevice() hash=%s basehash=%s", hash, baseHash)
-	defer log.Debugf("[deviceset] AddDevice END")
+	defer log.Debugf("[deviceset] AddDevice(hash=%s basehash=%s) END", hash, baseHash)
 
 	baseInfo, err := devices.lookupDevice(baseHash)
 	if err != nil {
@@ -1188,7 +1210,7 @@ func (devices *DeviceSet) deactivatePool() error {
 
 func (devices *DeviceSet) deactivateDevice(info *DevInfo) error {
 	log.Debugf("[devmapper] deactivateDevice(%s)", info.Hash)
-	defer log.Debugf("[devmapper] deactivateDevice END")
+	defer log.Debugf("[devmapper] deactivateDevice END(%s)", info.Hash)
 
 	// Wait for the unmount to be effective,
 	// by watching the value of Info.OpenCount for the device
@@ -1410,7 +1432,7 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
 
 func (devices *DeviceSet) UnmountDevice(hash string) error {
 	log.Debugf("[devmapper] UnmountDevice(hash=%s)", hash)
-	defer log.Debugf("[devmapper] UnmountDevice END")
+	defer log.Debugf("[devmapper] UnmountDevice(hash=%s) END", hash)
 
 	info, err := devices.lookupDevice(hash)
 	if err != nil {
@@ -1424,7 +1446,7 @@ func (devices *DeviceSet) UnmountDevice(hash string) error {
 	defer devices.Unlock()
 
 	if info.mountCount == 0 {
-		return fmt.Errorf("UnmountDevice: device not-mounted id %s\n", hash)
+		return fmt.Errorf("UnmountDevice: device not-mounted id %s", hash)
 	}
 
 	info.mountCount--
@@ -1433,7 +1455,7 @@ func (devices *DeviceSet) UnmountDevice(hash string) error {
 	}
 
 	log.Debugf("[devmapper] Unmount(%s)", info.mountPath)
-	if err := syscall.Unmount(info.mountPath, 0); err != nil {
+	if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
 		return err
 	}
 	log.Debugf("[devmapper] Unmount done")
@@ -1540,6 +1562,19 @@ func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionId, dataU
 	return
 }
 
+// DataDevicePath returns the path to the data storage for this deviceset,
+// regardless of loopback or block device
+func (devices *DeviceSet) DataDevicePath() string {
+	return devices.dataDevice
+}
+
+// MetadataDevicePath returns the path to the metadata storage for this deviceset,
+// regardless of loopback or block device
+func (devices *DeviceSet) MetadataDevicePath() string {
+	return devices.metadataDevice
+}
+
+// Status returns the current status of this deviceset
 func (devices *DeviceSet) Status() *Status {
 	devices.Lock()
 	defer devices.Unlock()
@@ -1547,16 +1582,11 @@ func (devices *DeviceSet) Status() *Status {
 	status := &Status{}
 
 	status.PoolName = devices.getPoolName()
-	if len(devices.dataDevice) > 0 {
-		status.DataLoopback = devices.dataDevice
-	} else {
-		status.DataLoopback = path.Join(devices.loopbackDir(), "data")
-	}
-	if len(devices.metadataDevice) > 0 {
-		status.MetadataLoopback = devices.metadataDevice
-	} else {
-		status.MetadataLoopback = path.Join(devices.loopbackDir(), "metadata")
-	}
+	status.DataFile = devices.DataDevicePath()
+	status.DataLoopback = devices.dataLoopFile
+	status.MetadataFile = devices.MetadataDevicePath()
+	status.MetadataLoopback = devices.metadataLoopFile
+	status.UdevSyncSupported = devicemapper.UdevSyncSupported()
 
 	totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
 	if err == nil {

+ 29 - 9
daemon/graphdriver/devmapper/driver.go

@@ -29,7 +29,17 @@ type Driver struct {
 	home string
 }
 
+var backingFs = "<unknown>"
+
 func Init(home string, options []string) (graphdriver.Driver, error) {
+	fsMagic, err := graphdriver.GetFSMagic(home)
+	if err != nil {
+		return nil, err
+	}
+	if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
+		backingFs = fsName
+	}
+
 	deviceSet, err := NewDeviceSet(home, true, options)
 	if err != nil {
 		return nil, err
@@ -56,13 +66,21 @@ func (d *Driver) Status() [][2]string {
 
 	status := [][2]string{
 		{"Pool Name", s.PoolName},
-		{"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(int64(s.SectorSize)))},
-		{"Data file", s.DataLoopback},
-		{"Metadata file", s.MetadataLoopback},
-		{"Data Space Used", fmt.Sprintf("%s", units.HumanSize(int64(s.Data.Used)))},
-		{"Data Space Total", fmt.Sprintf("%s", units.HumanSize(int64(s.Data.Total)))},
-		{"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Used)))},
-		{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Total)))},
+		{"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))},
+		{"Backing Filesystem", backingFs},
+		{"Data file", s.DataFile},
+		{"Metadata file", s.MetadataFile},
+		{"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))},
+		{"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))},
+		{"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))},
+		{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))},
+		{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
+	}
+	if len(s.DataLoopback) > 0 {
+		status = append(status, [2]string{"Data loop file", s.DataLoopback})
+	}
+	if len(s.MetadataLoopback) > 0 {
+		status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback})
 	}
 	if vStr, err := devicemapper.GetLibraryVersion(); err == nil {
 		status = append(status, [2]string{"Library Version", vStr})
@@ -141,10 +159,12 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 	return rootFs, nil
 }
 
-func (d *Driver) Put(id string) {
-	if err := d.DeviceSet.UnmountDevice(id); err != nil {
+func (d *Driver) Put(id string) error {
+	err := d.DeviceSet.UnmountDevice(id)
+	if err != nil {
 		log.Errorf("Warning: error unmounting device %s: %s", id, err)
 	}
+	return err
 }
 
 func (d *Driver) Exists(id string) bool {

+ 69 - 26
daemon/graphdriver/driver.go

@@ -5,15 +5,61 @@ import (
 	"fmt"
 	"os"
 	"path"
+	"strings"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/archive"
 )
 
-type FsMagic uint64
+type FsMagic uint32
 
 const (
-	FsMagicBtrfs = FsMagic(0x9123683E)
-	FsMagicAufs  = FsMagic(0x61756673)
+	FsMagicBtrfs       = FsMagic(0x9123683E)
+	FsMagicAufs        = FsMagic(0x61756673)
+	FsMagicExtfs       = FsMagic(0x0000EF53)
+	FsMagicCramfs      = FsMagic(0x28cd3d45)
+	FsMagicRamFs       = FsMagic(0x858458f6)
+	FsMagicTmpFs       = FsMagic(0x01021994)
+	FsMagicSquashFs    = FsMagic(0x73717368)
+	FsMagicNfsFs       = FsMagic(0x00006969)
+	FsMagicReiserFs    = FsMagic(0x52654973)
+	FsMagicSmbFs       = FsMagic(0x0000517B)
+	FsMagicJffs2Fs     = FsMagic(0x000072b6)
+	FsMagicUnsupported = FsMagic(0x00000000)
+)
+
+var (
+	DefaultDriver string
+	// All registered drivers
+	drivers map[string]InitFunc
+	// Slice of drivers in the order they should be tried
+	priority = []string{
+		"aufs",
+		"btrfs",
+		"devicemapper",
+		"vfs",
+		// experimental, has to be enabled manually for now
+		"overlay",
+	}
+
+	ErrNotSupported   = errors.New("driver not supported")
+	ErrPrerequisites  = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
+	ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
+
+	FsNames = map[FsMagic]string{
+		FsMagicAufs:        "aufs",
+		FsMagicBtrfs:       "btrfs",
+		FsMagicExtfs:       "extfs",
+		FsMagicCramfs:      "cramfs",
+		FsMagicRamFs:       "ramfs",
+		FsMagicTmpFs:       "tmpfs",
+		FsMagicSquashFs:    "squashfs",
+		FsMagicNfsFs:       "nfs",
+		FsMagicReiserFs:    "reiserfs",
+		FsMagicSmbFs:       "smb",
+		FsMagicJffs2Fs:     "jffs2",
+		FsMagicUnsupported: "unsupported",
+	}
 )
 
 type InitFunc func(root string, options []string) (Driver, error)
@@ -38,7 +84,7 @@ type ProtoDriver interface {
 	Get(id, mountLabel string) (dir string, err error)
 	// Put releases the system resources for the specified id,
 	// e.g, unmounting layered filesystem.
-	Put(id string)
+	Put(id string) error
 	// Exists returns whether a filesystem layer with the specified
 	// ID exists on this driver.
 	Exists(id string) bool
@@ -63,32 +109,13 @@ type Driver interface {
 	// ApplyDiff extracts the changeset from the given diff into the
 	// layer with the specified id and parent, returning the size of the
 	// new layer in bytes.
-	ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error)
+	ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error)
 	// DiffSize calculates the changes between the specified id
 	// and its parent and returns the size in bytes of the changes
 	// relative to its base filesystem directory.
-	DiffSize(id, parent string) (bytes int64, err error)
+	DiffSize(id, parent string) (size int64, err error)
 }
 
-var (
-	DefaultDriver string
-	// All registred drivers
-	drivers map[string]InitFunc
-	// Slice of drivers that should be used in an order
-	priority = []string{
-		"aufs",
-		"btrfs",
-		"devicemapper",
-		"vfs",
-		// experimental, has to be enabled manually for now
-		"overlay",
-	}
-
-	ErrNotSupported   = errors.New("driver not supported")
-	ErrPrerequisites  = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
-	ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
-)
-
 func init() {
 	drivers = make(map[string]InitFunc)
 }
@@ -125,18 +152,34 @@ func New(root string, options []string) (driver Driver, err error) {
 			}
 			return nil, err
 		}
+		checkPriorDriver(name, root)
 		return driver, nil
 	}
 
 	// Check all registered drivers if no priority driver is found
-	for _, initFunc := range drivers {
+	for name, initFunc := range drivers {
 		if driver, err = initFunc(root, options); err != nil {
 			if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS {
 				continue
 			}
 			return nil, err
 		}
+		checkPriorDriver(name, root)
 		return driver, nil
 	}
 	return nil, fmt.Errorf("No supported storage backend found")
 }
+
+func checkPriorDriver(name, root string) {
+	priorDrivers := []string{}
+	for prior := range drivers {
+		if prior != name && prior != "vfs" {
+			if _, err := os.Stat(path.Join(root, prior)); err == nil {
+				priorDrivers = append(priorDrivers, prior)
+			}
+		}
+	}
+	if len(priorDrivers) > 0 {
+		log.Warnf("graphdriver %s selected. Warning: your graphdriver directory %s already contains data managed by other graphdrivers: %s", name, root, strings.Join(priorDrivers, ","))
+	}
+}
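
`Put` returning an error ripples through every graphdriver in this diff (aufs, btrfs, devicemapper, overlay, vfs), so callers can finally surface unmount failures instead of leaking mounts silently. A hypothetical caller pattern; `withMount` is illustrative, not from this codebase:

	// withMount mounts a layer, runs fn against it, and releases it,
	// now able to report the error Put used to swallow. log is
	// github.com/Sirupsen/logrus, as elsewhere in this tree.
	func withMount(driver graphdriver.Driver, id string, fn func(dir string) error) error {
		dir, err := driver.Get(id, "")
		if err != nil {
			return err
		}
		defer func() {
			if err := driver.Put(id); err != nil {
				log.Errorf("failed to release %s: %s", id, err)
			}
		}()
		return fn(dir)
	}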

+ 14 - 0
daemon/graphdriver/driver_linux.go

@@ -0,0 +1,14 @@
+package graphdriver
+
+import (
+	"path"
+	"syscall"
+)
+
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(path.Dir(rootpath), &buf); err != nil {
+		return 0, err
+	}
+	return FsMagic(buf.Type), nil
+}
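
`GetFSMagic` is a `statfs(2)` on the parent of the driver root; the `FsNames` table in `driver.go` then maps the magic number to the name reported as `Backing Filesystem` by `docker info`. A standalone probe of the same value (Linux-only, like the file it mirrors):

	package main

	import (
		"fmt"
		"syscall"
	)

	func main() {
		// Statfs reports the filesystem's magic number in Type;
		// 0x9123683E, for instance, is btrfs.
		var buf syscall.Statfs_t
		if err := syscall.Statfs("/var/lib/docker", &buf); err != nil {
			fmt.Println("statfs:", err)
			return
		}
		fmt.Printf("backing filesystem magic: 0x%X\n", buf.Type)
	}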

+ 7 - 0
daemon/graphdriver/driver_unsupported.go

@@ -0,0 +1,7 @@
+// +build !linux
+
+package graphdriver
+
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	return FsMagicUnsupported, nil
+}

+ 6 - 24
daemon/graphdriver/fsdiff.go

@@ -3,14 +3,12 @@
 package graphdriver
 
 import (
-	"fmt"
 	"time"
 
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/utils"
 )
 
 // naiveDiffDriver takes a ProtoDriver and adds the
@@ -27,8 +25,8 @@ type naiveDiffDriver struct {
 // it may or may not support on its own:
 //     Diff(id, parent string) (archive.Archive, error)
 //     Changes(id, parent string) ([]archive.Change, error)
-//     ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error)
-//     DiffSize(id, parent string) (bytes int64, err error)
+//     ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error)
+//     DiffSize(id, parent string) (size int64, err error)
 func NaiveDiffDriver(driver ProtoDriver) Driver {
 	return &naiveDiffDriver{ProtoDriver: driver}
 }
@@ -111,7 +109,7 @@ func (gdw *naiveDiffDriver) Changes(id, parent string) ([]archive.Change, error)
 // ApplyDiff extracts the changeset from the given diff into the
 // layer with the specified id and parent, returning the size of the
 // new layer in bytes.
-func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) {
+func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error) {
 	driver := gdw.ProtoDriver
 
 	// Mount the root filesystem so we can apply the diff/layer.
@@ -123,34 +121,18 @@ func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.ArchiveRea
 
 	start := time.Now().UTC()
 	log.Debugf("Start untar layer")
-	if err = chrootarchive.ApplyLayer(layerFs, diff); err != nil {
+	if size, err = chrootarchive.ApplyLayer(layerFs, diff); err != nil {
 		return
 	}
 	log.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
 
-	if parent == "" {
-		return utils.TreeSize(layerFs)
-	}
-
-	parentFs, err := driver.Get(parent, "")
-	if err != nil {
-		err = fmt.Errorf("Driver %s failed to get image parent %s: %s", driver, parent, err)
-		return
-	}
-	defer driver.Put(parent)
-
-	changes, err := archive.ChangesDirs(layerFs, parentFs)
-	if err != nil {
-		return
-	}
-
-	return archive.ChangesSize(layerFs, changes), nil
+	return
 }
 
 // DiffSize calculates the changes between the specified layer
 // and its parent and returns the size in bytes of the changes
 // relative to its base filesystem directory.
-func (gdw *naiveDiffDriver) DiffSize(id, parent string) (bytes int64, err error) {
+func (gdw *naiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
 	driver := gdw.ProtoDriver
 
 	changes, err := gdw.Changes(id, parent)
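
`ApplyDiff` now trusts the size that `chrootarchive.ApplyLayer` reports while extracting, dropping the second pass that mounted the parent and re-walked both trees. The bare `return` works because the results are named; the idiom in isolation:

	package main

	import "fmt"

	// Named results let a bare return propagate whatever was assigned
	// to them, including inside an if statement's init clause.
	func parse(s string) (n int, err error) {
		if _, err = fmt.Sscanf(s, "%d", &n); err != nil {
			return // n is 0, err is the scan error
		}
		return // n parsed, err nil
	}

	func main() {
		fmt.Println(parse("42")) // 42 <nil>
		fmt.Println(parse("x"))  // 0 plus an error
	}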

+ 2 - 1
daemon/graphdriver/graphtest/graphtest.go

@@ -5,6 +5,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path"
+	"strings"
 	"syscall"
 	"testing"
 
@@ -73,7 +74,7 @@ func newDriver(t *testing.T, name string) *Driver {
 
 	d, err := graphdriver.GetDriver(name, root, nil)
 	if err != nil {
-		if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites {
+		if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || strings.Contains(err.Error(), "'overlay' is not supported over") {
 			t.Skipf("Driver %s not supported", name)
 		}
 		t.Fatal(err)

+ 36 - 16
daemon/graphdriver/overlay/overlay.go

@@ -28,7 +28,7 @@ var (
 
 type ApplyDiffProtoDriver interface {
 	graphdriver.ProtoDriver
-	ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error)
+	ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error)
 }
 
 type naiveDiffDriverWithApply struct {
@@ -90,15 +90,36 @@ type Driver struct {
 	active     map[string]*ActiveMount
 }
 
+var backingFs = "<unknown>"
+
 func init() {
 	graphdriver.Register("overlay", Init)
 }
 
 func Init(home string, options []string) (graphdriver.Driver, error) {
+
 	if err := supportsOverlay(); err != nil {
 		return nil, graphdriver.ErrNotSupported
 	}
 
+	fsMagic, err := graphdriver.GetFSMagic(home)
+	if err != nil {
+		return nil, err
+	}
+	if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
+		backingFs = fsName
+	}
+
+	// check if they are running over btrfs or aufs
+	switch fsMagic {
+	case graphdriver.FsMagicBtrfs:
+		log.Error("'overlay' is not supported over btrfs.")
+		return nil, graphdriver.ErrIncompatibleFS
+	case graphdriver.FsMagicAufs:
+		log.Error("'overlay' is not supported over aufs.")
+		return nil, graphdriver.ErrIncompatibleFS
+	}
+
 	// Create the driver home dir
 	if err := os.MkdirAll(home, 0755); err != nil && !os.IsExist(err) {
 		return nil, err
@@ -138,7 +159,9 @@ func (d *Driver) String() string {
 }
 
 func (d *Driver) Status() [][2]string {
-	return nil
+	return [][2]string{
+		{"Backing Filesystem", backingFs},
+	}
 }
 
 func (d *Driver) Cleanup() error {
@@ -284,7 +307,7 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) {
 	return mount.path, nil
 }
 
-func (d *Driver) Put(id string) {
+func (d *Driver) Put(id string) error {
 	// Protect the d.active from concurrent access
 	d.Lock()
 	defer d.Unlock()
@@ -292,24 +315,26 @@ func (d *Driver) Put(id string) {
 	mount := d.active[id]
 	if mount == nil {
 		log.Debugf("Put on a non-mounted device %s", id)
-		return
+		return nil
 	}
 
 	mount.count--
 	if mount.count > 0 {
-		return
+		return nil
 	}
 
+	defer delete(d.active, id)
 	if mount.mounted {
-		if err := syscall.Unmount(mount.path, 0); err != nil {
+		err := syscall.Unmount(mount.path, 0)
+		if err != nil {
 			log.Debugf("Failed to unmount %s overlay: %v", id, err)
 		}
+		return err
 	}
-
-	delete(d.active, id)
+	return nil
 }
 
-func (d *Driver) ApplyDiff(id string, parent string, diff archive.ArchiveReader) (bytes int64, err error) {
+func (d *Driver) ApplyDiff(id string, parent string, diff archive.ArchiveReader) (size int64, err error) {
 	dir := d.dir(id)
 
 	if parent == "" {
@@ -347,7 +372,7 @@ func (d *Driver) ApplyDiff(id string, parent string, diff archive.ArchiveReader)
 		return 0, err
 	}
 
-	if err := chrootarchive.ApplyLayer(tmpRootDir, diff); err != nil {
+	if size, err = chrootarchive.ApplyLayer(tmpRootDir, diff); err != nil {
 		return 0, err
 	}
 
@@ -356,12 +381,7 @@ func (d *Driver) ApplyDiff(id string, parent string, diff archive.ArchiveReader)
 		return 0, err
 	}
 
-	changes, err := archive.ChangesDirs(rootDir, parentRootDir)
-	if err != nil {
-		return 0, err
-	}
-
-	return archive.ChangesSize(rootDir, changes), nil
+	return
 }
 
 func (d *Driver) Exists(id string) bool {

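Put gaining an error return means unmount failures are no longer silently dropped. A hedged caller-side sketch (illustrative only):

    // Put returns nil while the ref-counted mount is still in use and
    // surfaces the syscall.Unmount error once the count reaches zero.
    if err := d.Put(id); err != nil {
        log.Errorf("failed to release overlay mount for %s: %v", id, err)
    }
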
+ 2 - 11
daemon/graphdriver/vfs/driver.go

@@ -1,10 +1,8 @@
 package vfs
 
 import (
-	"bytes"
 	"fmt"
 	"os"
-	"os/exec"
 	"path"
 
 	"github.com/docker/docker/daemon/graphdriver"
@@ -39,14 +37,6 @@ func (d *Driver) Cleanup() error {
 	return nil
 }
 
-func isGNUcoreutils() bool {
-	if stdout, err := exec.Command("cp", "--version").Output(); err == nil {
-		return bytes.Contains(stdout, []byte("GNU coreutils"))
-	}
-
-	return false
-}
-
 func (d *Driver) Create(id, parent string) error {
 	dir := d.dir(id)
 	if err := os.MkdirAll(path.Dir(dir), 0700); err != nil {
@@ -93,9 +83,10 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 	return dir, nil
 }
 
-func (d *Driver) Put(id string) {
+func (d *Driver) Put(id string) error {
 	// The vfs driver has no runtime resources (e.g. mounts)
 	// to clean up, so we don't need anything here
+	return nil
 }
 
 func (d *Driver) Exists(id string) bool {

+ 2 - 2
daemon/image_delete.go

@@ -113,7 +113,7 @@ func (daemon *Daemon) DeleteImage(eng *engine.Engine, name string, imgs *engine.
 				return err
 			}
 			out := &engine.Env{}
-			out.Set("Deleted", img.ID)
+			out.SetJson("Deleted", img.ID)
 			imgs.Add(out)
 			eng.Job("log", "delete", img.ID, "").Run()
 			if img.Parent != "" && !noprune {
@@ -131,7 +131,7 @@ func (daemon *Daemon) DeleteImage(eng *engine.Engine, name string, imgs *engine.
 
 func (daemon *Daemon) canDeleteImage(imgID string, force bool) error {
 	for _, container := range daemon.List() {
-		parent, err := daemon.Repositories().LookupImage(container.Image)
+		parent, err := daemon.Repositories().LookupImage(container.ImageID)
 		if err != nil {
 			if daemon.Graph().IsNotExist(err) {
 				return nil

+ 12 - 2
daemon/info.go

@@ -55,8 +55,17 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
 	if err := cjob.Run(); err != nil {
 		return job.Error(err)
 	}
+	registryJob := job.Eng.Job("registry_config")
+	registryEnv, _ := registryJob.Stdout.AddEnv()
+	if err := registryJob.Run(); err != nil {
+		return job.Error(err)
+	}
+	registryConfig := registry.ServiceConfig{}
+	if err := registryEnv.GetJson("config", &registryConfig); err != nil {
+		return job.Error(err)
+	}
 	v := &engine.Env{}
-	v.Set("ID", daemon.ID)
+	v.SetJson("ID", daemon.ID)
 	v.SetInt("Containers", len(daemon.List()))
 	v.SetInt("Images", imgcount)
 	v.Set("Driver", daemon.GraphDriver().String())
@@ -72,13 +81,14 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
 	v.Set("KernelVersion", kernelVersion)
 	v.Set("OperatingSystem", operatingSystem)
 	v.Set("IndexServerAddress", registry.IndexServerAddress())
+	v.SetJson("RegistryConfig", registryConfig)
 	v.Set("InitSha1", dockerversion.INITSHA1)
 	v.Set("InitPath", initPath)
 	v.SetInt("NCPU", runtime.NumCPU())
 	v.SetInt64("MemTotal", meminfo.MemTotal)
 	v.Set("DockerRootDir", daemon.Config().Root)
 	if hostname, err := os.Hostname(); err == nil {
-		v.Set("Name", hostname)
+		v.SetJson("Name", hostname)
 	}
 	v.SetList("Labels", daemon.Config().Labels)
 	if _, err := v.WriteTo(job.Stdout); err != nil {

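The Set-to-SetJson switches matter because engine.Env stores values as strings: SetJson marshals the value so that a consumer reading it back with GetJson (as the registry_config job above does) decodes it correctly. A rough round-trip sketch, assuming only the Env methods visible in this patch:

    v := &engine.Env{}
    v.SetJson("Name", hostname) // stored JSON-encoded, e.g. "\"myhost\""
    var name string
    if err := v.GetJson("Name", &name); err == nil {
        fmt.Println(name) // myhost
    }
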
+ 6 - 3
daemon/inspect.go

@@ -29,18 +29,19 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
 		}
 
 		out := &engine.Env{}
-		out.Set("Id", container.ID)
+		out.SetJson("Id", container.ID)
 		out.SetAuto("Created", container.Created)
 		out.SetJson("Path", container.Path)
 		out.SetList("Args", container.Args)
 		out.SetJson("Config", container.Config)
 		out.SetJson("State", container.State)
-		out.Set("Image", container.Image)
+		out.Set("Image", container.ImageID)
 		out.SetJson("NetworkSettings", container.NetworkSettings)
 		out.Set("ResolvConfPath", container.ResolvConfPath)
 		out.Set("HostnamePath", container.HostnamePath)
 		out.Set("HostsPath", container.HostsPath)
-		out.Set("Name", container.Name)
+		out.SetJson("Name", container.Name)
+		out.SetInt("RestartCount", container.RestartCount)
 		out.Set("Driver", container.Driver)
 		out.Set("ExecDriver", container.ExecDriver)
 		out.Set("MountLabel", container.MountLabel)
@@ -49,6 +50,8 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
 		out.SetJson("VolumesRW", container.VolumesRW)
 		out.SetJson("AppArmorProfile", container.AppArmorProfile)
 
+		out.SetList("ExecIDs", container.GetExecIDs())
+
 		if children, err := daemon.Children(container.Name); err == nil {
 			for linkAlias, child := range children {
 				container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))

+ 12 - 5
daemon/list.go

@@ -45,6 +45,14 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 		}
 	}
 
+	if i, ok := psFilters["status"]; ok {
+		for _, value := range i {
+			if value == "exited" {
+				all = true
+			}
+		}
+	}
+
 	names := map[string][]string{}
 	daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
 		names[e.ID()] = append(names[e.ID()], p)
@@ -73,7 +81,6 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 		if !container.Running && !all && n <= 0 && since == "" && before == "" {
 			return nil
 		}
-
 		if !psFilters.Match("name", container.Name) {
 			return nil
 		}
@@ -96,10 +103,10 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 				return errLast
 			}
 		}
-		if len(filt_exited) > 0 && !container.Running {
+		if len(filt_exited) > 0 {
 			should_skip := true
 			for _, code := range filt_exited {
-				if code == container.ExitCode {
+				if code == container.ExitCode && !container.Running {
 					should_skip = false
 					break
 				}
@@ -114,9 +121,9 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 		}
 		displayed++
 		out := &engine.Env{}
-		out.Set("Id", container.ID)
+		out.SetJson("Id", container.ID)
 		out.SetList("Names", names[container.ID])
-		out.Set("Image", daemon.Repositories().ImageName(container.Image))
+		out.SetJson("Image", daemon.Repositories().ImageName(container.ImageID))
 		if len(container.Args) > 0 {
 			args := []string{}
 			for _, arg := range container.Args {

+ 11 - 3
daemon/monitor.go

@@ -9,12 +9,13 @@ import (
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
 )
 
 const defaultTimeIncrement = 100
 
 // containerMonitor monitors the execution of a container's main process.
-// If a restart policy is specified for the cotnainer the monitor will ensure that the
+// If a restart policy is specified for the container the monitor will ensure that the
 // process is restarted based on the rules of the policy.  When the container is finally stopped
 // the monitor will reset and cleanup any of the container resources such as networking allocations
 // and the rootfs
@@ -154,6 +155,9 @@ func (m *containerMonitor) Start() error {
 
 		if m.shouldRestart(exitStatus.ExitCode) {
 			m.container.SetRestarting(&exitStatus)
+			if exitStatus.OOMKilled {
+				m.container.LogEvent("oom")
+			}
 			m.container.LogEvent("die")
 			m.resetContainer(true)
 
@@ -170,6 +174,9 @@ func (m *containerMonitor) Start() error {
 			continue
 		}
 		m.container.ExitCode = exitStatus.ExitCode
+		if exitStatus.OOMKilled {
+			m.container.LogEvent("oom")
+		}
 		m.container.LogEvent("die")
 		m.resetContainer(true)
 		return err
@@ -223,8 +230,9 @@ func (m *containerMonitor) shouldRestart(exitCode int) bool {
 		return true
 	case "on-failure":
 		// the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count
-		if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount >= max {
-			log.Debugf("stopping restart of container %s because maximum failure could of %d has been reached", max)
+		if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount > max {
+			log.Debugf("stopping restart of container %s because maximum failure could of %d has been reached",
+				utils.TruncateID(m.container.ID), max)
 			return false
 		}
 

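The comparison change from >= to > fixes an off-by-one: with --restart=on-failure:3 the monitor now performs exactly three restart attempts. A small sketch of the accounting, assuming failureCount has already been incremented for the current exit:

    max := 3 // --restart=on-failure:3
    for failureCount := 1; failureCount <= 4; failureCount++ {
        restart := !(max != 0 && failureCount > max)
        fmt.Printf("failure %d -> restart=%v\n", failureCount, restart)
    }
    // failures 1 through 3 restart; the 4th leaves the container stopped
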
+ 12 - 7
daemon/network_settings.go

@@ -9,13 +9,18 @@ import (
 type PortMapping map[string]string // Deprecated
 
 type NetworkSettings struct {
-	IPAddress   string
-	IPPrefixLen int
-	MacAddress  string
-	Gateway     string
-	Bridge      string
-	PortMapping map[string]PortMapping // Deprecated
-	Ports       nat.PortMap
+	IPAddress              string
+	IPPrefixLen            int
+	MacAddress             string
+	LinkLocalIPv6Address   string
+	LinkLocalIPv6PrefixLen int
+	GlobalIPv6Address      string
+	GlobalIPv6PrefixLen    int
+	Gateway                string
+	IPv6Gateway            string
+	Bridge                 string
+	PortMapping            map[string]PortMapping // Deprecated
+	Ports                  nat.PortMap
 }
 
 func (settings *NetworkSettings) PortMappingAPI() *engine.Table {

+ 239 - 55
daemon/networkdriver/bridge/driver.go

@@ -1,11 +1,13 @@
 package bridge
 
 import (
+	"encoding/hex"
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"net"
 	"os"
-	"strconv"
+	"strings"
 	"sync"
 
 	log "github.com/Sirupsen/logrus"
@@ -28,6 +30,7 @@ const (
 // Network interface represents the networking stack of a container
 type networkInterface struct {
 	IP           net.IP
+	IPv6         net.IP
 	PortMappings []net.Addr // there are mappings to the host interfaces
 }
 
@@ -70,8 +73,10 @@ var (
 		"192.168.44.1/24",
 	}
 
-	bridgeIface   string
-	bridgeNetwork *net.IPNet
+	bridgeIface       string
+	bridgeIPv4Network *net.IPNet
+	bridgeIPv6Addr    net.IP
+	globalIPv6Network *net.IPNet
 
 	defaultBindingIP  = net.ParseIP("0.0.0.0")
 	currentInterfaces = ifaces{c: make(map[string]*networkInterface)}
@@ -79,13 +84,19 @@ var (
 
 func InitDriver(job *engine.Job) engine.Status {
 	var (
-		network        *net.IPNet
+		networkv4      *net.IPNet
+		networkv6      *net.IPNet
+		addrv4         net.Addr
+		addrsv6        []net.Addr
 		enableIPTables = job.GetenvBool("EnableIptables")
+		enableIPv6     = job.GetenvBool("EnableIPv6")
 		icc            = job.GetenvBool("InterContainerCommunication")
 		ipMasq         = job.GetenvBool("EnableIpMasq")
 		ipForward      = job.GetenvBool("EnableIpForward")
 		bridgeIP       = job.Getenv("BridgeIP")
+		bridgeIPv6     = "fe80::1/64"
 		fixedCIDR      = job.Getenv("FixedCIDR")
+		fixedCIDRv6    = job.Getenv("FixedCIDRv6")
 	)
 
 	if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" {
@@ -99,41 +110,97 @@ func InitDriver(job *engine.Job) engine.Status {
 		bridgeIface = DefaultNetworkBridge
 	}
 
-	addr, err := networkdriver.GetIfaceAddr(bridgeIface)
+	addrv4, addrsv6, err := networkdriver.GetIfaceAddr(bridgeIface)
+
 	if err != nil {
+		// No bridge exists; create one
 		// If we're not using the default bridge, fail without trying to create it
 		if !usingDefaultBridge {
 			return job.Error(err)
 		}
-		// If the bridge interface is not found (or has no address), try to create it and/or add an address
-		if err := configureBridge(bridgeIP); err != nil {
+
+		// If the iface is not found, try to create it
+		if err := configureBridge(bridgeIP, bridgeIPv6, enableIPv6); err != nil {
 			return job.Error(err)
 		}
 
-		addr, err = networkdriver.GetIfaceAddr(bridgeIface)
+		addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
 		if err != nil {
 			return job.Error(err)
 		}
-		network = addr.(*net.IPNet)
+
+		if fixedCIDRv6 != "" {
+			// Setting route to global IPv6 subnet
+			log.Infof("Adding route to IPv6 network %q via device %q", fixedCIDRv6, bridgeIface)
+			if err := netlink.AddRoute(fixedCIDRv6, "", "", bridgeIface); err != nil {
+				log.Fatalf("Could not add route to IPv6 network %q via device %q", fixedCIDRv6, bridgeIface)
+			}
+		}
 	} else {
-		network = addr.(*net.IPNet)
+		// Bridge already exists; read its current configuration
 		// validate that the bridge ip matches the ip specified by BridgeIP
 		if bridgeIP != "" {
+			networkv4 = addrv4.(*net.IPNet)
 			bip, _, err := net.ParseCIDR(bridgeIP)
 			if err != nil {
 				return job.Error(err)
 			}
-			if !network.IP.Equal(bip) {
-				return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", network.IP, bip)
+			if !networkv4.IP.Equal(bip) {
+				return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", networkv4.IP, bip)
+			}
+		}
+
+		// a bridge might exist but not have any IPv6 addr associated with it yet
+		// (for example, an existing Docker installation that has only been used
+		// with IPv4 and where docker0 is already set up). In that case, we can
+		// perform the bridge init for IPv6 here; otherwise we will error out
+		// below if --ipv6=true
+		if len(addrsv6) == 0 && enableIPv6 {
+			if err := setupIPv6Bridge(bridgeIPv6); err != nil {
+				return job.Error(err)
+			}
+			// recheck addresses now that IPv6 is setup on the bridge
+			addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
+			if err != nil {
+				return job.Error(err)
+			}
+		}
+
+		// TODO: Check if route to fixedCIDRv6 is set
+	}
+
+	if enableIPv6 {
+		bip6, _, err := net.ParseCIDR(bridgeIPv6)
+		if err != nil {
+			return job.Error(err)
+		}
+		found := false
+		for _, addrv6 := range addrsv6 {
+			networkv6 = addrv6.(*net.IPNet)
+			if networkv6.IP.Equal(bip6) {
+				found = true
+				break
 			}
 		}
+		if !found {
+			return job.Errorf("bridge IPv6 does not match existing bridge configuration %s", bip6)
+		}
+	}
+
+	networkv4 = addrv4.(*net.IPNet)
+
+	if enableIPv6 {
+		if len(addrsv6) == 0 {
+			return job.Error(errors.New("IPv6 enabled but no IPv6 detected"))
+		}
+		bridgeIPv6Addr = networkv6.IP
 	}
 
 	// Configure iptables for link support
 	if enableIPTables {
-		if err := setupIPTables(addr, icc, ipMasq); err != nil {
+		if err := setupIPTables(addrv4, icc, ipMasq); err != nil {
 			return job.Error(err)
 		}
+
 	}
 
 	if ipForward {
@@ -141,35 +208,64 @@ func InitDriver(job *engine.Job) engine.Status {
 		if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil {
 			job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
 		}
+
+		if fixedCIDRv6 != "" {
+			// Enable IPv6 forwarding
+			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/default/forwarding", []byte{'1', '\n'}, 0644); err != nil {
+				job.Logf("WARNING: unable to enable IPv6 default forwarding: %s\n", err)
+			}
+			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/all/forwarding", []byte{'1', '\n'}, 0644); err != nil {
+				job.Logf("WARNING: unable to enable IPv6 all forwarding: %s\n", err)
+			}
+		}
 	}
 
 	// We can always try removing the iptables
-	if err := iptables.RemoveExistingChain("DOCKER"); err != nil {
+	if err := iptables.RemoveExistingChain("DOCKER", iptables.Nat); err != nil {
 		return job.Error(err)
 	}
 
 	if enableIPTables {
-		chain, err := iptables.NewChain("DOCKER", bridgeIface)
+		_, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Nat)
+		if err != nil {
+			return job.Error(err)
+		}
+		chain, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter)
 		if err != nil {
 			return job.Error(err)
 		}
 		portmapper.SetIptablesChain(chain)
 	}
 
-	bridgeNetwork = network
+	bridgeIPv4Network = networkv4
 	if fixedCIDR != "" {
 		_, subnet, err := net.ParseCIDR(fixedCIDR)
 		if err != nil {
 			return job.Error(err)
 		}
 		log.Debugf("Subnet: %v", subnet)
-		if err := ipallocator.RegisterSubnet(bridgeNetwork, subnet); err != nil {
+		if err := ipallocator.RegisterSubnet(bridgeIPv4Network, subnet); err != nil {
+			return job.Error(err)
+		}
+	}
+
+	if fixedCIDRv6 != "" {
+		_, subnet, err := net.ParseCIDR(fixedCIDRv6)
+		if err != nil {
+			return job.Error(err)
+		}
+		log.Debugf("Subnet: %v", subnet)
+		if err := ipallocator.RegisterSubnet(subnet, subnet); err != nil {
 			return job.Error(err)
 		}
+		globalIPv6Network = subnet
 	}
 
+	// Block BridgeIP in IP allocator
+	ipallocator.RequestIP(bridgeIPv4Network, bridgeIPv4Network.IP)
+
 	// https://github.com/docker/docker/issues/2768
-	job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP)
+	job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeIPv4Network.IP)
 
 	for name, f := range map[string]engine.Handler{
 		"allocate_interface": Allocate,
@@ -257,7 +353,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
 // If the bridge `bridgeIface` already exists, it will only perform the IP address association with the existing
 // bridge (fixes issue #8444)
 // If an address which doesn't conflict with existing interfaces can't be found, an error is returned.
-func configureBridge(bridgeIP string) error {
+func configureBridge(bridgeIP string, bridgeIPv6 string, enableIPv6 bool) error {
 	nameservers := []string{}
 	resolvConf, _ := resolvconf.Get()
 	// we don't check for an error here, because we don't really care
@@ -314,15 +410,46 @@ func configureBridge(bridgeIP string) error {
 		return err
 	}
 
-	if netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil {
+	if err := netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil {
 		return fmt.Errorf("Unable to add private network: %s", err)
 	}
+
+	if enableIPv6 {
+		if err := setupIPv6Bridge(bridgeIPv6); err != nil {
+			return err
+		}
+	}
+
 	if err := netlink.NetworkLinkUp(iface); err != nil {
 		return fmt.Errorf("Unable to start network bridge: %s", err)
 	}
 	return nil
 }
 
+func setupIPv6Bridge(bridgeIPv6 string) error {
+
+	iface, err := net.InterfaceByName(bridgeIface)
+	if err != nil {
+		return err
+	}
+	// Enable IPv6 on the bridge
+	procFile := "/proc/sys/net/ipv6/conf/" + iface.Name + "/disable_ipv6"
+	if err := ioutil.WriteFile(procFile, []byte{'0', '\n'}, 0644); err != nil {
+		return fmt.Errorf("Unable to enable IPv6 addresses on bridge: %v", err)
+	}
+
+	ipAddr6, ipNet6, err := net.ParseCIDR(bridgeIPv6)
+	if err != nil {
+		return fmt.Errorf("Unable to parse bridge IPv6 address: %q, error: %v", bridgeIPv6, err)
+	}
+
+	if err := netlink.NetworkLinkAddIp(iface, ipAddr6, ipNet6); err != nil {
+		return fmt.Errorf("Unable to add private IPv6 network: %v", err)
+	}
+
+	return nil
+}
+
 func createBridgeIface(name string) error {
 	kv, err := kernel.GetKernelVersion()
 	// only set the bridge's mac address if the kernel version is > 3.3
@@ -357,20 +484,34 @@ func generateMacAddr(ip net.IP) net.HardwareAddr {
 	return hw
 }
 
+func linkLocalIPv6FromMac(mac string) (string, error) {
+	hx := strings.Replace(mac, ":", "", -1)
+	hw, err := hex.DecodeString(hx)
+	if err != nil {
+		return "", errors.New("Could not parse MAC address " + mac)
+	}
+
+	hw[0] ^= 0x2
+
+	return fmt.Sprintf("fe80::%x%x:%xff:fe%x:%x%x/64", hw[0], hw[1], hw[2], hw[3], hw[4], hw[5]), nil
+}
+
 // Allocate a network interface
 func Allocate(job *engine.Job) engine.Status {
 	var (
-		ip          net.IP
-		mac         net.HardwareAddr
-		err         error
-		id          = job.Args[0]
-		requestedIP = net.ParseIP(job.Getenv("RequestedIP"))
+		ip            net.IP
+		mac           net.HardwareAddr
+		err           error
+		id            = job.Args[0]
+		requestedIP   = net.ParseIP(job.Getenv("RequestedIP"))
+		requestedIPv6 = net.ParseIP(job.Getenv("RequestedIPv6"))
+		globalIPv6    net.IP
 	)
 
 	if requestedIP != nil {
-		ip, err = ipallocator.RequestIP(bridgeNetwork, requestedIP)
+		ip, err = ipallocator.RequestIP(bridgeIPv4Network, requestedIP)
 	} else {
-		ip, err = ipallocator.RequestIP(bridgeNetwork, nil)
+		ip, err = ipallocator.RequestIP(bridgeIPv4Network, nil)
 	}
 	if err != nil {
 		return job.Error(err)
@@ -381,18 +522,53 @@ func Allocate(job *engine.Job) engine.Status {
 		mac = generateMacAddr(ip)
 	}
 
+	if globalIPv6Network != nil {
+		// if globalIPv6Network is at least a /80 subnet, generate the IPv6 address from the MAC address
+		netmask_ones, _ := globalIPv6Network.Mask.Size()
+		if requestedIPv6 == nil && netmask_ones <= 80 {
+			requestedIPv6 = globalIPv6Network.IP
+			for i, h := range mac {
+				requestedIPv6[i+10] = h
+			}
+		}
+
+		globalIPv6, err = ipallocator.RequestIP(globalIPv6Network, requestedIPv6)
+		if err != nil {
+			log.Errorf("Allocator: RequestIP v6: %s", err.Error())
+			return job.Error(err)
+		}
+		log.Infof("Allocated IPv6 %s", globalIPv6)
+	}
+
 	out := engine.Env{}
 	out.Set("IP", ip.String())
-	out.Set("Mask", bridgeNetwork.Mask.String())
-	out.Set("Gateway", bridgeNetwork.IP.String())
+	out.Set("Mask", bridgeIPv4Network.Mask.String())
+	out.Set("Gateway", bridgeIPv4Network.IP.String())
 	out.Set("MacAddress", mac.String())
 	out.Set("Bridge", bridgeIface)
 
-	size, _ := bridgeNetwork.Mask.Size()
+	size, _ := bridgeIPv4Network.Mask.Size()
 	out.SetInt("IPPrefixLen", size)
 
+	// derive the link-local IPv6 address from the MAC address
+	localIPv6Net, err := linkLocalIPv6FromMac(mac.String())
+	if err != nil {
+		return job.Error(err)
+	}
+	localIPv6, _, _ := net.ParseCIDR(localIPv6Net)
+	out.Set("LinkLocalIPv6", localIPv6.String())
+	out.Set("MacAddress", mac.String())
+
+	if globalIPv6Network != nil {
+		out.Set("GlobalIPv6", globalIPv6.String())
+		sizev6, _ := globalIPv6Network.Mask.Size()
+		out.SetInt("GlobalIPv6PrefixLen", sizev6)
+		out.Set("IPv6Gateway", bridgeIPv6Addr.String())
+	}
+
 	currentInterfaces.Set(id, &networkInterface{
-		IP: ip,
+		IP:   ip,
+		IPv6: globalIPv6,
 	})
 
 	out.WriteTo(job.Stdout)
@@ -417,8 +593,13 @@ func Release(job *engine.Job) engine.Status {
 		}
 	}
 
-	if err := ipallocator.ReleaseIP(bridgeNetwork, containerInterface.IP); err != nil {
-		log.Infof("Unable to release ip %s", err)
+	if err := ipallocator.ReleaseIP(bridgeIPv4Network, containerInterface.IP); err != nil {
+		log.Infof("Unable to release IPv4 %s", err)
+	}
+	if globalIPv6Network != nil {
+		if err := ipallocator.ReleaseIP(globalIPv6Network, containerInterface.IPv6); err != nil {
+			log.Infof("Unable to release IPv6 %s", err)
+		}
 	}
 	return engine.StatusOK
 }
@@ -501,35 +682,38 @@ func AllocatePort(job *engine.Job) engine.Status {
 func LinkContainers(job *engine.Job) engine.Status {
 	var (
 		action       = job.Args[0]
+		nfAction     iptables.Action
 		childIP      = job.Getenv("ChildIP")
 		parentIP     = job.Getenv("ParentIP")
 		ignoreErrors = job.GetenvBool("IgnoreErrors")
 		ports        = job.GetenvList("Ports")
 	)
-	for _, value := range ports {
-		port := nat.Port(value)
-		if output, err := iptables.Raw(action, "FORWARD",
-			"-i", bridgeIface, "-o", bridgeIface,
-			"-p", port.Proto(),
-			"-s", parentIP,
-			"--dport", strconv.Itoa(port.Int()),
-			"-d", childIP,
-			"-j", "ACCEPT"); !ignoreErrors && err != nil {
-			return job.Error(err)
-		} else if len(output) != 0 {
-			return job.Errorf("Error toggle iptables forward: %s", output)
-		}
 
-		if output, err := iptables.Raw(action, "FORWARD",
-			"-i", bridgeIface, "-o", bridgeIface,
-			"-p", port.Proto(),
-			"-s", childIP,
-			"--sport", strconv.Itoa(port.Int()),
-			"-d", parentIP,
-			"-j", "ACCEPT"); !ignoreErrors && err != nil {
+	switch action {
+	case "-A":
+		nfAction = iptables.Append
+	case "-I":
+		nfAction = iptables.Insert
+	case "-D":
+		nfAction = iptables.Delete
+	default:
+		return job.Errorf("Invalid action '%s' specified", action)
+	}
+
+	ip1 := net.ParseIP(parentIP)
+	if ip1 == nil {
+		return job.Errorf("parent IP '%s' is invalid", parentIP)
+	}
+	ip2 := net.ParseIP(childIP)
+	if ip2 == nil {
+		return job.Errorf("child IP '%s' is invalid", childIP)
+	}
+
+	chain := iptables.Chain{Name: "DOCKER", Bridge: bridgeIface}
+	for _, p := range ports {
+		port := nat.Port(p)
+		if err := chain.Link(nfAction, ip1, ip2, port.Int(), port.Proto()); !ignoreErrors && err != nil {
 			return job.Error(err)
-		} else if len(output) != 0 {
-			return job.Errorf("Error toggle iptables forward: %s", output)
 		}
 	}
 	return engine.StatusOK

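linkLocalIPv6FromMac is the usual modified EUI-64 construction: flip the universal/local bit of the first MAC byte and splice ff:fe into the middle of the address. A worked example with an illustrative MAC (not taken from the patch):

    addr, err := linkLocalIPv6FromMac("02:42:ac:11:00:02")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(addr) // fe80::042:acff:fe11:02/64 (first byte 0x02 ^ 0x02 = 0x00)
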
+ 41 - 0
daemon/networkdriver/bridge/driver_test.go

@@ -7,6 +7,7 @@ import (
 
 	"github.com/docker/docker/daemon/networkdriver/portmapper"
 	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/iptables"
 )
 
 func init() {
@@ -118,3 +119,43 @@ func TestMacAddrGeneration(t *testing.T) {
 		t.Fatal("Non-unique MAC address")
 	}
 }
+
+func TestLinkContainers(t *testing.T) {
+	eng := engine.New()
+	eng.Logging = false
+
+	// Init driver
+	job := eng.Job("initdriver")
+	if res := InitDriver(job); res != engine.StatusOK {
+		t.Fatal("Failed to initialize network driver")
+	}
+
+	// Allocate interface
+	job = eng.Job("allocate_interface", "container_id")
+	if res := Allocate(job); res != engine.StatusOK {
+		t.Fatal("Failed to allocate network interface")
+	}
+
+	job.Args[0] = "-I"
+
+	job.Setenv("ChildIP", "172.17.0.2")
+	job.Setenv("ParentIP", "172.17.0.1")
+	job.SetenvBool("IgnoreErrors", false)
+	job.SetenvList("Ports", []string{"1234"})
+
+	bridgeIface = "lo"
+	_, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if res := LinkContainers(job); res != engine.StatusOK {
+		t.Fatalf("LinkContainers failed")
+	}
+
+	// flush rules
+	if _, err = iptables.Raw([]string{"-F", "DOCKER"}...); err != nil {
+		t.Fatal(err)
+	}
+
+}

+ 0 - 1
daemon/networkdriver/ipallocator/allocator.go

@@ -121,7 +121,6 @@ func (allocated *allocatedMap) checkIP(ip net.IP) (net.IP, error) {
 
 	// Register the IP.
 	allocated.p[ip.String()] = struct{}{}
-	allocated.last.Set(pos)
 
 	return ip, nil
 }

+ 1 - 1
daemon/networkdriver/portmapper/mapper.go

@@ -93,7 +93,7 @@ func Map(container net.Addr, hostIP net.IP, hostPort int) (host net.Addr, err er
 	}
 
 	containerIP, containerPort := getIPAndPort(m.container)
-	if err := forward(iptables.Add, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort); err != nil {
+	if err := forward(iptables.Append, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort); err != nil {
 		return nil, err
 	}
 

+ 16 - 11
daemon/networkdriver/utils.go

@@ -44,11 +44,13 @@ func CheckRouteOverlaps(toCheck *net.IPNet) error {
 
 // Detects overlap between one IPNet and another
 func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool {
-	if firstIP, _ := NetworkRange(netX); netY.Contains(firstIP) {
-		return true
-	}
-	if firstIP, _ := NetworkRange(netY); netX.Contains(firstIP) {
-		return true
+	if len(netX.IP) == len(netY.IP) {
+		if firstIP, _ := NetworkRange(netX); netY.Contains(firstIP) {
+			return true
+		}
+		if firstIP, _ := NetworkRange(netY); netX.Contains(firstIP) {
+			return true
+		}
 	}
 	return false
 }
@@ -72,31 +74,34 @@ func NetworkRange(network *net.IPNet) (net.IP, net.IP) {
 	return netIP.Mask(network.Mask), net.IP(lastIP)
 }
 
-// Return the IPv4 address of a network interface
-func GetIfaceAddr(name string) (net.Addr, error) {
+// Return the first IPv4 address and slice of IPv6 addresses for the specified network interface
+func GetIfaceAddr(name string) (net.Addr, []net.Addr, error) {
 	iface, err := net.InterfaceByName(name)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	addrs, err := iface.Addrs()
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	var addrs4 []net.Addr
+	var addrs6 []net.Addr
 	for _, addr := range addrs {
 		ip := (addr.(*net.IPNet)).IP
 		if ip4 := ip.To4(); ip4 != nil {
 			addrs4 = append(addrs4, addr)
+		} else if ip6 := ip.To16(); len(ip6) == net.IPv6len {
+			addrs6 = append(addrs6, addr)
 		}
 	}
 	switch {
 	case len(addrs4) == 0:
-		return nil, fmt.Errorf("Interface %v has no IP addresses", name)
+		return nil, nil, fmt.Errorf("Interface %v has no IPv4 addresses", name)
 	case len(addrs4) > 1:
 		fmt.Printf("Interface %v has more than 1 IPv4 address. Defaulting to using %v\n",
 			name, (addrs4[0].(*net.IPNet)).IP)
 	}
-	return addrs4[0], nil
+	return addrs4[0], addrs6, nil
 }
 
 func GetDefaultRouteIface() (*net.Interface, error) {

+ 34 - 0
daemon/rename.go

@@ -0,0 +1,34 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerRename(job *engine.Job) engine.Status {
+	if len(job.Args) != 2 {
+		return job.Errorf("usage: %s OLD_NAME NEW_NAME", job.Name)
+	}
+	oldName := job.Args[0]
+	newName := job.Args[1]
+
+	container := daemon.Get(oldName)
+	if container == nil {
+		return job.Errorf("No such container: %s", oldName)
+	}
+
+	oldName = container.Name
+
+	container.Lock()
+	defer container.Unlock()
+	if _, err := daemon.reserveName(container.ID, newName); err != nil {
+		return job.Errorf("Error when allocating new name: %s", err)
+	}
+
+	container.Name = newName
+
+	if err := daemon.containerGraph.Delete(oldName); err != nil {
+		return job.Errorf("Failed to delete container %q: %v", oldName, err)
+	}
+
+	return engine.StatusOK
+}

+ 10 - 2
daemon/start.go

@@ -22,6 +22,10 @@ func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
 		return job.Errorf("No such container: %s", name)
 	}
 
+	if container.IsPaused() {
+		return job.Errorf("Cannot start a paused container, try unpause instead.")
+	}
+
 	if container.IsRunning() {
 		return job.Errorf("Container already started")
 	}
@@ -44,9 +48,13 @@ func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
 }
 
 func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
+	container.Lock()
+	defer container.Unlock()
 	if err := parseSecurityOpt(container, hostConfig); err != nil {
 		return err
 	}
+
+	// FIXME: this should be handled by the volume subsystem
 	// Validate the HostConfig binds. Make sure that:
 	// the source exists
 	for _, bind := range hostConfig.Binds {
@@ -66,8 +74,8 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.
 	if err := daemon.RegisterLinks(container, hostConfig); err != nil {
 		return err
 	}
-	container.SetHostConfig(hostConfig)
-	container.ToDisk()
+	container.hostConfig = hostConfig
+	container.toDisk()
 
 	return nil
 }

+ 1 - 1
daemon/state_test.go

@@ -49,7 +49,7 @@ func TestStateRunStop(t *testing.T) {
 			atomic.StoreInt64(&exit, int64(exitCode))
 			close(stopped)
 		}()
-		s.SetStopped(&execdriver.ExitStatus{i, false})
+		s.SetStopped(&execdriver.ExitStatus{ExitCode: i})
 		if s.IsRunning() {
 			t.Fatal("State is running")
 		}

+ 98 - 0
daemon/stats.go

@@ -0,0 +1,98 @@
+package daemon
+
+import (
+	"encoding/json"
+
+	"github.com/docker/docker/api/stats"
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/engine"
+	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/cgroups"
+)
+
+func (daemon *Daemon) ContainerStats(job *engine.Job) engine.Status {
+	updates, err := daemon.SubscribeToContainerStats(job.Args[0])
+	if err != nil {
+		return job.Error(err)
+	}
+	enc := json.NewEncoder(job.Stdout)
+	for v := range updates {
+		update := v.(*execdriver.ResourceStats)
+		ss := convertToAPITypes(update.ContainerStats)
+		ss.MemoryStats.Limit = uint64(update.MemoryLimit)
+		ss.Read = update.Read
+		ss.CpuStats.SystemUsage = update.SystemUsage
+		if err := enc.Encode(ss); err != nil {
+			// TODO: handle the specific broken pipe
+			daemon.UnsubscribeToContainerStats(job.Args[0], updates)
+			return job.Error(err)
+		}
+	}
+	return engine.StatusOK
+}
+
+// convertToAPITypes converts the libcontainer.ContainerStats to the api specific
+// structs.  This is done to preserve API compatibility and versioning.
+func convertToAPITypes(ls *libcontainer.ContainerStats) *stats.Stats {
+	s := &stats.Stats{}
+	if ls.NetworkStats != nil {
+		s.Network = stats.Network{
+			RxBytes:   ls.NetworkStats.RxBytes,
+			RxPackets: ls.NetworkStats.RxPackets,
+			RxErrors:  ls.NetworkStats.RxErrors,
+			RxDropped: ls.NetworkStats.RxDropped,
+			TxBytes:   ls.NetworkStats.TxBytes,
+			TxPackets: ls.NetworkStats.TxPackets,
+			TxErrors:  ls.NetworkStats.TxErrors,
+			TxDropped: ls.NetworkStats.TxDropped,
+		}
+	}
+	cs := ls.CgroupStats
+	if cs != nil {
+		s.BlkioStats = stats.BlkioStats{
+			IoServiceBytesRecursive: copyBlkioEntry(cs.BlkioStats.IoServiceBytesRecursive),
+			IoServicedRecursive:     copyBlkioEntry(cs.BlkioStats.IoServicedRecursive),
+			IoQueuedRecursive:       copyBlkioEntry(cs.BlkioStats.IoQueuedRecursive),
+			IoServiceTimeRecursive:  copyBlkioEntry(cs.BlkioStats.IoServiceTimeRecursive),
+			IoWaitTimeRecursive:     copyBlkioEntry(cs.BlkioStats.IoWaitTimeRecursive),
+			IoMergedRecursive:       copyBlkioEntry(cs.BlkioStats.IoMergedRecursive),
+			IoTimeRecursive:         copyBlkioEntry(cs.BlkioStats.IoTimeRecursive),
+			SectorsRecursive:        copyBlkioEntry(cs.BlkioStats.SectorsRecursive),
+		}
+		cpu := cs.CpuStats
+		s.CpuStats = stats.CpuStats{
+			CpuUsage: stats.CpuUsage{
+				TotalUsage:        cpu.CpuUsage.TotalUsage,
+				PercpuUsage:       cpu.CpuUsage.PercpuUsage,
+				UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode,
+				UsageInUsermode:   cpu.CpuUsage.UsageInUsermode,
+			},
+			ThrottlingData: stats.ThrottlingData{
+				Periods:          cpu.ThrottlingData.Periods,
+				ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods,
+				ThrottledTime:    cpu.ThrottlingData.ThrottledTime,
+			},
+		}
+		mem := cs.MemoryStats
+		s.MemoryStats = stats.MemoryStats{
+			Usage:    mem.Usage,
+			MaxUsage: mem.MaxUsage,
+			Stats:    mem.Stats,
+			Failcnt:  mem.Failcnt,
+		}
+	}
+	return s
+}
+
+func copyBlkioEntry(entries []cgroups.BlkioStatEntry) []stats.BlkioStatEntry {
+	out := make([]stats.BlkioStatEntry, len(entries))
+	for i, re := range entries {
+		out[i] = stats.BlkioStatEntry{
+			Major: re.Major,
+			Minor: re.Minor,
+			Op:    re.Op,
+			Value: re.Value,
+		}
+	}
+	return out
+}

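ContainerStats writes one JSON-encoded stats.Stats object per collection interval to job.Stdout. A minimal consumer sketch, assuming some io.Reader r carrying that stream (r and the chosen fields are illustrative):

    dec := json.NewDecoder(r)
    for {
        var s stats.Stats
        if err := dec.Decode(&s); err != nil {
            break // io.EOF when the stream ends, or a broken pipe
        }
        fmt.Printf("read=%s mem=%d/%d\n", s.Read, s.MemoryStats.Usage, s.MemoryStats.Limit)
    }
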
+ 129 - 0
daemon/stats_collector.go

@@ -0,0 +1,129 @@
+package daemon
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/pubsub"
+	"github.com/docker/libcontainer/system"
+)
+
+// newStatsCollector returns a new statsCollector that collects
+// network and cgroup stats for a registered container at the specified
+// interval.  The collector allows non-running containers to be added
+// and will start processing stats when they are started.
+func newStatsCollector(interval time.Duration) *statsCollector {
+	s := &statsCollector{
+		interval:   interval,
+		publishers: make(map[*Container]*pubsub.Publisher),
+		clockTicks: uint64(system.GetClockTicks()),
+	}
+	go s.run()
+	return s
+}
+
+// statsCollector manages and provides container resource stats
+type statsCollector struct {
+	m          sync.Mutex
+	interval   time.Duration
+	clockTicks uint64
+	publishers map[*Container]*pubsub.Publisher
+}
+
+// collect registers the container with the collector and adds it to
+// the event loop for collection on the specified interval, returning
+// a channel for the subscriber to receive on.
+func (s *statsCollector) collect(c *Container) chan interface{} {
+	s.m.Lock()
+	defer s.m.Unlock()
+	publisher, exists := s.publishers[c]
+	if !exists {
+		publisher = pubsub.NewPublisher(100*time.Millisecond, 1024)
+		s.publishers[c] = publisher
+	}
+	return publisher.Subscribe()
+}
+
+// stopCollection closes the channels for all subscribers and removes
+// the container from metrics collection.
+func (s *statsCollector) stopCollection(c *Container) {
+	s.m.Lock()
+	if publisher, exists := s.publishers[c]; exists {
+		publisher.Close()
+		delete(s.publishers, c)
+	}
+	s.m.Unlock()
+}
+
+// unsubscribe removes a specific subscriber from receiving updates for a container's stats.
+func (s *statsCollector) unsubscribe(c *Container, ch chan interface{}) {
+	s.m.Lock()
+	publisher := s.publishers[c]
+	if publisher != nil {
+		publisher.Evict(ch)
+		if publisher.Len() == 0 {
+			delete(s.publishers, c)
+		}
+	}
+	s.m.Unlock()
+}
+
+func (s *statsCollector) run() {
+	for _ = range time.Tick(s.interval) {
+		for container, publisher := range s.publishers {
+			systemUsage, err := s.getSystemCpuUsage()
+			if err != nil {
+				log.Errorf("collecting system cpu usage for %s: %v", container.ID, err)
+				continue
+			}
+			stats, err := container.Stats()
+			if err != nil {
+				if err != execdriver.ErrNotRunning {
+					log.Errorf("collecting stats for %s: %v", container.ID, err)
+				}
+				continue
+			}
+			stats.SystemUsage = systemUsage
+			publisher.Publish(stats)
+		}
+	}
+}
+
+const nanoSeconds = 1e9
+
+// getSystemCpuUsage returns the host system's cpu usage in nanoseconds,
+// in the same format as the cgroup readings, so the two can be compared.
+func (s *statsCollector) getSystemCpuUsage() (uint64, error) {
+	f, err := os.Open("/proc/stat")
+	if err != nil {
+		return 0, err
+	}
+	defer f.Close()
+	sc := bufio.NewScanner(f)
+	for sc.Scan() {
+		parts := strings.Fields(sc.Text())
+		switch parts[0] {
+		case "cpu":
+			if len(parts) < 8 {
+				return 0, fmt.Errorf("invalid number of cpu fields")
+			}
+			var sum uint64
+			for _, i := range parts[1:8] {
+				v, err := strconv.ParseUint(i, 10, 64)
+				if err != nil {
+					return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err)
+				}
+				sum += v
+			}
+			return (sum * nanoSeconds) / s.clockTicks, nil
+		}
+	}
+	return 0, fmt.Errorf("invalid stat format")
+}

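The /proc/stat conversion is plain unit arithmetic: the first seven cpu fields are summed in clock ticks and scaled to nanoseconds. With the common USER_HZ of 100, assumed here for illustration:

    sum := uint64(123456)     // ticks across user, nice, system, idle, ...
    clockTicks := uint64(100) // what system.GetClockTicks() typically returns
    fmt.Println(sum * nanoSeconds / clockTicks) // 1234560000000 ns
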
+ 1 - 32
daemon/utils_test.go

@@ -16,7 +16,7 @@ func TestMergeLxcConfig(t *testing.T) {
 
 	out, err := mergeLxcConfIntoOptions(hostConfig)
 	if err != nil {
-		t.Fatalf("Failed to merge Lxc Config ", err)
+		t.Fatalf("Failed to merge Lxc Config: %s", err)
 	}
 
 	cpuset := out[0]
@@ -24,34 +24,3 @@ func TestMergeLxcConfig(t *testing.T) {
 		t.Fatalf("expected %s got %s", expected, cpuset)
 	}
 }
-
-func TestRemoveLocalDns(t *testing.T) {
-	ns0 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\n"
-
-	if result := utils.RemoveLocalDns([]byte(ns0)); result != nil {
-		if ns0 != string(result) {
-			t.Fatalf("Failed No Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
-		}
-	}
-
-	ns1 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\nnameserver 127.0.0.1\n"
-	if result := utils.RemoveLocalDns([]byte(ns1)); result != nil {
-		if ns0 != string(result) {
-			t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
-		}
-	}
-
-	ns1 = "nameserver 10.16.60.14\nnameserver 127.0.0.1\nnameserver 10.16.60.21\n"
-	if result := utils.RemoveLocalDns([]byte(ns1)); result != nil {
-		if ns0 != string(result) {
-			t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
-		}
-	}
-
-	ns1 = "nameserver 127.0.1.1\nnameserver 10.16.60.14\nnameserver 10.16.60.21\n"
-	if result := utils.RemoveLocalDns([]byte(ns1)); result != nil {
-		if ns0 != string(result) {
-			t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
-		}
-	}
-}

+ 63 - 36
daemon/volumes.go

@@ -119,8 +119,23 @@ func (container *Container) VolumePaths() map[string]struct{} {
 }
 
 func (container *Container) registerVolumes() {
-	for _, mnt := range container.VolumeMounts() {
-		mnt.volume.AddContainer(container.ID)
+	for path := range container.VolumePaths() {
+		if v := container.daemon.volumes.Get(path); v != nil {
+			v.AddContainer(container.ID)
+			continue
+		}
+
+		// if the container was created with an old daemon, this volume may not be registered, so make sure it gets registered
+		writable := true
+		if rw, exists := container.VolumesRW[path]; exists {
+			writable = rw
+		}
+		v, err := container.daemon.volumes.FindOrCreateVolume(path, writable)
+		if err != nil {
+			log.Debugf("error registering volume %s: %v", path, err)
+			continue
+		}
+		v.AddContainer(container.ID)
 	}
 }
 
@@ -214,20 +229,61 @@ func parseBindMountSpec(spec string) (string, string, bool, error) {
 	return path, mountToPath, writable, nil
 }
 
+func parseVolumesFromSpec(spec string) (string, string, error) {
+	specParts := strings.SplitN(spec, ":", 2)
+	if len(specParts) == 0 {
+		return "", "", fmt.Errorf("malformed volumes-from specification: %s", spec)
+	}
+
+	var (
+		id   = specParts[0]
+		mode = "rw"
+	)
+	if len(specParts) == 2 {
+		mode = specParts[1]
+		if !validMountMode(mode) {
+			return "", "", fmt.Errorf("invalid mode for volumes-from: %s", mode)
+		}
+	}
+	return id, mode, nil
+}
+
 func (container *Container) applyVolumesFrom() error {
 	volumesFrom := container.hostConfig.VolumesFrom
+	if len(volumesFrom) > 0 && container.AppliedVolumesFrom == nil {
+		container.AppliedVolumesFrom = make(map[string]struct{})
+	}
 
-	mountGroups := make([]map[string]*Mount, 0, len(volumesFrom))
+	mountGroups := make(map[string][]*Mount)
 
 	for _, spec := range volumesFrom {
-		mountGroup, err := parseVolumesFromSpec(container.daemon, spec)
+		id, mode, err := parseVolumesFromSpec(spec)
 		if err != nil {
 			return err
 		}
-		mountGroups = append(mountGroups, mountGroup)
+		if _, exists := container.AppliedVolumesFrom[id]; exists {
+			// Don't try to apply these since they've already been applied
+			continue
+		}
+
+		c := container.daemon.Get(id)
+		if c == nil {
+			return fmt.Errorf("container %s not found, impossible to mount its volumes", id)
+		}
+
+		var (
+			fromMounts = c.VolumeMounts()
+			mounts     []*Mount
+		)
+
+		for _, mnt := range fromMounts {
+			mnt.Writable = mnt.Writable && (mode == "rw")
+			mounts = append(mounts, mnt)
+		}
+		mountGroups[id] = mounts
 	}
 
-	for _, mounts := range mountGroups {
+	for id, mounts := range mountGroups {
 		for _, mnt := range mounts {
 			mnt.from = mnt.container
 			mnt.container = container
@@ -235,6 +291,7 @@ func (container *Container) applyVolumesFrom() error {
 				return err
 			}
 		}
+		container.AppliedVolumesFrom[id] = struct{}{}
 	}
 	return nil
 }
@@ -284,36 +341,6 @@ func (container *Container) setupMounts() error {
 	return nil
 }
 
-func parseVolumesFromSpec(daemon *Daemon, spec string) (map[string]*Mount, error) {
-	specParts := strings.SplitN(spec, ":", 2)
-	if len(specParts) == 0 {
-		return nil, fmt.Errorf("Malformed volumes-from specification: %s", spec)
-	}
-
-	c := daemon.Get(specParts[0])
-	if c == nil {
-		return nil, fmt.Errorf("Container %s not found. Impossible to mount its volumes", specParts[0])
-	}
-
-	mounts := c.VolumeMounts()
-
-	if len(specParts) == 2 {
-		mode := specParts[1]
-		if !validMountMode(mode) {
-			return nil, fmt.Errorf("Invalid mode for volumes-from: %s", mode)
-		}
-
-		// Set the mode for the inheritted volume
-		for _, mnt := range mounts {
-			// Ensure that if the inherited volume is not writable, that we don't make
-			// it writable here
-			mnt.Writable = mnt.Writable && (mode == "rw")
-		}
-	}
-
-	return mounts, nil
-}
-
 func (container *Container) VolumeMounts() map[string]*Mount {
 	mounts := make(map[string]*Mount)
 

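parseVolumesFromSpec splits a --volumes-from argument into a container reference and an optional access mode, defaulting to rw. Two illustrative inputs:

    id, mode, _ := parseVolumesFromSpec("mydata")   // id "mydata", mode "rw"
    fmt.Println(id, mode)
    id, mode, _ = parseVolumesFromSpec("mydata:ro") // id "mydata", mode "ro"
    fmt.Println(id, mode)
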
+ 53 - 2
docker/daemon.go

@@ -3,6 +3,11 @@
 package main
 
 import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/builtins"
@@ -14,16 +19,59 @@ import (
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
 )
 
 const CanDaemon = true
 
 var (
-	daemonCfg = &daemon.Config{}
+	daemonCfg   = &daemon.Config{}
+	registryCfg = &registry.Options{}
 )
 
 func init() {
 	daemonCfg.InstallFlags()
+	registryCfg.InstallFlags()
+}
+
+func migrateKey() (err error) {
+	// Migrate the trust key if it exists at ~/.docker/key.json and is owned by the current user
+	oldPath := filepath.Join(getHomeDir(), ".docker", defaultTrustKeyFile)
+	newPath := filepath.Join(getDaemonConfDir(), defaultTrustKeyFile)
+	if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && utils.IsFileOwner(oldPath) {
+		defer func() {
+			// Ensure old path is removed if no error occurred
+			if err == nil {
+				err = os.Remove(oldPath)
+			} else {
+				log.Warnf("Key migration failed, key file not removed at %s", oldPath)
+			}
+		}()
+
+		if err := os.MkdirAll(getDaemonConfDir(), os.FileMode(0644)); err != nil {
+			return fmt.Errorf("Unable to create daemon configuration directory: %s", err)
+		}
+
+		newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+		if err != nil {
+			return fmt.Errorf("error creating key file %q: %s", newPath, err)
+		}
+		defer newFile.Close()
+
+		oldFile, err := os.Open(oldPath)
+		if err != nil {
+			return fmt.Errorf("error opening key file %q: %s", oldPath, err)
+		}
+		defer oldFile.Close()
+
+		if _, err := io.Copy(newFile, oldFile); err != nil {
+			return fmt.Errorf("error copying key: %s", err)
+		}
+
+		log.Infof("Migrated key from %s to %s", oldPath, newPath)
+	}
+
+	return nil
 }
 
 func mainDaemon() {
@@ -34,6 +82,9 @@ func mainDaemon() {
 	eng := engine.New()
 	signal.Trap(eng.Shutdown)
 
+	if err := migrateKey(); err != nil {
+		log.Fatal(err)
+	}
 	daemonCfg.TrustKeyPath = *flTrustKey
 
 	// Load builtins
@@ -42,7 +93,7 @@ func mainDaemon() {
 	}
 
 	// load registry service
-	if err := registry.NewService(daemonCfg.InsecureRegistries).Install(eng); err != nil {
+	if err := registry.NewService(registryCfg).Install(eng); err != nil {
 		log.Fatal(err)
 	}
 

+ 4 - 2
docker/docker.go

@@ -67,6 +67,8 @@ func main() {
 		flHosts = append(flHosts, defaultHost)
 	}
 
+	setDefaultConfFlag(flTrustKey, defaultTrustKeyFile)
+
 	if *flDaemon {
 		mainDaemon()
 		return
@@ -118,9 +120,9 @@ func main() {
 	}
 
 	if *flTls || *flTlsVerify {
-		cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, nil, protoAddrParts[0], protoAddrParts[1], &tlsConfig)
+		cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], &tlsConfig)
 	} else {
-		cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, nil, protoAddrParts[0], protoAddrParts[1], nil)
+		cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], nil)
 	}
 
 	if err := cli.Cmd(flag.Args()...); err != nil {

+ 29 - 7
docker/flags.go

@@ -28,14 +28,23 @@ func getHomeDir() string {
 	return os.Getenv("HOME")
 }
 
+func getDaemonConfDir() string {
+	// TODO: update for Windows daemon
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("USERPROFILE"), ".docker")
+	}
+	return "/etc/docker"
+}
+
 var (
 	flVersion     = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit")
 	flDaemon      = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode")
 	flDebug       = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode")
 	flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group")
-	flLogLevel    = flag.String([]string{"l", "-log-level"}, "info", "Set the logging level")
+	flLogLevel    = flag.String([]string{"l", "-log-level"}, "info", "Set the logging level (debug, info, warn, error, fatal)")
 	flEnableCors  = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API")
 	flTls         = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by --tlsverify flag")
+	flHelp        = flag.Bool([]string{"h", "-help"}, false, "Print usage")
 	flTlsVerify   = flag.Bool([]string{"-tlsverify"}, dockerTlsVerify, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)")
 
 	// these are initialized in init() below since their default values depend on dockerCertPath which isn't fully initialized until init() runs
@@ -46,10 +55,20 @@ var (
 	flHosts    []string
 )
 
+func setDefaultConfFlag(flag *string, def string) {
+	if *flag == "" {
+		if *flDaemon {
+			*flag = filepath.Join(getDaemonConfDir(), def)
+		} else {
+			*flag = filepath.Join(getHomeDir(), ".docker", def)
+		}
+	}
+}
+
 func init() {
-	// placeholder for trust key flag
-	trustKeyDefault := filepath.Join(dockerCertPath, defaultTrustKeyFile)
-	flTrustKey = &trustKeyDefault
+	var placeholderTrustKey string
+	// TODO use flag flag.String([]string{"i", "-identity"}, "", "Path to libtrust key file")
+	flTrustKey = &placeholderTrustKey
 
 	flCa = flag.String([]string{"-tlscacert"}, filepath.Join(dockerCertPath, defaultCaFile), "Trust only remotes providing a certificate signed by the CA given here")
 	flCert = flag.String([]string{"-tlscert"}, filepath.Join(dockerCertPath, defaultCertFile), "Path to TLS certificate file")
@@ -57,8 +76,9 @@ func init() {
 	opts.HostListVar(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.")
 
 	flag.Usage = func() {
-		fmt.Fprint(os.Stderr, "Usage: docker [OPTIONS] COMMAND [arg...]\n\nA self-sufficient runtime for linux containers.\n\nOptions:\n")
+		fmt.Fprint(os.Stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n\nA self-sufficient runtime for linux containers.\n\nOptions:\n")
 
+		flag.CommandLine.SetOutput(os.Stdout)
 		flag.PrintDefaults()
 
 		help := "\nCommands:\n"
@@ -77,7 +97,7 @@ func init() {
 			{"images", "List images"},
 			{"import", "Create a new filesystem image from the contents of a tarball"},
 			{"info", "Display system-wide information"},
-			{"inspect", "Return low-level information on a container"},
+			{"inspect", "Return low-level information on a container or image"},
 			{"kill", "Kill a running container"},
 			{"load", "Load an image from a tar archive"},
 			{"login", "Register or log in to a Docker registry server"},
@@ -88,6 +108,7 @@ func init() {
 			{"ps", "List containers"},
 			{"pull", "Pull an image or a repository from a Docker registry server"},
 			{"push", "Push an image or a repository to a Docker registry server"},
+			{"rename", "Rename an existing container"},
 			{"restart", "Restart a running container"},
 			{"rm", "Remove one or more containers"},
 			{"rmi", "Remove one or more images"},
@@ -95,6 +116,7 @@ func init() {
 			{"save", "Save an image to a tar archive"},
 			{"search", "Search for an image on the Docker Hub"},
 			{"start", "Start a stopped container"},
+			{"stats", "Display a live stream of one or more containers' resource usage statistics"},
 			{"stop", "Stop a running container"},
 			{"tag", "Tag an image into a repository"},
 			{"top", "Lookup the running processes of a container"},
@@ -105,6 +127,6 @@ func init() {
 			help += fmt.Sprintf("    %-10.10s%s\n", command[0], command[1])
 		}
 		help += "\nRun 'docker COMMAND --help' for more information on a command."
-		fmt.Fprintf(os.Stderr, "%s\n", help)
+		fmt.Fprintf(os.Stdout, "%s\n", help)
 	}
 }

Some files were not shown because too many files have changed