Browse Source

Merge pull request #7644 from vieux/bump_v1.2.0

Bump v1.2.0
Victor Vieux 11 years ago
parent
commit
7649264eb8
100 changed files with 3304 additions and 1463 deletions
  1. 22 3
      .mailmap
  2. 14 2
      .travis.yml
  3. 178 35
      AUTHORS
  4. 27 8
      CHANGELOG.md
  5. 153 41
      CONTRIBUTING.md
  6. 14 8
      Dockerfile
  7. 0 24
      FIXME
  8. 4 3
      Makefile
  9. 5 3
      README.md
  10. 1 1
      VERSION
  11. 2 2
      api/README.md
  12. 18 17
      api/client/cli.go
  13. 166 100
      api/client/commands.go
  14. 12 11
      api/client/hijack.go
  15. 10 9
      api/client/utils.go
  16. 7 6
      api/common.go
  17. 2 1
      api/server/MAINTAINERS
  18. 51 37
      api/server/server.go
  19. 219 17
      api/server/server_unit_test.go
  20. 2 1
      archive/MAINTAINERS
  21. 65 39
      archive/archive.go
  22. 45 2
      archive/archive_test.go
  23. 10 7
      archive/changes.go
  24. 4 0
      archive/common.go
  25. 5 2
      archive/diff.go
  26. 1 1
      archive/wrap.go
  27. 12 12
      builtins/builtins.go
  28. 17 0
      contrib/check-config.sh
  29. 35 4
      contrib/completion/bash/docker
  30. 1 1
      contrib/completion/fish/docker.fish
  31. 234 61
      contrib/completion/zsh/_docker
  32. 0 125
      contrib/crashTest.go
  33. 1 1
      contrib/desktop-integration/iceweasel/Dockerfile
  34. 1 1
      contrib/docker-device-tool/device_tool.go
  35. 1 1
      contrib/host-integration/Dockerfile.dev
  36. 1 1
      contrib/host-integration/manager.go
  37. 1 1
      contrib/host-integration/manager.sh
  38. 2 0
      contrib/init/systemd/MAINTAINERS
  39. 4 4
      contrib/init/systemd/docker.service
  40. 3 0
      contrib/init/systemd/docker.socket
  41. 0 13
      contrib/init/systemd/socket-activation/docker.service
  42. 2 3
      contrib/init/sysvinit-debian/docker
  43. 1 1
      contrib/init/sysvinit-redhat/docker
  44. 2 2
      contrib/mkimage-alpine.sh
  45. 22 3
      contrib/mkimage-arch.sh
  46. 1 1
      contrib/mkimage-debootstrap.sh
  47. 20 3
      contrib/mkimage/debootstrap
  48. 64 0
      contrib/nuke-graph-directory.sh
  49. 0 10
      contrib/prepare-commit-msg.hook
  50. 6 0
      daemon/MAINTAINERS
  51. 129 16
      daemon/attach.go
  52. 129 31
      daemon/build.go
  53. 32 0
      daemon/changes.go
  54. 84 0
      daemon/commit.go
  55. 70 0
      daemon/config.go
  56. 94 128
      daemon/container.go
  57. 36 1
      daemon/container_unit_test.go
  58. 33 0
      daemon/copy.go
  59. 86 0
      daemon/create.go
  60. 280 256
      daemon/daemon.go
  61. 5 5
      daemon/daemon_aufs.go
  62. 1 1
      daemon/daemon_btrfs.go
  63. 1 1
      daemon/daemon_devicemapper.go
  64. 1 1
      daemon/daemon_no_aufs.go
  65. 174 0
      daemon/delete.go
  66. 3 41
      daemon/execdriver/driver.go
  67. 5 5
      daemon/execdriver/execdrivers/execdrivers.go
  68. 115 39
      daemon/execdriver/lxc/driver.go
  69. 103 6
      daemon/execdriver/lxc/init.go
  70. 23 7
      daemon/execdriver/lxc/lxc_init_linux.go
  71. 2 2
      daemon/execdriver/lxc/lxc_init_unsupported.go
  72. 3 3
      daemon/execdriver/lxc/lxc_template.go
  73. 5 3
      daemon/execdriver/lxc/lxc_template_unit_test.go
  74. 1 1
      daemon/execdriver/native/configuration/parse.go
  75. 1 1
      daemon/execdriver/native/configuration/parse_test.go
  76. 14 3
      daemon/execdriver/native/create.go
  77. 82 51
      daemon/execdriver/native/driver.go
  78. 13 0
      daemon/execdriver/native/driver_unsupported.go
  79. 13 0
      daemon/execdriver/native/driver_unsupported_nocgo.go
  80. 2 0
      daemon/execdriver/native/info.go
  81. 66 0
      daemon/execdriver/native/init.go
  82. 2 0
      daemon/execdriver/native/template/default_template.go
  83. 0 42
      daemon/execdriver/native/term.go
  84. 0 80
      daemon/execdriver/termconsole.go
  85. 63 0
      daemon/execdriver/utils.go
  86. 30 0
      daemon/export.go
  87. 7 6
      daemon/graphdriver/aufs/aufs.go
  88. 2 2
      daemon/graphdriver/aufs/aufs_test.go
  89. 3 2
      daemon/graphdriver/aufs/mount.go
  90. 0 2
      daemon/graphdriver/aufs/mount_linux.go
  91. 1 1
      daemon/graphdriver/aufs/mount_unsupported.go
  92. 3 3
      daemon/graphdriver/btrfs/btrfs.go
  93. 1 1
      daemon/graphdriver/btrfs/btrfs_test.go
  94. 1 1
      daemon/graphdriver/btrfs/dummy_unsupported.go
  95. 22 9
      daemon/graphdriver/devmapper/README.md
  96. 11 11
      daemon/graphdriver/devmapper/attach_loopback.go
  97. 62 51
      daemon/graphdriver/devmapper/deviceset.go
  98. 19 19
      daemon/graphdriver/devmapper/devmapper.go
  99. 1 1
      daemon/graphdriver/devmapper/devmapper_log.go
  100. 2 2
      daemon/graphdriver/devmapper/devmapper_test.go

+ 22 - 3
.mailmap

@@ -1,4 +1,9 @@
-# Generate AUTHORS: git log --format='%aN <%aE>' | sort -uf
+# Generate AUTHORS: hack/generate-authors.sh
+
+# Tip for finding duplicates (besides scanning the output of AUTHORS for name
+# duplicates that aren't also email duplicates): scan the output of:
+#   git log --format='%aE - %aN' | sort -uf
+
 <charles.hooper@dotcloud.com> <chooper@plumata.com>
 <daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
 <daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
@@ -47,8 +52,9 @@ Shih-Yuan Lee <fourdollars@gmail.com>
 Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> root <root@vagrant-ubuntu-12.10.vagrantup.com>
 Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
 <proppy@google.com> <proppy@aminche.com>
-<michael@crosbymichael.com> <crosby.michael@gmail.com>
-<github@metaliveblog.com> <github@developersupport.net>
+<michael@docker.com> <michael@crosbymichael.com>
+<michael@docker.com> <crosby.michael@gmail.com>
+<github@developersupport.net> <github@metaliveblog.com> 
 <brandon@ifup.org> <brandon@ifup.co>
 <dano@spotify.com> <daniel.norberg@gmail.com>
 <danny@codeaholics.org> <Danny.Yates@mailonline.co.uk>
@@ -78,3 +84,16 @@ Sridhar Ratnakumar <sridharr@activestate.com> <github@srid.name>
 Liang-Chi Hsieh <viirya@gmail.com>
 Aleksa Sarai <cyphar@cyphar.com>
 Will Weaver <monkey@buildingbananas.com>
+Timothy Hobbs <timothyhobbs@seznam.cz>
+Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
+Nathan LeClaire <nathan.leclaire@docker.com> <nathanleclaire@gmail.com>
+<github@hollensbe.org> <erik+github@hollensbe.org>
+<github@albersweb.de> <albers@users.noreply.github.com>
+<lsm5@fedoraproject.org> <lsm5@redhat.com>
+<marc@marc-abramowitz.com> <msabramo@gmail.com>
+Matthew Heon <mheon@redhat.com> <mheon@mheonlaptop.redhat.com>
+<bernat@luffy.cx> <vincent@bernat.im>
+<p@pwaller.net> <peter@scraperwiki.com>
+<andrew.weiss@outlook.com> <andrew.weiss@microsoft.com>
+Francisco Carriedo <fcarriedo@gmail.com>
+<julienbordellier@gmail.com> <git@julienbordellier.com>

+ 14 - 2
.travis.yml

@@ -3,10 +3,20 @@
 
 language: go
 
-go: 1.2
+go:
+# This should match the version in the Dockerfile.
+  - 1.3.1
+# Test against older versions too, just for a little extra retrocompat.
+  - 1.2
+
+# Let us have pretty experimental Docker-based Travis workers.
+# (These spin up much faster than the VM-based ones.)
+sudo: false
 
 # Disable the normal go build.
-install: true
+install:
+  - export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper' # btrfs and devicemapper fail to compile thanks to a couple missing headers (which we can't install thanks to "sudo: false")
+  - export AUTO_GOPATH=1
 
 before_script:
   - env | sort
@@ -14,5 +24,7 @@ before_script:
 script:
   - hack/make.sh validate-dco
   - hack/make.sh validate-gofmt
+  - ./hack/make.sh dynbinary
+  - DOCKER_CLIENTONLY=1 ./hack/make.sh dynbinary
 
 # vim:set sw=2 ts=2:

+ 178 - 35
AUTHORS

@@ -1,5 +1,5 @@
 # This file lists all individuals having contributed content to the repository.
-# For how it is generated, see `.mailmap`.
+# For how it is generated, see `hack/generate-authors.sh`.
 
 Aanand Prasad <aanand.prasad@gmail.com>
 Aaron Feng <aaron.feng@gmail.com>
@@ -9,33 +9,43 @@ Adam Miller <admiller@redhat.com>
 Adam Singer <financeCoding@gmail.com>
 Aditya <aditya@netroy.in>
 Adrian Mouat <adrian.mouat@gmail.com>
+Adrien Folie <folie.adrien@gmail.com>
+AJ Bowen <aj@gandi.net>
+Al Tobey <al@ooyala.com>
 alambike <alambike@gmail.com>
+Albert Zhang <zhgwenming@gmail.com>
 Aleksa Sarai <cyphar@cyphar.com>
+Alex Gaynor <alex.gaynor@gmail.com>
+Alex Warhawk <ax.warhawk@gmail.com>
 Alexander Larsson <alexl@redhat.com>
+Alexander Shopov <ash@kambanaria.org>
 Alexandr Morozov <lk4d4math@gmail.com>
 Alexey Kotlyarov <alexey@infoxchange.net.au>
 Alexey Shamrin <shamrin@gmail.com>
-Alex Gaynor <alex.gaynor@gmail.com>
 Alexis THOMAS <fr.alexisthomas@gmail.com>
 almoehi <almoehi@users.noreply.github.com>
-Al Tobey <al@ooyala.com>
 amangoel <amangoel@gmail.com>
+AnandkumarPatel <anandkumarpatel@gmail.com>
+Andre Dublin <81dublin@gmail.com>
 Andrea Luzzardi <aluzzardi@gmail.com>
+Andrea Turli <andrea.turli@gmail.com>
 Andreas Savvides <andreas@editd.com>
 Andreas Tiefenthaler <at@an-ti.eu>
-Andrea Turli <andrea.turli@gmail.com>
 Andrew Duckworth <grillopress@gmail.com>
+Andrew France <andrew@avito.co.uk>
 Andrew Macgregor <andrew.macgregor@agworld.com.au>
 Andrew Munsell <andrew@wizardapps.net>
-Andrews Medina <andrewsmedina@gmail.com>
+Andrew Weiss <andrew.weiss@outlook.com>
 Andrew Williams <williams.andrew@gmail.com>
+Andrews Medina <andrewsmedina@gmail.com>
 Andy Chambers <anchambers@paypal.com>
 andy diller <dillera@gmail.com>
 Andy Goldstein <agoldste@redhat.com>
 Andy Kipp <andy@rstudio.com>
-Andy Rothfusz <github@metaliveblog.com>
+Andy Rothfusz <github@developersupport.net>
 Andy Smith <github@anarkystic.com>
 Anthony Bishopric <git@anthonybishopric.com>
+Anton Löfgren <anton.lofgren@gmail.com>
 Anton Nikitin <anton.k.nikitin@gmail.com>
 Antony Messerli <amesserl@rackspace.com>
 apocas <petermdias@gmail.com>
@@ -44,25 +54,34 @@ Asbjørn Enge <asbjorn@hanafjedle.net>
 Barnaby Gray <barnaby@pickle.me.uk>
 Barry Allard <barry.allard@gmail.com>
 Bartłomiej Piotrowski <b@bpiotrowski.pl>
-Benjamin Atkin <ben@benatkin.com>
-Benoit Chesneau <bchesneau@gmail.com>
+bdevloed <boris.de.vloed@gmail.com>
+Ben Firshman <ben@firshman.co.uk>
 Ben Sargent <ben@brokendigits.com>
 Ben Toews <mastahyeti@gmail.com>
 Ben Wiklund <ben@daisyowl.com>
+Benjamin Atkin <ben@benatkin.com>
+Benoit Chesneau <bchesneau@gmail.com>
 Bernerd Schaefer <bj.schaefer@gmail.com>
 Bhiraj Butala <abhiraj.butala@gmail.com>
 bin liu <liubin0329@users.noreply.github.com>
 Bouke Haarsma <bouke@webatoom.nl>
+Boyd Hemphill <boyd@feedmagnet.com>
 Brandon Liu <bdon@bdon.org>
 Brandon Philips <brandon@ifup.org>
+Brandon Rhodes <brandon@rhodesmill.org>
+Brett Kochendorfer <brett.kochendorfer@gmail.com>
+Brian (bex) Exelbierd <bexelbie@redhat.com>
 Brian Dorsey <brian@dorseys.org>
 Brian Flad <bflad417@gmail.com>
 Brian Goff <cpuguy83@gmail.com>
 Brian McCallister <brianm@skife.org>
 Brian Olsen <brian@maven-group.org>
 Brian Shumate <brian@couchbase.com>
+Brice Jaglin <bjaglin@teads.tv>
 Briehan Lombaard <briehan.lombaard@gmail.com>
 Bruno Bigras <bigras.bruno@gmail.com>
+Bruno Renié <brutasse@gmail.com>
+Bryan Bess <squarejaw@bsbess.com>
 Bryan Matsuo <bryan.matsuo@gmail.com>
 Bryan Murphy <bmurphy1976@gmail.com>
 Caleb Spare <cespare@gmail.com>
@@ -73,19 +92,35 @@ Charles Hooper <charles.hooper@dotcloud.com>
 Charles Lindsay <chaz@chazomatic.us>
 Charles Merriam <charles.merriam@gmail.com>
 Charlie Lewis <charliel@lab41.org>
+Chewey <prosto-chewey@users.noreply.github.com>
 Chia-liang Kao <clkao@clkao.org>
+Chris Alfonso <calfonso@redhat.com>
+Chris Snow <chsnow123@gmail.com>
 Chris St. Pierre <chris.a.st.pierre@gmail.com>
+chrismckinnel <chris.mckinnel@tangentlabs.co.uk>
+Christian Berendt <berendt@b1-systems.de>
+ChristoperBiscardi <biscarch@sketcht.com>
+Christophe Troestler <christophe.Troestler@umons.ac.be>
 Christopher Currie <codemonkey+github@gmail.com>
 Christopher Rigor <crigor@gmail.com>
-Christophe Troestler <christophe.Troestler@umons.ac.be>
+Ciro S. Costa <ciro.costa@usp.br>
 Clayton Coleman <ccoleman@redhat.com>
 Colin Dunklau <colin.dunklau@gmail.com>
 Colin Rice <colin@daedrum.net>
+Colin Walters <walters@verbum.org>
 Cory Forsyth <cory.forsyth@gmail.com>
+cpuguy83 <cpuguy83@gmail.com>
 cressie176 <github@stephen-cresswell.net>
+Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
+Daan van Berkel <daan.v.berkel.1980@gmail.com>
 Dafydd Crosby <dtcrsby@gmail.com>
 Dan Buch <d.buch@modcloth.com>
 Dan Hirsch <thequux@upstandinghackers.com>
+Dan Keder <dan.keder@gmail.com>
+Dan McPherson <dmcphers@redhat.com>
+Dan Stine <sw@stinemail.com>
+Dan Walsh <dwalsh@redhat.com>
+Dan Williams <me@deedubs.com>
 Daniel Exner <dex@dragonslave.de>
 Daniel Garcia <daniel@danielgarcia.info>
 Daniel Gasienica <daniel@gasienica.ch>
@@ -95,22 +130,21 @@ Daniel Nordberg <dnordberg@gmail.com>
 Daniel Robinson <gottagetmac@gmail.com>
 Daniel Von Fange <daniel@leancoder.com>
 Daniel YC Lin <dlin.tw@gmail.com>
-Dan Keder <dan.keder@gmail.com>
-Dan McPherson <dmcphers@redhat.com>
+Daniel, Dao Quang Minh <dqminh89@gmail.com>
 Danny Berger <dpb587@gmail.com>
 Danny Yates <danny@codeaholics.org>
-Dan Stine <sw@stinemail.com>
-Dan Walsh <dwalsh@redhat.com>
-Dan Williams <me@deedubs.com>
 Darren Coxall <darren@darrencoxall.com>
 Darren Shepherd <darren.s.shepherd@gmail.com>
 David Anderson <dave@natulte.net>
 David Calavera <david.calavera@gmail.com>
+David Corking <dmc-source@dcorking.com>
 David Gageot <david@gageot.net>
 David Mcanulty <github@hellspark.com>
 David Röthlisberger <david@rothlis.net>
 David Sissitka <me@dsissitka.com>
 Deni Bertovic <deni@kset.org>
+Derek <crq@kernel.org>
+Deric Crago <deric.crago@gmail.com>
 Dinesh Subhraveti <dineshs@altiscale.com>
 Djibril Koné <kone.djibril@gmail.com>
 dkumor <daniel@dkumor.com>
@@ -118,8 +152,10 @@ Dmitry Demeshchuk <demeshchuk@gmail.com>
 Dolph Mathews <dolph.mathews@gmail.com>
 Dominik Honnef <dominik@honnef.co>
 Don Spaulding <donspauldingii@gmail.com>
-Dražen Lučanin <kermit666@gmail.com>
+Doug Davis <dug@us.ibm.com>
+doug tangren <d.tangren@gmail.com>
 Dr Nic Williams <drnicwilliams@gmail.com>
+Dražen Lučanin <kermit666@gmail.com>
 Dustin Sallings <dustin@spy.net>
 Edmund Wagner <edmund-wagner@web.de>
 Eiichi Tsukata <devel@etsukata.com>
@@ -130,13 +166,17 @@ Emily Rose <emily@contactvibe.com>
 Eric Hanchrow <ehanchrow@ine.com>
 Eric Lee <thenorthsecedes@gmail.com>
 Eric Myhre <hash@exultant.us>
-Erik Hollensbe <erik+github@hollensbe.org>
+Eric Windisch <eric@windisch.us>
+Eric Windisch <ewindisch@docker.com>
+Erik Hollensbe <github@hollensbe.org>
+Erik Inge Bolsø <knan@redpill-linpro.com>
 Erno Hopearuoho <erno.hopearuoho@gmail.com>
 eugenkrizo <eugen.krizo@gmail.com>
 Evan Hazlett <ejhazlett@gmail.com>
 Evan Krall <krall@yelp.com>
 Evan Phoenix <evan@fallingsnow.net>
 Evan Wies <evan@neomantra.net>
+evanderkoogh <info@erronis.nl>
 Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
 ezbercih <cem.ezberci@gmail.com>
 Fabio Falci <fabiofalci@gmail.com>
@@ -147,12 +187,16 @@ Fareed Dudhia <fareeddudhia@googlemail.com>
 Felix Rabe <felix@rabe.io>
 Fernando <fermayo@gmail.com>
 Flavio Castelli <fcastelli@suse.com>
+FLGMwt <ryan.stelly@live.com>
+Francisco Carriedo <fcarriedo@gmail.com>
 Francisco Souza <f@souza.cc>
 Frank Macreery <frank@macreery.com>
+Fred Lifton <fred.lifton@docker.com>
 Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
 Frederik Loeffert <frederik@zitrusmedia.de>
 Freek Kalter <freek@kalteronline.org>
 Gabe Rosenhouse <gabe@missionst.com>
+Gabor Nagy <mail@aigeruth.hu>
 Gabriel Monroy <gabriel@opdemand.com>
 Galen Sampson <galen.sampson@gmail.com>
 Gareth Rushgrove <gareth@morethanseven.net>
@@ -160,75 +204,106 @@ Geoffrey Bachelet <grosfrais@gmail.com>
 Gereon Frey <gereon.frey@dynport.de>
 German DZ <germ@ndz.com.ar>
 Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
+Giuseppe Mazzotta <gdm85@users.noreply.github.com>
+Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org>
+Glyn Normington <gnormington@gopivotal.com>
 Goffert van Gool <goffert@phusion.nl>
 Graydon Hoare <graydon@pobox.com>
 Greg Thornton <xdissent@me.com>
 grunny <mwgrunny@gmail.com>
+Guilherme Salgado <gsalgado@gmail.com>
 Guillaume J. Charmes <guillaume.charmes@docker.com>
 Gurjeet Singh <gurjeet@singh.im>
 Guruprasad <lgp171188@gmail.com>
+Harald Albers <github@albersweb.de>
 Harley Laue <losinggeneration@gmail.com>
 Hector Castro <hectcastro@gmail.com>
+Henning Sprang <henning.sprang@gmail.com>
 Hobofan <goisser94@gmail.com>
+Hollie Teal <hollie.teal@docker.com>
+Hollie Teal <hollietealok@users.noreply.github.com>
+hollietealok <hollie@docker.com>
 Hunter Blanks <hunter@twilio.com>
+hyeongkyu.lee <hyeongkyu.lee@navercorp.com>
+Ian Babrou <ibobrik@gmail.com>
+Ian Bull <irbull@gmail.com>
+Ian Main <imain@redhat.com>
 Ian Truslove <ian.truslove@gmail.com>
 ILYA Khlopotov <ilya.khlopotov@gmail.com>
 inglesp <peter.inglesby@gmail.com>
 Isaac Dupree <antispam@idupree.com>
 Isabel Jimenez <contact.isabeljimenez@gmail.com>
 Isao Jonas <isao.jonas@gmail.com>
+Ivan Fraixedes <ifcdev@gmail.com>
 Jack Danger Canty <jackdanger@squareup.com>
-jakedt <jake@devtable.com>
 Jake Moshenko <jake@devtable.com>
+jakedt <jake@devtable.com>
 James Allen <jamesallen0108@gmail.com>
 James Carr <james.r.carr@gmail.com>
 James DeFelice <james.defelice@ishisystems.com>
 James Harrison Fisher <jameshfisher@gmail.com>
+James Kyle <james@jameskyle.org>
 James Mills <prologic@shortcircuit.net.au>
 James Turnbull <james@lovedthanlost.net>
+Jan Pazdziora <jpazdziora@redhat.com>
+Jan Toebes <jan@toebes.info>
+Jaroslaw Zabiello <hipertracker@gmail.com>
 jaseg <jaseg@jaseg.net>
+Jason Giedymin <jasong@apache.org>
+Jason Hall <imjasonh@gmail.com>
+Jason Livesay <ithkuil@gmail.com>
 Jason McVetta <jason.mcvetta@gmail.com>
 Jason Plum <jplum@devonit.com>
 Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
 Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
 Jeff Lindsay <progrium@gmail.com>
+Jeff Welch <whatthejeff@gmail.com>
+Jeffrey Bolle <jeffreybolle@gmail.com>
 Jeremy Grosser <jeremy@synack.me>
-Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
 Jesse Dubay <jesse@thefortytwo.net>
+Jezeniel Zapanta <jpzapanta22@gmail.com>
 Jilles Oldenbeuving <ojilles@gmail.com>
 Jim Alateras <jima@comware.com.au>
+Jim Perrin <jperrin@centos.org>
 Jimmy Cuadra <jimmy@jimmycuadra.com>
+Jiří Župka <jzupka@redhat.com>
 Joe Beda <joe.github@bedafamily.com>
-Joel Handwell <joelhandwell@gmail.com>
 Joe Shaw <joe@joeshaw.org>
 Joe Van Dyk <joe@tanga.com>
+Joel Handwell <joelhandwell@gmail.com>
 Joffrey F <joffrey@docker.com>
 Johan Euphrosine <proppy@google.com>
-Johannes 'fish' Ziemke <github@freigeist.org>
 Johan Rydberg <johan.rydberg@gmail.com>
+Johannes 'fish' Ziemke <github@freigeist.org>
 John Costa <john.costa@gmail.com>
 John Feminella <jxf@jxf.me>
 John Gardiner Myers <jgmyers@proofpoint.com>
+John OBrien III <jobrieniii@yahoo.com>
 John Warwick <jwarwick@gmail.com>
+Jon Wedaman <jweede@gmail.com>
 Jonas Pfenniger <jonas@pfenniger.name>
+Jonathan Boulle <jonathanboulle@gmail.com>
+Jonathan Camp <jonathan@irondojo.com>
 Jonathan McCrohan <jmccrohan@gmail.com>
 Jonathan Mueller <j.mueller@apoveda.ch>
 Jonathan Pares <jonathanpa@users.noreply.github.com>
 Jonathan Rudenberg <jonathan@titanous.com>
-Jon Wedaman <jweede@gmail.com>
 Joost Cassee <joost@cassee.net>
 Jordan Arentsen <blissdev@gmail.com>
 Jordan Sissel <jls@semicomplete.com>
 Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
 Joseph Hager <ajhager@gmail.com>
+Josh <jokajak@gmail.com>
 Josh Hawn <josh.hawn@docker.com>
 Josh Poimboeuf <jpoimboe@redhat.com>
 JP <jpellerin@leapfrogonline.com>
 Julien Barbier <write0@gmail.com>
+Julien Bordellier <julienbordellier@gmail.com>
 Julien Dubois <julien.dubois@gmail.com>
 Justin Force <justin.force@gmail.com>
 Justin Plock <jplock@users.noreply.github.com>
 Justin Simonelis <justin.p.simonelis@gmail.com>
+Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
 Karan Lyons <karan@karanlyons.com>
 Karl Grzeszczak <karlgrz@gmail.com>
 Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
@@ -236,36 +311,49 @@ Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
 Keli Hu <dev@keli.hu>
 Ken Cochrane <kencochrane@gmail.com>
 Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
+Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
 Kevin Clark <kevin.clark@gmail.com>
 Kevin J. Lynagh <kevin@keminglabs.com>
 Kevin Menard <kevin@nirvdrum.com>
 Kevin Wallace <kevin@pentabarf.net>
 Keyvan Fatehi <keyvanfatehi@gmail.com>
-kim0 <email.ahmedkamal@googlemail.com>
+kies <lleelm@gmail.com>
 Kim BKC Carlbacker <kim.carlbacker@gmail.com>
+kim0 <email.ahmedkamal@googlemail.com>
 Kimbro Staken <kstaken@kstaken.com>
 Kiran Gangadharan <kiran.daredevil@gmail.com>
+knappe <tyler.knappe@gmail.com>
+Kohei Tsuruta <coheyxyz@gmail.com>
 Konstantin Pelykh <kpelykh@zettaset.com>
 Kyle Conroy <kyle.j.conroy@gmail.com>
+kyu <leehk1227@gmail.com>
+Lachlan Coote <lcoote@vmware.com>
 lalyos <lalyos@yahoo.com>
 Lance Chen <cyen0312@gmail.com>
 Lars R. Damerow <lars@pixar.com>
 Laurie Voss <github@seldo.com>
+leeplay <hyeongkyu.lee@navercorp.com>
+Len Weincier <len@cloudafrica.net>
+Levi Gross <levi@levigross.com>
 Lewis Peckover <lew+github@lew.io>
 Liang-Chi Hsieh <viirya@gmail.com>
-Lokesh Mandvekar <lsm5@redhat.com>
+Lokesh Mandvekar <lsm5@fedoraproject.org>
 Louis Opter <kalessin@kalessin.fr>
 lukaspustina <lukas.pustina@centerdevice.com>
 lukemarsden <luke@digital-crocus.com>
 Mahesh Tiyyagura <tmahesh@gmail.com>
+Manfred Zabarauskas <manfredas@zabarauskas.com>
 Manuel Meurer <manuel@krautcomputing.com>
 Manuel Woelker <github@manuel.woelker.org>
 Marc Abramowitz <marc@marc-abramowitz.com>
 Marc Kuo <kuomarc2@gmail.com>
+Marc Tamsky <mtamsky@gmail.com>
 Marco Hennings <marco.hennings@freiheit.com>
 Marcus Farkas <toothlessgear@finitebox.com>
 Marcus Ramberg <marcus@nordaaker.com>
+marcuslinke <marcus.linke@gmx.de>
 Marek Goldmann <marek.goldmann@gmail.com>
+Marius Voila <marius.voila@gmail.com>
 Mark Allen <mrallen1@yahoo.com>
 Mark McGranaghan <mmcgrana@gmail.com>
 Marko Mikulicic <mmikulicic@gmail.com>
@@ -278,30 +366,40 @@ Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
 Matt Apperson <me@mattapperson.com>
 Matt Bachmann <bachmann.matt@gmail.com>
 Matt Haggard <haggardii@gmail.com>
+Matthew Heon <mheon@redhat.com>
 Matthew Mueller <mattmuelle@gmail.com>
 Matthias Klumpp <matthias@tenstral.net>
 Matthias Kühnle <git.nivoc@neverbox.com>
 mattymo <raytrac3r@gmail.com>
-Maxime Petazzoni <max@signalfuse.com>
-Maxim Treskin <zerthurd@gmail.com>
+mattyw <mattyw@me.com>
 Max Shytikov <mshytikov@gmail.com>
+Maxim Treskin <zerthurd@gmail.com>
+Maxime Petazzoni <max@signalfuse.com>
 meejah <meejah@meejah.ca>
 Michael Brown <michael@netdirect.ca>
-Michael Crosby <michael@crosbymichael.com>
+Michael Crosby <michael@docker.com>
 Michael Gorsuch <gorsuch@github.com>
 Michael Neale <michael.neale@gmail.com>
+Michael Prokop <github@michael-prokop.at>
 Michael Stapelberg <michael+gh@stapelberg.de>
+Michaël Pailloncy <mpapo.dev@gmail.com>
+Michiel@unhosted <michiel@unhosted.org>
 Miguel Angel Fernández <elmendalerenda@gmail.com>
+Mike Chelen <michael.chelen@gmail.com>
 Mike Gaffney <mike@uberu.com>
 Mike MacCana <mike.maccana@gmail.com>
 Mike Naberezny <mike@naberezny.com>
+Mike Snitzer <snitzer@redhat.com>
 Mikhail Sobolev <mss@mawhrin.net>
 Mohit Soni <mosoni@ebay.com>
 Morgante Pell <morgante.pell@morgante.net>
 Morten Siebuhr <sbhr@sbhr.dk>
+Mrunal Patel <mrunalp@gmail.com>
 Nan Monnand Deng <monnand@gmail.com>
+Naoki Orii <norii@cs.cmu.edu>
 Nate Jones <nate@endot.org>
 Nathan Kleyn <nathan@nathankleyn.com>
+Nathan LeClaire <nathan.leclaire@docker.com>
 Nelson Chen <crazysim@gmail.com>
 Niall O'Higgins <niallo@unworkable.org>
 Nick Payne <nick@kurai.co.uk>
@@ -309,15 +407,20 @@ Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
 Nick Stinemates <nick@stinemates.org>
 Nicolas Dudebout <nicolas.dudebout@gatech.edu>
 Nicolas Kaiser <nikai@nikai.net>
+NikolaMandic <mn080202@gmail.com>
 noducks <onemannoducks@gmail.com>
 Nolan Darilek <nolan@thewordnerd.info>
+O.S. Tezer <ostezer@gmail.com>
+OddBloke <daniel@daniel-watkins.co.uk>
 odk- <github@odkurzacz.org>
 Oguz Bilgic <fisyonet@gmail.com>
 Ole Reifschneider <mail@ole-reifschneider.de>
-O.S. Tezer <ostezer@gmail.com>
+Olivier Gambier <dmp42@users.noreply.github.com>
 pandrew <letters@paulnotcom.se>
 Pascal Borreli <pascal@borreli.com>
+Patrick Hemmer <patrick.hemmer@gmail.com>
 pattichen <craftsbear@gmail.com>
+Paul <paul9869@gmail.com>
 Paul Annesley <paul@annesley.cc>
 Paul Bowsher <pbowsher@globalpersonals.co.uk>
 Paul Hammond <paul@paulhammond.org>
@@ -325,49 +428,71 @@ Paul Jimenez <pj@place.org>
 Paul Lietar <paul@lietar.net>
 Paul Morie <pmorie@gmail.com>
 Paul Nasrat <pnasrat@gmail.com>
-Paul <paul9869@gmail.com>
+Paul Weaver <pauweave@cisco.com>
+Peter Bourgon <peter@bourgon.org>
 Peter Braden <peterbraden@peterbraden.co.uk>
-Peter Waller <peter@scraperwiki.com>
-Phillip Alexander <git@phillipalexander.io>
+Peter Waller <p@pwaller.net>
+Phil <underscorephil@gmail.com>
 Phil Spitler <pspitler@gmail.com>
+Phillip Alexander <git@phillipalexander.io>
 Piergiuliano Bossi <pgbossi@gmail.com>
 Pierre-Alain RIVIERE <pariviere@ippon.fr>
 Piotr Bogdan <ppbogdan@gmail.com>
 pysqz <randomq@126.com>
 Quentin Brossard <qbrossard@gmail.com>
+r0n22 <cameron.regan@gmail.com>
 Rafal Jeczalik <rjeczalik@gmail.com>
 Rajat Pandit <rp@rajatpandit.com>
+Rajdeep Dua <dua_rajdeep@yahoo.com>
 Ralph Bean <rbean@redhat.com>
 Ramkumar Ramachandra <artagnon@gmail.com>
 Ramon van Alteren <ramon@vanalteren.nl>
 Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
 rgstephens <greg@udon.org>
 Rhys Hiltner <rhys@twitch.tv>
+Richard Harvey <richard@squarecows.com>
 Richo Healey <richo@psych0tik.net>
 Rick Bradley <rick@users.noreply.github.com>
+Rick van de Loo <rickvandeloo@gmail.com>
+Robert Bachmann <rb@robertbachmann.at>
 Robert Obryk <robryk@gmail.com>
 Roberto G. Hashioka <roberto.hashioka@docker.com>
+Robin Speekenbrink <robin@kingsquare.nl>
 robpc <rpcann@gmail.com>
 Rodrigo Vaz <rodrigo.vaz@gmail.com>
 Roel Van Nyen <roel.vannyen@gmail.com>
 Roger Peppe <rogpeppe@gmail.com>
 Rohit Jnagal <jnagal@google.com>
+Roland Huß <roland@jolokia.org>
 Roland Moriz <rmoriz@users.noreply.github.com>
+Ron Smits <ron.smits@gmail.com>
 Rovanion Luckey <rovanion.luckey@gmail.com>
+Rudolph Gottesheim <r.gottesheim@loot.at>
+Ryan Anderson <anderson.ryanc@gmail.com>
 Ryan Aslett <github@mixologic.com>
 Ryan Fowler <rwfowler@gmail.com>
 Ryan O'Donnell <odonnellryanc@gmail.com>
 Ryan Seto <ryanseto@yak.net>
 Ryan Thomas <rthomas@atlassian.com>
+s-ko <aleks@s-ko.net>
 Sam Alba <sam.alba@gmail.com>
+Sam Bailey <cyprix@cyprix.com.au>
 Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
+Sam Reis <sreis@atlassian.com>
 Sam Rijs <srijs@airpost.net>
 Samuel Andaya <samuel@andaya.net>
+satoru <satorulogic@gmail.com>
+Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
 Scott Bessler <scottbessler@gmail.com>
 Scott Collier <emailscottcollier@gmail.com>
 Sean Cronin <seancron@gmail.com>
 Sean P. Kane <skane@newrelic.com>
-Sébastien Stormacq <sebsto@users.noreply.github.com>
+Sebastiaan van Stijn <github@gone.nl>
+Sebastiaan van Stijn <thaJeztah@users.noreply.github.com>
+Senthil Kumar Selvaraj <senthil.thecoder@gmail.com>
+SeongJae Park <sj38.park@gmail.com>
+Shane Canon <scanon@lbl.gov>
+shaunol <shaunol@gmail.com>
 Shawn Landden <shawn@churchofgit.com>
 Shawn Siefkas <shawn.siefkas@meredith.com>
 Shih-Yuan Lee <fourdollars@gmail.com>
@@ -378,14 +503,19 @@ Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
 Solomon Hykes <solomon@docker.com>
 Song Gao <song@gao.io>
 Soulou <leo@unbekandt.eu>
+soulshake <amy@gandi.net>
 Sridatta Thatipamala <sthatipamala@gmail.com>
 Sridhar Ratnakumar <sridharr@activestate.com>
 Steeve Morin <steeve.morin@gmail.com>
 Stefan Praszalowicz <stefan@greplin.com>
+Stephen Crosby <stevecrozz@gmail.com>
 Steven Burgess <steven.a.burgess@hotmail.com>
 sudosurootdev <sudosurootdev@gmail.com>
-Sven Dowideit <SvenDowideit@home.org.au>
+Sven Dowideit <svendowideit@home.org.au>
 Sylvain Bellemare <sylvain.bellemare@ezeep.com>
+Sébastien <sebastien@yoozio.com>
+Sébastien Luttringer <seblu@seblu.net>
+Sébastien Stormacq <sebsto@users.noreply.github.com>
 tang0th <tang0th@gmx.com>
 Tatsuki Sugiura <sugi@nemui.org>
 Tehmasp Chaudhri <tehmasp@gmail.com>
@@ -400,19 +530,24 @@ Thomas Schroeter <thomas@cliqz.com>
 Tianon Gravi <admwiggin@gmail.com>
 Tibor Vass <teabee89@gmail.com>
 Tim Bosse <taim@bosboot.org>
-Timothy Hobbs <timothyhobbs@seznam.cz>
 Tim Ruffles <oi@truffles.me.uk>
+Tim Ruffles <timruffles@googlemail.com>
 Tim Terhorst <mynamewastaken+git@gmail.com>
+Timothy Hobbs <timothyhobbs@seznam.cz>
 tjmehta <tj@init.me>
 Tobias Bieniek <Tobias.Bieniek@gmx.de>
+Tobias Gesellchen <tobias@gesellix.de>
 Tobias Schmidt <ts@soundcloud.com>
 Tobias Schwab <tobias.schwab@dynport.de>
 Todd Lunter <tlunter@gmail.com>
 Tom Fotherby <tom+github@peopleperhour.com>
 Tom Hulihan <hulihan.tom159@gmail.com>
+Tom Maaswinkel <tom.maaswinkel@12wiki.eu>
 Tommaso Visconti <tommaso.visconti@gmail.com>
 Tony Daws <tony@daws.ca>
+tpng <benny.tpng@gmail.com>
 Travis Cline <travis.cline@gmail.com>
+Trent Ogren <tedwardo2@gmail.com>
 Tyler Brock <tyler.brock@gmail.com>
 Tzu-Jung Lee <roylee17@gmail.com>
 Ulysse Carion <ulyssecarion@gmail.com>
@@ -434,21 +569,29 @@ Vivek Agarwal <me@vivek.im>
 Vladimir Bulyga <xx@ccxx.cc>
 Vladimir Kirillov <proger@wilab.org.ua>
 Vladimir Rutsky <altsysrq@gmail.com>
+waitingkuo <waitingkuo0527@gmail.com>
 Walter Leibbrandt <github@wrl.co.za>
 Walter Stanish <walter@pratyeka.org>
 WarheadsSE <max@warheads.net>
 Wes Morgan <cap10morgan@gmail.com>
 Will Dietz <w@wdtz.org>
-William Delanoue <william.delanoue@gmail.com>
-William Henry <whenry@redhat.com>
 Will Rouesnel <w.rouesnel@gmail.com>
 Will Weaver <monkey@buildingbananas.com>
+William Delanoue <william.delanoue@gmail.com>
+William Henry <whenry@redhat.com>
+William Riancho <wr.wllm@gmail.com>
+William Thurston <thurstw@amazon.com>
+wyc <wayne@neverfear.org>
 Xiuming Chen <cc@cxm.cc>
 Yang Bai <hamo.by@gmail.com>
 Yasunori Mahata <nori@mahata.net>
 Yurii Rashkovskii <yrashk@gmail.com>
+Zac Dover <zdover@redhat.com>
 Zain Memon <zain@inzain.net>
 Zaiste! <oh@zaiste.net>
+Zane DeGraffenried <zane.deg@gmail.com>
 Zilin Du <zilin.du@gmail.com>
 zimbatm <zimbatm@zimbatm.com>
+Zoltan Tombol <zoltan.tombol@gmail.com>
 zqh <zqhxuyuan@gmail.com>
+Álvaro Lázaro <alvaro.lazaro.g@gmail.com>

+ 27 - 8
CHANGELOG.md

@@ -1,5 +1,24 @@
 # Changelog
 
+## 1.2.0 (2014-08-20)
+
+#### Runtime
++ Make /etc/hosts /etc/resolv.conf and /etc/hostname editable at runtime
++ Auto-restart containers using policies
++ Use /var/lib/docker/tmp for large temporary files
++ `--cap-add` and `--cap-drop` to tweak what linux capability you want
++ `--device` to use devices in containers
+
+#### Client
++ `docker search` on private registries
++ Add `exited` filter to `docker ps --filter`
+* `docker rm -f` now kills instead of stop
++ Support for IPv6 addresses in `--dns` flag
+
+#### Proxy
++ Proxy instances in separate processes
+* Small bug fix on UDP proxy
+
 ## 1.1.2 (2014-07-23)
 
 #### Runtime
@@ -313,7 +332,7 @@
 - Add newlines to the JSON stream functions.
 
 #### Runtime
-* Do not ping the registry from the CLI. All requests to registres flow through the daemon.
+* Do not ping the registry from the CLI. All requests to registries flow through the daemon.
 - Check for nil information return in the lxc driver. This fixes panics with older lxc versions.
 - Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently.
 - Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device.
@@ -905,7 +924,7 @@ With the ongoing changes to the networking and execution subsystems of docker te
 
 + Add domainname support
 + Implement image filtering with path.Match
-* Remove unnecesasry warnings
+* Remove unnecessary warnings
 * Remove os/user dependency
 * Only mount the hostname file when the config exists
 * Handle signals within the `docker login` command
@@ -928,7 +947,7 @@ With the ongoing changes to the networking and execution subsystems of docker te
 + Hack: Vendor all dependencies
 * Remote API: Bump to v1.5
 * Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc.
-* Documentation: General improvments
+* Documentation: General improvements
 
 ## 0.6.1 (2013-08-23)
 
@@ -1198,7 +1217,7 @@ With the ongoing changes to the networking and execution subsystems of docker te
 * Prevent rm of running containers
 * Use go1.1 cookiejar
 - Fix issue detaching from running TTY container
-- Forbid parralel push/pull for a single image/repo. Fixes #311
+- Forbid parallel push/pull for a single image/repo. Fixes #311
 - Fix race condition within Run command when attaching.
 
 #### Client
@@ -1314,7 +1333,7 @@ With the ongoing changes to the networking and execution subsystems of docker te
 + Add caching to docker builder
 + Add support for docker builder with native API as top level command
 + Implement ENV within docker builder
-- Check the command existance prior create and add Unit tests for the case
+- Check the command existence prior create and add Unit tests for the case
 * use any whitespaces instead of tabs
 
 #### Runtime
@@ -1353,13 +1372,13 @@ With the ongoing changes to the networking and execution subsystems of docker te
 
 #### Runtime
 
-- Fix the command existance check
+- Fix the command existence check
 - strings.Split may return an empty string on no match
 - Fix an index out of range crash if cgroup memory is not
 
 #### Documentation
 
-* Various improvments
+* Various improvements
 + New example: sharing data between 2 couchdb databases
 
 #### Other
@@ -1389,7 +1408,7 @@ With the ongoing changes to the networking and execution subsystems of docker te
 ## 0.2.0 (2013-04-23)
 
 - Runtime: ghost containers can be killed and waited for
-* Documentation: update install intructions
+* Documentation: update install instructions
 - Packaging: fix Vagrantfile
 - Development: automate releasing binaries and ubuntu packages
 + Add a changelog

+ 153 - 41
CONTRIBUTING.md

@@ -4,14 +4,52 @@ Want to hack on Docker? Awesome! Here are instructions to get you
 started. They are probably not perfect, please let us know if anything
 feels wrong or incomplete.
 
+## Topics
+
+* [Security Reports](#security-reports)
+* [Design and Cleanup Proposals](#design-and-cleanup-proposals)
+* [Reporting Issues](#reporting-issues)
+* [Build Environment](#build-environment)
+* [Contribution Guidelines](#contribution-guidelines)
+* [Community Guidelines](#docker-community-guidelines)
+
+## Security Reports
+
+Please **DO NOT** file an issue for security related issues. Please send your
+reports to [security@docker.com](mailto:security@docker.com) instead.
+
+## Design and Cleanup Proposals
+
+When considering a design proposal, we are looking for:
+
+* A description of the problem this design proposal solves
+* An issue -- not a pull request -- that describes what you will take action on
+  * Please prefix your issue with `Proposal:` in the title
+* Please review [the existing Proposals](https://github.com/dotcloud/docker/issues?direction=asc&labels=Proposal&page=1&sort=created&state=open)
+  before reporting a new issue. You can always pair with someone if you both
+  have the same idea.
+
+When considering a cleanup task, we are looking for:
+
+* A description of the refactors made
+  * Please note any logic changes if necessary
+* A pull request with the code
+  * Please prefix your PR's title with `Cleanup:` so we can quickly address it.
+  * Your pull request must remain up to date with master, so rebase as necessary.
+
 ## Reporting Issues
 
-When reporting [issues](https://github.com/dotcloud/docker/issues) 
-on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc),
-the output of `uname -a` and the output of `docker version` along with
-the output of `docker -D info`. Please include the steps required to reproduce
-the problem if possible and applicable.
-This information will help us review and fix your issue faster.
+When reporting [issues](https://github.com/docker/docker/issues) on
+GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc).
+Please include:
+
+* The output of `uname -a`.
+* The output of `docker version`.
+* The output of `docker -D info`.
+
+Please also include the steps required to reproduce the problem if
+possible and applicable.  This information will help us review and fix
+your issue faster.
 
 ## Build Environment
 
@@ -34,7 +72,7 @@ received feedback on what to improve.
 We're trying very hard to keep Docker lean and focused. We don't want it
 to do everything for everybody. This means that we might decide against
 incorporating a new feature. However, there might be a way to implement
-that feature *on top of* docker.
+that feature *on top of* Docker.
 
 ### Discuss your design on the mailing list
 
@@ -48,7 +86,7 @@ else is working on the same thing.
 ### Create issues...
 
 Any significant improvement should be documented as [a GitHub
-issue](https://github.com/dotcloud/docker/issues) before anybody
+issue](https://github.com/docker/docker/issues) before anybody
 starts working on it.
 
 ### ...but check for existing issues first!
@@ -60,12 +98,12 @@ help prioritize the most common problems and requests.
 
 ### Conventions
 
-Fork the repo and make changes on your fork in a feature branch:
+Fork the repository and make changes on your fork in a feature branch:
 
-- If it's a bugfix branch, name it XXX-something where XXX is the number of the
-  issue
+- If it's a bug fix branch, name it XXXX-something where XXXX is the number of the
+  issue.
 - If it's a feature branch, create an enhancement issue to announce your
-  intentions, and name it XXX-something where XXX is the number of the issue.
+  intentions, and name it XXXX-something where XXXX is the number of the issue.
 
 Submit unit tests for your changes.  Go has a great test framework built in; use
 it! Take a look at existing tests for inspiration. Run the full test suite on
@@ -73,18 +111,16 @@ your branch before submitting a pull request.
 
 Update the documentation when creating or modifying features. Test
 your documentation changes for clarity, concision, and correctness, as
-well as a clean documentation build. See ``docs/README.md`` for more
-information on building the docs and how docs get released.
+well as a clean documentation build. See `docs/README.md` for more
+information on building the docs and how they get released.
 
 Write clean code. Universally formatted code promotes ease of writing, reading,
 and maintenance. Always run `gofmt -s -w file.go` on each changed file before
-committing your changes. Most editors have plugins that do this automatically.
+committing your changes. Most editors have plug-ins that do this automatically.
 
 Pull requests descriptions should be as clear as possible and include a
 reference to all the issues that they address.
 
-Pull requests must not contain commits from other users or branches.
-
 Commit messages must start with a capitalized and short summary (max. 50
 chars) written in the imperative, followed by an optional, more detailed
 explanatory text which is separated from the summary by an empty line.
@@ -95,26 +131,33 @@ sure to post a comment after pushing. The new commits will show up in the pull
 request automatically, but the reviewers will not be notified unless you
 comment.
 
+Pull requests must be cleanly rebased on top of master without multiple branches
+mixed into the PR.
+
+**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your
+feature branch to update your pull request rather than `merge master`.
+
 Before the pull request is merged, make sure that you squash your commits into
 logical units of work using `git rebase -i` and `git push -f`. After every
 commit the test suite should be passing. Include documentation changes in the
 same commit so that a revert would remove all traces of the feature or fix.
 
-Commits that fix or close an issue should include a reference like `Closes #XXX`
-or `Fixes #XXX`, which will automatically close the issue when merged.
+Commits that fix or close an issue should include a reference like
+`Closes #XXXX` or `Fixes #XXXX`, which will automatically close the
+issue when merged.
 
-Please do not add yourself to the AUTHORS file, as it is regenerated
+Please do not add yourself to the `AUTHORS` file, as it is regenerated
 regularly from the Git history.
 
 ### Merge approval
 
-Docker maintainers use LGTM (looks good to me) in comments on the code review
+Docker maintainers use LGTM (Looks Good To Me) in comments on the code review
 to indicate acceptance.
 
 A change requires LGTMs from an absolute majority of the maintainers of each
-component affected. For example, if a change affects docs/ and registry/, it
-needs an absolute majority from the maintainers of docs/ AND, separately, an
-absolute majority of the maintainers of registry.
+component affected. For example, if a change affects `docs/` and `registry/`, it
+needs an absolute majority from the maintainers of `docs/` AND, separately, an
+absolute majority of the maintainers of `registry/`.
 
 For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)
 
@@ -137,7 +180,6 @@ San Francisco, CA 94110 USA
 Everyone is permitted to copy and distribute verbatim copies of this
 license document, but changing it is not allowed.
 
-
 Developer's Certificate of Origin 1.1
 
 By making a contribution to this project, I certify that:
@@ -165,20 +207,18 @@ By making a contribution to this project, I certify that:
     this project or the open source license(s) involved.
 ```
 
-then you just add a line to every git commit message:
+Then you just add a line to every git commit message:
 
-    Docker-DCO-1.1-Signed-off-by: Joe Smith <joe.smith@email.com> (github: github_handle)
+    Signed-off-by: Joe Smith <joe.smith@email.com>
 
-using your real name (sorry, no pseudonyms or anonymous contributions.)
+Using your real name (sorry, no pseudonyms or anonymous contributions.)
 
-One way to automate this, is customise your get ``commit.template`` by adding
-a ``prepare-commit-msg`` hook to your docker checkout:
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
 
-```
-curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg
-```
-
-* Note: the above script expects to find your GitHub user name in ``git config --get github.user``
+Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still
+accepted, so there is no need to update outstanding pull requests to the new
+format right away, but please do adjust your processes for future contributions.
 
 #### Small patch exception
 
@@ -194,11 +234,83 @@ If you have any questions, please refer to the FAQ in the [docs](http://docs.doc
 
 ### How can I become a maintainer?
 
-* Step 1: learn the component inside out
-* Step 2: make yourself useful by contributing code, bugfixes, support etc.
-* Step 3: volunteer on the irc channel (#docker@freenode)
-* Step 4: propose yourself at a scheduled docker meeting in #docker-dev
+* Step 1: Learn the component inside out
+* Step 2: Make yourself useful by contributing code, bug fixes, support etc.
+* Step 3: Volunteer on the IRC channel (#docker at Freenode)
+* Step 4: Propose yourself at a scheduled docker meeting in #docker-dev
+
+Don't forget: being a maintainer is a time investment. Make sure you
+will have time to make yourself available.  You don't have to be a
+maintainer to make a difference on the project!
+
+### IRC Meetings
+
+There are two monthly meetings taking place on #docker-dev IRC to accommodate all timezones.
+Anybody can ask for a topic to be discussed prior to the meeting.
+
+If you feel the conversation is going off-topic, feel free to point it out.
+
+For the exact dates and times, have a look at [the irc-minutes repo](https://github.com/docker/irc-minutes).
+They also contain all the notes from previous meetings.
+
+## Docker Community Guidelines
+
+We want to keep the Docker community awesome, growing and collaborative. We
+need your help to keep it that way. To help with this we've come up with some
+general guidelines for the community as a whole:
+
+* Be nice: Be courteous, respectful and polite to fellow community members: no
+  regional, racial, gender, or other abuse will be tolerated. We like nice people
+  way better than mean ones!
+
+* Encourage diversity and participation: Make everyone in our community
+  feel welcome, regardless of their background and the extent of their
+  contributions, and do everything possible to encourage participation in
+  our community.
+
+* Keep it legal: Basically, don't get us in trouble. Share only content that
+  you own, do not share private or sensitive information, and don't break the
+  law.
+
+* Stay on topic: Make sure that you are posting to the correct channel
+  and avoid off-topic discussions. Remember when you update an issue or
+  respond to an email you are potentially sending to a large number of
+  people.  Please consider this before you update.  Also remember that
+  nobody likes spam.
+
+### Guideline Violations — 3 Strikes Method
+
+The point of this section is not to find opportunities to punish people, but we
+do need a fair way to deal with people who are making our community suck.
+
+1. First occurrence: We'll give you a friendly, but public reminder that the
+   behavior is inappropriate according to our guidelines.
+
+2. Second occurrence: We will send you a private message with a warning that
+   any additional violations will result in removal from the community.
+
+3. Third occurrence: Depending on the violation, we may need to delete or ban
+   your account.
+
+**Notes:**
+
+* Obvious spammers are banned on first occurrence. If we don't do this, we'll
+  have spam all over the place.
+
+* Violations are forgiven after 6 months of good behavior, and we won't
+  hold a grudge.
+
+* People who commit minor infractions will get some education,
+  rather than hammering them in the 3 strikes process.
+
+* The rules apply equally to everyone in the community, no matter how
+  much you've contributed.
+
+* Extreme violations of a threatening, abusive, destructive or illegal nature
+  will be addressed immediately and are not subject to 3 strikes or
+  forgiveness.
 
-Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available.
-You don't have to be a maintainer to make a difference on the project!
+* Contact james@docker.com to report abuse or appeal violations. In the case of
+  appeals, we know that mistakes happen, and we'll work with you to come up with
+  a fair solution if there has been a misunderstanding.
 

+ 14 - 8
Dockerfile

@@ -6,7 +6,7 @@
 # docker build -t docker .
 #
 # # Mount your source in an interactive container for quick testing:
-# docker run -v `pwd`:/go/src/github.com/dotcloud/docker --privileged -i -t docker bash
+# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
 #
 # # Run the test suite:
 # docker run --privileged docker hack/make.sh test
@@ -28,8 +28,7 @@ FROM	ubuntu:14.04
 MAINTAINER	Tianon Gravi <admwiggin@gmail.com> (@tianon)
 
 # Packaged dependencies
-RUN	apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
-	apt-utils \
+RUN	apt-get update && apt-get install -y \
 	aufs-tools \
 	automake \
 	btrfs-tools \
@@ -43,7 +42,7 @@ RUN	apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
 	libsqlite3-dev \
 	lxc=1.0* \
 	mercurial \
-	pandoc \
+	parallel \
 	reprepro \
 	ruby1.9.1 \
 	ruby1.9.1-dev \
@@ -60,9 +59,10 @@ RUN	cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper
 # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
 
 # Install Go
-RUN	curl -s https://go.googlecode.com/files/go1.2.1.src.tar.gz | tar -v -C /usr/local -xz
+RUN	curl -sSL https://golang.org/dl/go1.3.1.src.tar.gz | tar -v -C /usr/local -xz
 ENV	PATH	/usr/local/go/bin:$PATH
-ENV	GOPATH	/go:/go/src/github.com/dotcloud/docker/vendor
+ENV	GOPATH	/go:/go/src/github.com/docker/docker/vendor
+ENV PATH /go/bin:$PATH
 RUN	cd /usr/local/go/src && ./make.bash --no-clean 2>&1
 
 # Compile Go for cross compilation
@@ -80,6 +80,12 @@ RUN	go get code.google.com/p/go.tools/cmd/cover
 # TODO replace FPM with some very minimal debhelper stuff
 RUN	gem install --no-rdoc --no-ri fpm --version 1.0.2
 
+# Install man page generator
+RUN mkdir -p /go/src/github.com/cpuguy83 \
+    && git clone -b v1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \
+    && cd /go/src/github.com/cpuguy83/go-md2man \
+    && go get -v ./...
+
 # Get the "busybox" image source so we can build locally instead of pulling
 RUN	git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox
 
@@ -94,11 +100,11 @@ RUN groupadd -r docker
 RUN useradd --create-home --gid docker unprivilegeduser
 
 VOLUME	/var/lib/docker
-WORKDIR	/go/src/github.com/dotcloud/docker
+WORKDIR	/go/src/github.com/docker/docker
 ENV	DOCKER_BUILDTAGS	apparmor selinux
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT	["hack/dind"]
 
 # Upload docker source
-ADD	.	/go/src/github.com/dotcloud/docker
+COPY	.	/go/src/github.com/docker/docker

+ 0 - 24
FIXME

@@ -1,24 +0,0 @@
-
-## FIXME
-
-This file is a loose collection of things to improve in the codebase, for the internal
-use of the maintainers.
-
-They are not big enough to be in the roadmap, not user-facing enough to be github issues,
-and not important enough to be discussed in the mailing list.
-
-They are just like FIXME comments in the source code, except we're not sure where in the source
-to put them - so we put them here :)
-
-
-* Run linter on codebase
-* Unify build commands and regular commands
-* Move source code into src/ subdir for clarity
-* docker build: on non-existent local path for ADD, don't show full absolute path on the host
-* use size header for progress bar in pull
-* Clean up context upload in build!!!
-* Parallel pull
-* Upgrade dockerd without stopping containers
-* Simple command to remove all untagged images (`docker rmi $(docker images | awk '/^<none>/ { print $3 }')`)
-* Simple command to clean up containers for disk space
-* Clean up the ProgressReader api, it's a PITA to use

+ 4 - 3
Makefile

@@ -1,7 +1,8 @@
 .PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate
 
 # to allow `make BINDDIR=. shell` or `make BINDDIR= test`
-BINDDIR := bundles
+# (default to no bind mount if DOCKER_HOST is set)
+BINDDIR := $(if $(DOCKER_HOST),,bundles)
 # to allow `make DOCSPORT=9000 docs`
 DOCSPORT := 8000
 
@@ -9,7 +10,7 @@ GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
 GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
 DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
 DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
-DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/dotcloud/docker/$(BINDDIR)")
+DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)")
 
 DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TESTFLAGS -e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
 # to allow `make DOCSDIR=docs docs-shell`
@@ -33,7 +34,7 @@ docs-shell: docs-build
 	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
 
 docs-release: docs-build
-	$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./release.sh
+	$(DOCKER_RUN_DOCS) -e BUILD_ROOT "$(DOCKER_DOCS_IMAGE)" ./release.sh
 
 test: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli

+ 5 - 3
README.md

@@ -131,9 +131,8 @@ Here's a typical Docker build process:
 
 ```bash
 FROM ubuntu:12.04
-RUN apt-get update
-RUN apt-get install -q -y python python-pip curl
-RUN curl -L https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
+RUN apt-get update && apt-get install -y python python-pip curl
+RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
 RUN cd helloflask-master && pip install -r requirements.txt
 ```
 
@@ -178,6 +177,9 @@ Under the hood, Docker is built on the following components:
 Contributing to Docker
 ======================
 
+[![GoDoc](https://godoc.org/github.com/docker/docker?status.png)](https://godoc.org/github.com/docker/docker)
+[![Travis](https://travis-ci.org/docker/docker.svg?branch=master)](https://travis-ci.org/docker/docker)
+
 Want to hack on Docker? Awesome! There are instructions to get you
 started [here](CONTRIBUTING.md).
 

+ 1 - 1
VERSION

@@ -1 +1 @@
-1.1.2
+1.2.0

+ 2 - 2
api/README.md

@@ -1,5 +1,5 @@
 This directory contains code pertaining to the Docker API:
 
- - Used by the docker client when comunicating with the docker deamon
+ - Used by the docker client when communicating with the docker daemon
 
- - Used by third party tools wishing to interface with the docker deamon
+ - Used by third party tools wishing to interface with the docker daemon

+ 18 - 17
api/client/cli.go

@@ -10,11 +10,24 @@ import (
 	"strings"
 	"text/template"
 
-	flag "github.com/dotcloud/docker/pkg/mflag"
-	"github.com/dotcloud/docker/pkg/term"
-	"github.com/dotcloud/docker/registry"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/registry"
 )
 
+type DockerCli struct {
+	proto      string
+	addr       string
+	configFile *registry.ConfigFile
+	in         io.ReadCloser
+	out        io.Writer
+	err        io.Writer
+	isTerminal bool
+	terminalFd uintptr
+	tlsConfig  *tls.Config
+	scheme     string
+}
+
 var funcMap = template.FuncMap{
 	"json": func(v interface{}) string {
 		a, _ := json.Marshal(v)
@@ -34,7 +47,8 @@ func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) {
 	return method.Interface().(func(...string) error), true
 }
 
-func (cli *DockerCli) ParseCommands(args ...string) error {
+// Cmd executes the specified command
+func (cli *DockerCli) Cmd(args ...string) error {
 	if len(args) > 0 {
 		method, exists := cli.getMethod(args[0])
 		if !exists {
@@ -97,16 +111,3 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsC
 		scheme:     scheme,
 	}
 }
-
-type DockerCli struct {
-	proto      string
-	addr       string
-	configFile *registry.ConfigFile
-	in         io.ReadCloser
-	out        io.Writer
-	err        io.Writer
-	isTerminal bool
-	terminalFd uintptr
-	tlsConfig  *tls.Config
-	scheme     string
-}

+ 166 - 100
api/client/commands.go

@@ -22,19 +22,21 @@ import (
 	"text/template"
 	"time"
 
-	"github.com/dotcloud/docker/api"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/nat"
-	"github.com/dotcloud/docker/opts"
-	"github.com/dotcloud/docker/pkg/signal"
-	"github.com/dotcloud/docker/pkg/term"
-	"github.com/dotcloud/docker/pkg/units"
-	"github.com/dotcloud/docker/registry"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
-	"github.com/dotcloud/docker/utils/filters"
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/opts"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/parsers/filters"
+	"github.com/docker/docker/pkg/signal"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/pkg/units"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
 )
 
 const (
@@ -67,7 +69,8 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
 		{"inspect", "Return low-level information on a container"},
 		{"kill", "Kill a running container"},
 		{"load", "Load an image from a tar archive"},
-		{"login", "Register or log in to the Docker registry server"},
+		{"login", "Register or log in to a Docker registry server"},
+		{"logout", "Log out from a Docker registry server"},
 		{"logs", "Fetch the logs of a container"},
 		{"port", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT"},
 		{"pause", "Pause all processes within a container"},
@@ -161,28 +164,32 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 		if _, err = os.Stat(filename); os.IsNotExist(err) {
 			return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0))
 		}
-		if err = utils.ValidateContextDirectory(root); err != nil {
+		var excludes []string
+		ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore"))
+		if err != nil && !os.IsNotExist(err) {
+			return fmt.Errorf("Error reading .dockerignore: '%s'", err)
+		}
+		for _, pattern := range strings.Split(string(ignore), "\n") {
+			ok, err := filepath.Match(pattern, "Dockerfile")
+			if err != nil {
+				return fmt.Errorf("Bad .dockerignore pattern: '%s', error: %s", pattern, err)
+			}
+			if ok {
+				return fmt.Errorf("Dockerfile was excluded by .dockerignore pattern '%s'", pattern)
+			}
+			excludes = append(excludes, pattern)
+		}
+		if err = utils.ValidateContextDirectory(root, excludes); err != nil {
 			return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
 		}
 		options := &archive.TarOptions{
 			Compression: archive.Uncompressed,
-		}
-		if ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")); err != nil && !os.IsNotExist(err) {
-			return fmt.Errorf("Error reading .dockerignore: '%s'", err)
-		} else if err == nil {
-			for _, pattern := range strings.Split(string(ignore), "\n") {
-				ok, err := filepath.Match(pattern, "Dockerfile")
-				if err != nil {
-					utils.Errorf("Bad .dockerignore pattern: '%s', error: %s", pattern, err)
-					continue
-				}
-				if ok {
-					return fmt.Errorf("Dockerfile was excluded by .dockerignore pattern '%s'", pattern)
-				}
-				options.Excludes = append(options.Excludes, pattern)
-			}
+			Excludes:    excludes,
 		}
 		context, err = archive.TarWithOptions(root, options)
+		if err != nil {
+			return err
+		}
 	}
 	var body io.Reader
 	// Setup an upload progress bar
@@ -196,7 +203,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 
 	//Check if the given image name can be resolved
 	if *tag != "" {
-		repository, _ := utils.ParseRepositoryTag(*tag)
+		repository, _ := parsers.ParseRepositoryTag(*tag)
 		if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
 			return err
 		}
@@ -349,6 +356,32 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
 	return nil
 }
 
+// log out from a Docker registry
+func (cli *DockerCli) CmdLogout(args ...string) error {
+	cmd := cli.Subcmd("logout", "[SERVER]", "Log out from a Docker registry, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.")
+
+	if err := cmd.Parse(args); err != nil {
+		return nil
+	}
+	serverAddress := registry.IndexServerAddress()
+	if len(cmd.Args()) > 0 {
+		serverAddress = cmd.Arg(0)
+	}
+
+	cli.LoadConfigFile()
+	if _, ok := cli.configFile.Configs[serverAddress]; !ok {
+		fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress)
+	} else {
+		fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress)
+		delete(cli.configFile.Configs, serverAddress)
+
+		if err := registry.SaveConfig(cli.configFile); err != nil {
+			return fmt.Errorf("Failed to save docker config: %v", err)
+		}
+	}
+	return nil
+}
+
 // 'docker wait': block until a container stops
 func (cli *DockerCli) CmdWait(args ...string) error {
 	cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.")
@@ -391,6 +424,7 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
 	if dockerversion.GITCOMMIT != "" {
 		fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT)
 	}
+	fmt.Fprintf(cli.out, "OS/Arch (client): %s/%s\n", runtime.GOOS, runtime.GOARCH)
 
 	body, _, err := readBody(cli.call("GET", "/version", nil, false))
 	if err != nil {
@@ -400,11 +434,11 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
 	out := engine.NewOutput()
 	remoteVersion, err := out.AddEnv()
 	if err != nil {
-		utils.Errorf("Error reading remote version: %s\n", err)
+		log.Errorf("Error reading remote version: %s", err)
 		return err
 	}
 	if _, err := out.Write(body); err != nil {
-		utils.Errorf("Error reading remote version: %s\n", err)
+		log.Errorf("Error reading remote version: %s", err)
 		return err
 	}
 	out.Close()
@@ -440,7 +474,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 	}
 
 	if _, err := out.Write(body); err != nil {
-		utils.Errorf("Error reading remote info: %s\n", err)
+		log.Errorf("Error reading remote info: %s", err)
 		return err
 	}
 	out.Close()
@@ -457,6 +491,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 	}
 	fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver"))
 	fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
+	fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem"))
 
 	if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" {
 		fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug"))
@@ -471,9 +506,6 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 		if initPath := remoteInfo.Get("InitPath"); initPath != "" {
 			fmt.Fprintf(cli.out, "Init Path: %s\n", initPath)
 		}
-		if len(remoteInfo.GetList("Sockets")) != 0 {
-			fmt.Fprintf(cli.out, "Sockets: %v\n", remoteInfo.GetList("Sockets"))
-		}
 	}
 
 	if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
@@ -551,7 +583,7 @@ func (cli *DockerCli) CmdRestart(args ...string) error {
 }
 
 func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
-	sigc := make(chan os.Signal, 1)
+	sigc := make(chan os.Signal, 128)
 	signal.CatchAll(sigc)
 	go func() {
 		for s := range sigc {
@@ -566,10 +598,10 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
 				}
 			}
 			if sig == "" {
-				utils.Errorf("Unsupported signal: %d. Discarding.", s)
+				log.Errorf("Unsupported signal: %d. Discarding.", s)
 			}
 			if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil {
-				utils.Debugf("Error sending signal: %s", err)
+				log.Debugf("Error sending signal: %s", err)
 			}
 		}
 	}()
@@ -659,7 +691,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 	if *openStdin || *attach {
 		if tty && cli.isTerminal {
 			if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
-				utils.Errorf("Error monitoring TTY size: %s\n", err)
+				log.Errorf("Error monitoring TTY size: %s", err)
 			}
 		}
 		return <-cErr
@@ -982,7 +1014,7 @@ func (cli *DockerCli) CmdRm(args ...string) error {
 	cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers")
 	v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container")
 	link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container")
-	force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of running container")
+	force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)")
 
 	if err := cmd.Parse(args); err != nil {
 		return nil
@@ -991,6 +1023,7 @@ func (cli *DockerCli) CmdRm(args ...string) error {
 		cmd.Usage()
 		return nil
 	}
+
 	val := url.Values{}
 	if *v {
 		val.Set("v", "1")
@@ -998,6 +1031,7 @@ func (cli *DockerCli) CmdRm(args ...string) error {
 	if *link {
 		val.Set("link", "1")
 	}
+
 	if *force {
 		val.Set("force", "1")
 	}
@@ -1051,16 +1085,19 @@ func (cli *DockerCli) CmdImport(args ...string) error {
 		return nil
 	}
 
-	var src, repository, tag string
+	var (
+		v          = url.Values{}
+		src        = cmd.Arg(0)
+		repository = cmd.Arg(1)
+	)
+
+	v.Set("fromSrc", src)
+	v.Set("repo", repository)
 
 	if cmd.NArg() == 3 {
 		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' as been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n")
-		src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
-	} else {
-		src = cmd.Arg(0)
-		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
+		v.Set("tag", cmd.Arg(2))
 	}
-	v := url.Values{}
 
 	if repository != "" {
 		//Check if the given image name can be resolved
@@ -1069,10 +1106,6 @@ func (cli *DockerCli) CmdImport(args ...string) error {
 		}
 	}
 
-	v.Set("repo", repository)
-	v.Set("tag", tag)
-	v.Set("fromSrc", src)
-
 	var in io.Reader
 
 	if src == "-" {
@@ -1096,7 +1129,7 @@ func (cli *DockerCli) CmdPush(args ...string) error {
 
 	cli.LoadConfigFile()
 
-	remote, tag := utils.ParseRepositoryTag(name)
+	remote, tag := parsers.ParseRepositoryTag(name)
 
 	// Resolve the Repository name from fqn to hostname + name
 	hostname, _, err := registry.ResolveRepositoryName(remote)
@@ -1158,12 +1191,18 @@ func (cli *DockerCli) CmdPull(args ...string) error {
 		cmd.Usage()
 		return nil
 	}
+	var (
+		v      = url.Values{}
+		remote = cmd.Arg(0)
+	)
+
+	v.Set("fromImage", remote)
 
-	remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0))
 	if *tag == "" {
-		*tag = parsedTag
+		v.Set("tag", *tag)
 	}
 
+	remote, _ = parsers.ParseRepositoryTag(remote)
 	// Resolve the Repository name from fqn to hostname + name
 	hostname, _, err := registry.ResolveRepositoryName(remote)
 	if err != nil {
@@ -1174,9 +1213,6 @@ func (cli *DockerCli) CmdPull(args ...string) error {
 
 	// Resolve the Auth config relevant for this server
 	authConfig := cli.configFile.ResolveAuthConfig(hostname)
-	v := url.Values{}
-	v.Set("fromImage", remote)
-	v.Set("tag", *tag)
 
 	pull := func(authConfig registry.AuthConfig) error {
 		buf, err := json.Marshal(authConfig)
@@ -1216,7 +1252,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 	flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format")
 	flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format")
 
-	var flFilter opts.ListOpts
+	flFilter := opts.NewListOpts(nil)
 	cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 'dangling=true')")
 
 	if err := cmd.Parse(args); err != nil {
@@ -1349,7 +1385,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 		for _, out := range outs.Data {
 			for _, repotag := range out.GetList("RepoTags") {
 
-				repo, tag := utils.ParseRepositoryTag(repotag)
+				repo, tag := parsers.ParseRepositoryTag(repotag)
 				outID := out.Get("Id")
 				if !*noTrunc {
 					outID = utils.TruncateID(outID)
@@ -1449,6 +1485,9 @@ func (cli *DockerCli) CmdPs(args ...string) error {
 	before := cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name, include non-running ones.")
 	last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.")
 
+	flFilter := opts.NewListOpts(nil)
+	cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values. Valid filters:\nexited=<int> - containers with exit code of <int>")
+
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -1472,6 +1511,24 @@ func (cli *DockerCli) CmdPs(args ...string) error {
 		v.Set("size", "1")
 	}
 
+	// Consolidate all filter flags, and sanity check them.
+	// They'll get processed in the daemon/server.
+	psFilterArgs := filters.Args{}
+	for _, f := range flFilter.GetAll() {
+		var err error
+		psFilterArgs, err = filters.ParseFlag(f, psFilterArgs)
+		if err != nil {
+			return err
+		}
+	}
+	if len(psFilterArgs) > 0 {
+		filterJson, err := filters.ToParam(psFilterArgs)
+		if err != nil {
+			return err
+		}
+		v.Set("filters", filterJson)
+	}
+
 	body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false))
 	if err != nil {
 		return err
@@ -1511,6 +1568,7 @@ func (cli *DockerCli) CmdPs(args ...string) error {
 				outCommand = out.Get("Command")
 				ports      = engine.NewTable("", 0)
 			)
+			outCommand = strconv.Quote(outCommand)
 			if !*noTrunc {
 				outCommand = utils.Trunc(outCommand, 20)
 			}
@@ -1549,7 +1607,7 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
 
 	var (
 		name            = cmd.Arg(0)
-		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
+		repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1))
 	)
 
 	if name == "" || len(cmd.Args()) > 2 {
@@ -1614,7 +1672,7 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
 		loc = time.FixedZone(time.Now().Zone())
 	)
 	var setTime = func(key, value string) {
-		format := "2006-01-02 15:04:05 -0700 MST"
+		format := time.RFC3339Nano
 		if len(value) < len(format) {
 			format = format[:len(value)]
 		}
@@ -1736,7 +1794,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
 	var (
 		cmd     = cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container")
 		noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN")
-		proxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signals to the process (even in non-TTY mode). SIGCHLD is not proxied.")
+		proxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.")
 	)
 
 	if err := cmd.Parse(args); err != nil {
@@ -1770,7 +1828,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
 
 	if tty && cli.isTerminal {
 		if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
-			utils.Debugf("Error monitoring TTY size: %s", err)
+			log.Debugf("Error monitoring TTY size: %s", err)
 		}
 	}
 
@@ -1862,7 +1920,7 @@ func (cli *DockerCli) CmdSearch(args ...string) error {
 type ports []int
 
 func (cli *DockerCli) CmdTag(args ...string) error {
-	cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository")
+	cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository")
 	force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force")
 	if err := cmd.Parse(args); err != nil {
 		return nil
@@ -1873,7 +1931,7 @@ func (cli *DockerCli) CmdTag(args ...string) error {
 	}
 
 	var (
-		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
+		repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1))
 		v               = url.Values{}
 	)
 
@@ -1894,6 +1952,41 @@ func (cli *DockerCli) CmdTag(args ...string) error {
 	return nil
 }
 
+func (cli *DockerCli) pullImage(image string) error {
+	v := url.Values{}
+	repos, tag := parsers.ParseRepositoryTag(image)
+	// pull only the image tagged 'latest' if no tag was specified
+	if tag == "" {
+		tag = "latest"
+	}
+	v.Set("fromImage", repos)
+	v.Set("tag", tag)
+
+	// Resolve the Repository name from fqn to hostname + name
+	hostname, _, err := registry.ResolveRepositoryName(repos)
+	if err != nil {
+		return err
+	}
+
+	// Load the auth config file, to be able to pull the image
+	cli.LoadConfigFile()
+
+	// Resolve the Auth config relevant for this server
+	authConfig := cli.configFile.ResolveAuthConfig(hostname)
+	buf, err := json.Marshal(authConfig)
+	if err != nil {
+		return err
+	}
+
+	registryAuthHeader := []string{
+		base64.URLEncoding.EncodeToString(buf),
+	}
+	if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil {
+		return err
+	}
+	return nil
+}
+
 func (cli *DockerCli) CmdRun(args ...string) error {
 	// FIXME: just use runconfig.Parse already
 	config, hostConfig, cmd, err := runconfig.ParseSubcommand(cli.Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container"), args, nil)
@@ -1955,37 +2048,10 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 	if statusCode == 404 {
 		fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image)
 
-		v := url.Values{}
-		repos, tag := utils.ParseRepositoryTag(config.Image)
-		// pull only the image tagged 'latest' if no tag was specified
-		if tag == "" {
-			tag = "latest"
-		}
-		v.Set("fromImage", repos)
-		v.Set("tag", tag)
-
-		// Resolve the Repository name from fqn to hostname + name
-		hostname, _, err := registry.ResolveRepositoryName(repos)
-		if err != nil {
-			return err
-		}
-
-		// Load the auth config file, to be able to pull the image
-		cli.LoadConfigFile()
-
-		// Resolve the Auth config relevant for this server
-		authConfig := cli.configFile.ResolveAuthConfig(hostname)
-		buf, err := json.Marshal(authConfig)
-		if err != nil {
-			return err
-		}
-
-		registryAuthHeader := []string{
-			base64.URLEncoding.EncodeToString(buf),
-		}
-		if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil {
+		if err = cli.pullImage(config.Image); err != nil {
 			return err
 		}
+		// Retry
 		if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false); err != nil {
 			return err
 		}
@@ -2033,9 +2099,9 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 
 	// Block the return until the chan gets closed
 	defer func() {
-		utils.Debugf("End of CmdRun(), Waiting for hijack to finish.")
+		log.Debugf("End of CmdRun(), Waiting for hijack to finish.")
 		if _, ok := <-hijacked; ok {
-			utils.Errorf("Hijack did not finish (chan still open)")
+			log.Errorf("Hijack did not finish (chan still open)")
 		}
 	}()
 
@@ -2081,7 +2147,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		}
 	case err := <-errCh:
 		if err != nil {
-			utils.Debugf("Error hijack: %s", err)
+			log.Debugf("Error hijack: %s", err)
 			return err
 		}
 	}
@@ -2093,13 +2159,13 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 
 	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal {
 		if err := cli.monitorTtySize(runResult.Get("Id")); err != nil {
-			utils.Errorf("Error monitoring TTY size: %s\n", err)
+			log.Errorf("Error monitoring TTY size: %s", err)
 		}
 	}
 
 	if errCh != nil {
 		if err := <-errCh; err != nil {
-			utils.Debugf("Error hijack: %s", err)
+			log.Debugf("Error hijack: %s", err)
 			return err
 		}
 	}

+ 12 - 11
api/client/hijack.go

@@ -11,10 +11,11 @@ import (
 	"runtime"
 	"strings"
 
-	"github.com/dotcloud/docker/api"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/pkg/term"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/utils"
 )
 
 func (cli *DockerCli) dial() (net.Conn, error) {
@@ -88,12 +89,12 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
 			}()
 
 			// When TTY is ON, use regular copy
-			if setRawTerminal {
+			if setRawTerminal && stdout != nil {
 				_, err = io.Copy(stdout, br)
 			} else {
 				_, err = utils.StdCopy(stdout, stderr, br)
 			}
-			utils.Debugf("[hijack] End of stdout")
+			log.Debugf("[hijack] End of stdout")
 			return err
 		})
 	}
@@ -101,15 +102,15 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
 	sendStdin := utils.Go(func() error {
 		if in != nil {
 			io.Copy(rwc, in)
-			utils.Debugf("[hijack] End of stdin")
+			log.Debugf("[hijack] End of stdin")
 		}
 		if tcpc, ok := rwc.(*net.TCPConn); ok {
 			if err := tcpc.CloseWrite(); err != nil {
-				utils.Debugf("Couldn't send EOF: %s\n", err)
+				log.Debugf("Couldn't send EOF: %s", err)
 			}
 		} else if unixc, ok := rwc.(*net.UnixConn); ok {
 			if err := unixc.CloseWrite(); err != nil {
-				utils.Debugf("Couldn't send EOF: %s\n", err)
+				log.Debugf("Couldn't send EOF: %s", err)
 			}
 		}
 		// Discard errors due to pipe interruption
@@ -118,14 +119,14 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
 
 	if stdout != nil || stderr != nil {
 		if err := <-receiveStdout; err != nil {
-			utils.Debugf("Error receiveStdout: %s", err)
+			log.Debugf("Error receiveStdout: %s", err)
 			return err
 		}
 	}
 
 	if !cli.isTerminal {
 		if err := <-sendStdin; err != nil {
-			utils.Debugf("Error sendStdin: %s", err)
+			log.Debugf("Error sendStdin: %s", err)
 			return err
 		}
 	}

+ 10 - 9
api/client/utils.go

@@ -17,12 +17,13 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/dotcloud/docker/api"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/pkg/term"
-	"github.com/dotcloud/docker/registry"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
 )
 
 var (
@@ -165,7 +166,7 @@ func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in
 		} else {
 			_, err = utils.StdCopy(stdout, stderr, resp.Body)
 		}
-		utils.Debugf("[stream] End of stdout")
+		log.Debugf("[stream] End of stdout")
 		return err
 	}
 	return nil
@@ -180,7 +181,7 @@ func (cli *DockerCli) resizeTty(id string) {
 	v.Set("h", strconv.Itoa(height))
 	v.Set("w", strconv.Itoa(width))
 	if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil {
-		utils.Debugf("Error resize: %s", err)
+		log.Debugf("Error resize: %s", err)
 	}
 }
 
@@ -237,7 +238,7 @@ func (cli *DockerCli) getTtySize() (int, int) {
 	}
 	ws, err := term.GetWinsize(cli.terminalFd)
 	if err != nil {
-		utils.Debugf("Error getting size: %s", err)
+		log.Debugf("Error getting size: %s", err)
 		if ws == nil {
 			return 0, 0
 		}

+ 7 - 6
api/common.go

@@ -5,19 +5,20 @@ import (
 	"mime"
 	"strings"
 
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/pkg/version"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/version"
 )
 
 const (
-	APIVERSION        version.Version = "1.13"
+	APIVERSION        version.Version = "1.14"
 	DEFAULTHTTPHOST                   = "127.0.0.1"
 	DEFAULTUNIXSOCKET                 = "/var/run/docker.sock"
 )
 
 func ValidateHost(val string) (string, error) {
-	host, err := utils.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val)
+	host, err := parsers.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val)
 	if err != nil {
 		return val, err
 	}
@@ -42,7 +43,7 @@ func DisplayablePorts(ports *engine.Table) string {
 func MatchesContentType(contentType, expectedType string) bool {
 	mimetype, _, err := mime.ParseMediaType(contentType)
 	if err != nil {
-		utils.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
+		log.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
 	}
 	return err == nil && mimetype == expectedType
 }

+ 2 - 1
api/server/MAINTAINERS

@@ -1,2 +1,3 @@
 Victor Vieux <vieux@docker.com> (@vieux)
-Johan Euphrosine <proppy@google.com> (@proppy)
+# off the grid until september
+# Johan Euphrosine <proppy@google.com> (@proppy)

+ 51 - 37
api/server/server.go

@@ -11,7 +11,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"log"
 	"net"
 	"net/http"
 	"net/http/pprof"
@@ -21,16 +20,18 @@ import (
 	"syscall"
 
 	"code.google.com/p/go.net/websocket"
-
-	"github.com/dotcloud/docker/api"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/pkg/listenbuffer"
-	"github.com/dotcloud/docker/pkg/systemd"
-	"github.com/dotcloud/docker/pkg/user"
-	"github.com/dotcloud/docker/pkg/version"
-	"github.com/dotcloud/docker/registry"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/libcontainer/user"
 	"github.com/gorilla/mux"
+
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/listenbuffer"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/systemd"
+	"github.com/docker/docker/pkg/version"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
 )
 
 var (
@@ -87,7 +88,7 @@ func httpError(w http.ResponseWriter, err error) {
 	}
 
 	if err != nil {
-		utils.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error())
+		log.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error())
 		http.Error(w, err.Error(), statusCode)
 	}
 }
@@ -237,10 +238,10 @@ func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseW
 		outsLegacy := engine.NewTable("Created", 0)
 		for _, out := range outs.Data {
 			for _, repoTag := range out.GetList("RepoTags") {
-				parts := strings.Split(repoTag, ":")
+				repo, tag := parsers.ParseRepositoryTag(repoTag)
 				outLegacy := &engine.Env{}
-				outLegacy.Set("Repository", parts[0])
-				outLegacy.Set("Tag", parts[1])
+				outLegacy.Set("Repository", repo)
+				outLegacy.SetJson("Tag", tag)
 				outLegacy.Set("Id", out.Get("Id"))
 				outLegacy.SetInt64("Created", out.GetInt64("Created"))
 				outLegacy.SetInt64("Size", out.GetInt64("Size"))
@@ -301,7 +302,7 @@ func getContainersChanges(eng *engine.Engine, version version.Version, w http.Re
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	var job = eng.Job("changes", vars["name"])
+	var job = eng.Job("container_changes", vars["name"])
 	streamJSON(job, w, false)
 
 	return job.Run()
@@ -338,6 +339,7 @@ func getContainersJSON(eng *engine.Engine, version version.Version, w http.Respo
 	job.Setenv("since", r.Form.Get("since"))
 	job.Setenv("before", r.Form.Get("before"))
 	job.Setenv("limit", r.Form.Get("limit"))
+	job.Setenv("filters", r.Form.Get("filters"))
 
 	if version.GreaterThanOrEqualTo("1.5") {
 		streamJSON(job, w, false)
@@ -437,7 +439,7 @@ func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWrit
 		stdoutBuffer = bytes.NewBuffer(nil)
 	)
 	if err := config.Decode(r.Body); err != nil {
-		utils.Errorf("%s", err)
+		log.Errorf("%s", err)
 	}
 
 	if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") {
@@ -468,6 +470,7 @@ func postImagesCreate(eng *engine.Engine, version version.Version, w http.Respon
 
 	var (
 		image = r.Form.Get("fromImage")
+		repo  = r.Form.Get("repo")
 		tag   = r.Form.Get("tag")
 		job   *engine.Job
 	)
@@ -482,18 +485,24 @@ func postImagesCreate(eng *engine.Engine, version version.Version, w http.Respon
 		}
 	}
 	if image != "" { //pull
+		if tag == "" {
+			image, tag = parsers.ParseRepositoryTag(image)
+		}
 		metaHeaders := map[string][]string{}
 		for k, v := range r.Header {
 			if strings.HasPrefix(k, "X-Meta-") {
 				metaHeaders[k] = v
 			}
 		}
-		job = eng.Job("pull", r.Form.Get("fromImage"), tag)
+		job = eng.Job("pull", image, tag)
 		job.SetenvBool("parallel", version.GreaterThan("1.3"))
 		job.SetenvJson("metaHeaders", metaHeaders)
 		job.SetenvJson("authConfig", authConfig)
 	} else { //import
-		job = eng.Job("import", r.Form.Get("fromSrc"), r.Form.Get("repo"), tag)
+		if tag == "" {
+			repo, tag = parsers.ParseRepositoryTag(repo)
+		}
+		job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag)
 		job.Stdin.Add(r.Body)
 	}
 
@@ -670,10 +679,12 @@ func deleteContainers(eng *engine.Engine, version version.Version, w http.Respon
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	job := eng.Job("container_delete", vars["name"])
+	job := eng.Job("delete", vars["name"])
+
+	job.Setenv("forceRemove", r.Form.Get("force"))
+
 	job.Setenv("removeVolume", r.Form.Get("v"))
 	job.Setenv("removeLink", r.Form.Get("link"))
-	job.Setenv("forceRemove", r.Form.Get("force"))
 	if err := job.Run(); err != nil {
 		return err
 	}
@@ -706,13 +717,16 @@ func postContainersStart(eng *engine.Engine, version version.Version, w http.Res
 	)
 
 	// allow a nil body for backwards compatibility
-	if r.Body != nil {
-		if api.MatchesContentType(r.Header.Get("Content-Type"), "application/json") {
-			if err := job.DecodeEnv(r.Body); err != nil {
-				return err
-			}
+	if r.Body != nil && r.ContentLength > 0 {
+		if !api.MatchesContentType(r.Header.Get("Content-Type"), "application/json") {
+			return fmt.Errorf("Content-Type of application/json is required")
+		}
+
+		if err := job.DecodeEnv(r.Body); err != nil {
+			return err
 		}
 	}
+
 	if err := job.Run(); err != nil {
 		if err.Error() == "Container already started" {
 			w.WriteHeader(http.StatusNotModified)
@@ -864,7 +878,7 @@ func wsContainersAttach(eng *engine.Engine, version version.Version, w http.Resp
 		job.Stdout.Add(ws)
 		job.Stderr.Set(ws)
 		if err := job.Run(); err != nil {
-			utils.Errorf("Error attaching websocket: %s", err)
+			log.Errorf("Error attaching websocket: %s", err)
 		}
 	})
 	h.ServeHTTP(w, r)
@@ -991,7 +1005,7 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp
 	job := eng.Job("container_copy", vars["name"], copyData.Get("Resource"))
 	job.Stdout.Add(w)
 	if err := job.Run(); err != nil {
-		utils.Errorf("%s", err.Error())
+		log.Errorf("%s", err.Error())
 		if strings.Contains(err.Error(), "No such container") {
 			w.WriteHeader(http.StatusNotFound)
 		} else if strings.Contains(err.Error(), "no such file or directory") {
@@ -1019,16 +1033,16 @@ func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r
 func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		// log the request
-		utils.Debugf("Calling %s %s", localMethod, localRoute)
+		log.Debugf("Calling %s %s", localMethod, localRoute)
 
 		if logging {
-			log.Println(r.Method, r.RequestURI)
+			log.Infof("%s %s", r.Method, r.RequestURI)
 		}
 
 		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
 			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
 			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
-				utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
+				log.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
 			}
 		}
 		version := version.Version(mux.Vars(r)["version"])
@@ -1045,7 +1059,7 @@ func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, local
 		}
 
 		if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
-			utils.Errorf("Error making handler: %s", err)
+			log.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
 			httpError(w, err)
 		}
 	}
@@ -1134,7 +1148,7 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st
 
 	for method, routes := range m {
 		for route, fct := range routes {
-			utils.Debugf("Registering %s, %s", method, route)
+			log.Debugf("Registering %s, %s", method, route)
 			// NOTE: scope issue, make sure the variables are local and won't be changed
 			localRoute := route
 			localFct := fct
@@ -1181,7 +1195,7 @@ func ServeFd(addr string, handle http.Handler) error {
 	chErrors := make(chan error, len(ls))
 
 	// We don't want to start serving on these sockets until the
-	// "initserver" job has completed. Otherwise required handlers
+	// daemon is initialized and installed. Otherwise required handlers
 	// won't be ready.
 	<-activationLock
 
@@ -1224,7 +1238,7 @@ func changeGroup(addr string, nameOrGid string) error {
 		return err
 	}
 
-	utils.Debugf("%s group found. gid: %d", nameOrGid, gid)
+	log.Debugf("%s group found. gid: %d", nameOrGid, gid)
 	return os.Chown(addr, 0, gid)
 }
 
@@ -1295,7 +1309,7 @@ func ListenAndServe(proto, addr string, job *engine.Job) error {
 	switch proto {
 	case "tcp":
 		if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") {
-			log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
+			log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
 		}
 	case "unix":
 		socketGroup := job.Getenv("SocketGroup")
@@ -1303,7 +1317,7 @@ func ListenAndServe(proto, addr string, job *engine.Job) error {
 			if err := changeGroup(addr, socketGroup); err != nil {
 				if socketGroup == "docker" {
 					// if the user hasn't explicitly specified the group ownership, don't fail on errors.
-					utils.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error())
+					log.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error())
 				} else {
 					return err
 				}
@@ -1338,7 +1352,7 @@ func ServeApi(job *engine.Job) engine.Status {
 			return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
 		}
 		go func() {
-			log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1])
+			log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
 			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
 		}()
 	}

+ 219 - 17
api/server/server_unit_test.go

@@ -7,11 +7,13 @@ import (
 	"io"
 	"net/http"
 	"net/http/httptest"
+	"reflect"
 	"strings"
 	"testing"
 
-	"github.com/dotcloud/docker/api"
-	"github.com/dotcloud/docker/engine"
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/version"
 )
 
 func TestGetBoolParam(t *testing.T) {
@@ -111,8 +113,105 @@ func TestGetInfo(t *testing.T) {
 	if v.GetInt("Containers") != 1 {
 		t.Fatalf("%#v\n", v)
 	}
-	if r.HeaderMap.Get("Content-Type") != "application/json" {
-		t.Fatalf("%#v\n", r)
+	assertContentType(r, "application/json", t)
+}
+
+func TestGetImagesJSON(t *testing.T) {
+	eng := engine.New()
+	var called bool
+	eng.Register("images", func(job *engine.Job) engine.Status {
+		called = true
+		v := createEnvFromGetImagesJSONStruct(sampleImage)
+		if _, err := v.WriteTo(job.Stdout); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	})
+	r := serveRequest("GET", "/images/json", nil, eng, t)
+	if !called {
+		t.Fatal("handler was not called")
+	}
+	assertHttpNotError(r, t)
+	assertContentType(r, "application/json", t)
+	var observed getImagesJSONStruct
+	if err := json.Unmarshal(r.Body.Bytes(), &observed); err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(observed, sampleImage) {
+		t.Errorf("Expected %#v but got %#v", sampleImage, observed)
+	}
+}
+
+func TestGetImagesJSONFilter(t *testing.T) {
+	eng := engine.New()
+	filter := "nothing"
+	eng.Register("images", func(job *engine.Job) engine.Status {
+		filter = job.Getenv("filter")
+		return engine.StatusOK
+	})
+	serveRequest("GET", "/images/json?filter=aaaa", nil, eng, t)
+	if filter != "aaaa" {
+		t.Errorf("%#v", filter)
+	}
+}
+
+func TestGetImagesJSONFilters(t *testing.T) {
+	eng := engine.New()
+	filter := "nothing"
+	eng.Register("images", func(job *engine.Job) engine.Status {
+		filter = job.Getenv("filters")
+		return engine.StatusOK
+	})
+	serveRequest("GET", "/images/json?filters=nnnn", nil, eng, t)
+	if filter != "nnnn" {
+		t.Errorf("%#v", filter)
+	}
+}
+
+func TestGetImagesJSONAll(t *testing.T) {
+	eng := engine.New()
+	allFilter := "-1"
+	eng.Register("images", func(job *engine.Job) engine.Status {
+		allFilter = job.Getenv("all")
+		return engine.StatusOK
+	})
+	serveRequest("GET", "/images/json?all=1", nil, eng, t)
+	if allFilter != "1" {
+		t.Errorf("%#v", allFilter)
+	}
+}
+
+func TestGetImagesJSONLegacyFormat(t *testing.T) {
+	eng := engine.New()
+	var called bool
+	eng.Register("images", func(job *engine.Job) engine.Status {
+		called = true
+		outsLegacy := engine.NewTable("Created", 0)
+		outsLegacy.Add(createEnvFromGetImagesJSONStruct(sampleImage))
+		if _, err := outsLegacy.WriteListTo(job.Stdout); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	})
+	r := serveRequestUsingVersion("GET", "/images/json", "1.6", nil, eng, t)
+	if !called {
+		t.Fatal("handler was not called")
+	}
+	assertHttpNotError(r, t)
+	assertContentType(r, "application/json", t)
+	images := engine.NewTable("Created", 0)
+	if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil {
+		t.Fatal(err)
+	}
+	if images.Len() != 1 {
+		t.Fatalf("Expected 1 image, %d found", images.Len())
+	}
+	image := images.Data[0]
+	if image.Get("Tag") != "test-tag" {
+		t.Errorf("Expected tag 'test-tag', found '%s'", image.Get("Tag"))
+	}
+	if image.Get("Repository") != "test-name" {
+		t.Errorf("Expected repository 'test-name', found '%s'", image.Get("Repository"))
 	}
 }
 
@@ -123,12 +222,12 @@ func TestGetContainersByName(t *testing.T) {
 	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
 		called = true
 		if job.Args[0] != name {
-			t.Fatalf("name != '%s': %#v", name, job.Args[0])
+			t.Errorf("name != '%s': %#v", name, job.Args[0])
 		}
 		if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") {
-			t.Fatal("dirty env variable not set")
+			t.Errorf("dirty env variable not set")
 		} else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") {
-			t.Fatal("dirty env variable set when it shouldn't")
+			t.Errorf("dirty env variable set when it shouldn't")
 		}
 		v := &engine.Env{}
 		v.SetBool("dirty", true)
@@ -141,9 +240,7 @@ func TestGetContainersByName(t *testing.T) {
 	if !called {
 		t.Fatal("handler was not called")
 	}
-	if r.HeaderMap.Get("Content-Type") != "application/json" {
-		t.Fatalf("%#v\n", r)
-	}
+	assertContentType(r, "application/json", t)
 	var stdoutJson interface{}
 	if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil {
 		t.Fatalf("%#v", err)
@@ -178,21 +275,19 @@ func TestGetEvents(t *testing.T) {
 	if !called {
 		t.Fatal("handler was not called")
 	}
-	if r.HeaderMap.Get("Content-Type") != "application/json" {
-		t.Fatalf("%#v\n", r)
-	}
+	assertContentType(r, "application/json", t)
 	var stdout_json struct {
 		Since int
 		Until int
 	}
 	if err := json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil {
-		t.Fatalf("%#v", err)
+		t.Fatal(err)
 	}
 	if stdout_json.Since != 1 {
-		t.Fatalf("since != 1: %#v", stdout_json.Since)
+		t.Errorf("since != 1: %#v", stdout_json.Since)
 	}
 	if stdout_json.Until != 0 {
-		t.Fatalf("until != 0: %#v", stdout_json.Until)
+		t.Errorf("until != 0: %#v", stdout_json.Until)
 	}
 }
 
@@ -319,13 +414,77 @@ func TestGetImagesHistory(t *testing.T) {
 	}
 }
 
+func TestGetImagesByName(t *testing.T) {
+	eng := engine.New()
+	name := "image_name"
+	var called bool
+	eng.Register("image_inspect", func(job *engine.Job) engine.Status {
+		called = true
+		if job.Args[0] != name {
+			t.Fatalf("name != '%s': %#v", name, job.Args[0])
+		}
+		if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") {
+			t.Fatal("dirty env variable not set")
+		} else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") {
+			t.Fatal("dirty env variable set when it shouldn't")
+		}
+		v := &engine.Env{}
+		v.SetBool("dirty", true)
+		if _, err := v.WriteTo(job.Stdout); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	})
+	r := serveRequest("GET", "/images/"+name+"/json", nil, eng, t)
+	if !called {
+		t.Fatal("handler was not called")
+	}
+	if r.HeaderMap.Get("Content-Type") != "application/json" {
+		t.Fatalf("%#v\n", r)
+	}
+	var stdoutJson interface{}
+	if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil {
+		t.Fatalf("%#v", err)
+	}
+	if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 {
+		t.Fatalf("%#v", stdoutJson)
+	}
+}
+
+func TestDeleteContainers(t *testing.T) {
+	eng := engine.New()
+	name := "foo"
+	var called bool
+	eng.Register("delete", func(job *engine.Job) engine.Status {
+		called = true
+		if len(job.Args) == 0 {
+			t.Fatalf("Job arguments is empty")
+		}
+		if job.Args[0] != name {
+			t.Fatalf("name != '%s': %#v", name, job.Args[0])
+		}
+		return engine.StatusOK
+	})
+	r := serveRequest("DELETE", "/containers/"+name, nil, eng, t)
+	if !called {
+		t.Fatalf("handler was not called")
+	}
+	if r.Code != http.StatusNoContent {
+		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusNoContent)
+	}
+}
+
 func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
+	return serveRequestUsingVersion(method, target, api.APIVERSION, body, eng, t)
+}
+
+func serveRequestUsingVersion(method, target string, version version.Version, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
 	r := httptest.NewRecorder()
 	req, err := http.NewRequest(method, target, body)
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err := ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+	if err := ServeRequest(eng, version, r, req); err != nil {
 		t.Fatal(err)
 	}
 	return r
@@ -351,3 +510,46 @@ func toJson(data interface{}, t *testing.T) io.Reader {
 	}
 	return &buf
 }
+
+func assertContentType(recorder *httptest.ResponseRecorder, content_type string, t *testing.T) {
+	if recorder.HeaderMap.Get("Content-Type") != content_type {
+		t.Fatalf("%#v\n", recorder)
+	}
+}
+
+// XXX: Duplicated from integration/utils_test.go, but maybe that's OK as that
+// should die as soon as we have converted all integration tests?
+// assertHttpNotError expects the given response to not have an error.
+// Otherwise it causes the test to fail.
+func assertHttpNotError(r *httptest.ResponseRecorder, t *testing.T) {
+	// Non-error http status are [200, 400)
+	if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest {
+		t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code))
+	}
+}
+
+func createEnvFromGetImagesJSONStruct(data getImagesJSONStruct) *engine.Env {
+	v := &engine.Env{}
+	v.SetList("RepoTags", data.RepoTags)
+	v.Set("Id", data.Id)
+	v.SetInt64("Created", data.Created)
+	v.SetInt64("Size", data.Size)
+	v.SetInt64("VirtualSize", data.VirtualSize)
+	return v
+}
+
+type getImagesJSONStruct struct {
+	RepoTags    []string
+	Id          string
+	Created     int64
+	Size        int64
+	VirtualSize int64
+}
+
+var sampleImage getImagesJSONStruct = getImagesJSONStruct{
+	RepoTags:    []string{"test-name:test-tag"},
+	Id:          "ID",
+	Created:     999,
+	Size:        777,
+	VirtualSize: 666,
+}

+ 2 - 1
archive/MAINTAINERS

@@ -1 +1,2 @@
-Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
+Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)
+Tibor Vass <teabee89@gmail.com> (@tiborvass)

+ 65 - 39
archive/archive.go

@@ -16,9 +16,11 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/dotcloud/docker/pkg/system"
-	"github.com/dotcloud/docker/utils"
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/utils"
 )
 
 type (
@@ -61,7 +63,7 @@ func DetectCompression(source []byte) Compression {
 		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
 	} {
 		if len(source) < len(m) {
-			utils.Debugf("Len too short")
+			log.Debugf("Len too short")
 			continue
 		}
 		if bytes.Compare(m, source[:len(m)]) == 0 {
@@ -83,7 +85,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
 	if err != nil {
 		return nil, err
 	}
-	utils.Debugf("[tar autodetect] n: %v", bs)
+	log.Debugf("[tar autodetect] n: %v", bs)
 
 	compression := DetectCompression(bs)
 
@@ -131,7 +133,7 @@ func (compression *Compression) Extension() string {
 	return ""
 }
 
-func addTarFile(path, name string, tw *tar.Writer) error {
+func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error {
 	fi, err := os.Lstat(path)
 	if err != nil {
 		return err
@@ -177,15 +179,22 @@ func addTarFile(path, name string, tw *tar.Writer) error {
 	}
 
 	if hdr.Typeflag == tar.TypeReg {
-		if file, err := os.Open(path); err != nil {
+		file, err := os.Open(path)
+		if err != nil {
 			return err
-		} else {
-			_, err := io.Copy(tw, file)
-			if err != nil {
-				return err
-			}
-			file.Close()
 		}
+
+		twBuf.Reset(tw)
+		_, err = io.Copy(twBuf, file)
+		file.Close()
+		if err != nil {
+			return err
+		}
+		err = twBuf.Flush()
+		if err != nil {
+			return err
+		}
+		twBuf.Reset(nil)
 	}
 
 	return nil
@@ -245,7 +254,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 		}
 
 	case tar.TypeXGlobalHeader:
-		utils.Debugf("PAX Global Extended Headers found and ignored")
+		log.Debugf("PAX Global Extended Headers found and ignored")
 		return nil
 
 	default:
@@ -328,10 +337,12 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 			options.Includes = []string{"."}
 		}
 
+		twBuf := bufio.NewWriterSize(nil, twBufSize)
+
 		for _, include := range options.Includes {
 			filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
 				if err != nil {
-					utils.Debugf("Tar: Can't stat file %s to tar: %s\n", srcPath, err)
+					log.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
 					return nil
 				}
 
@@ -340,23 +351,21 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 					return nil
 				}
 
-				for _, exclude := range options.Excludes {
-					matched, err := filepath.Match(exclude, relFilePath)
-					if err != nil {
-						utils.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude)
-						return err
-					}
-					if matched {
-						utils.Debugf("Skipping excluded path: %s", relFilePath)
-						if f.IsDir() {
-							return filepath.SkipDir
-						}
-						return nil
+				skip, err := utils.Matches(relFilePath, options.Excludes)
+				if err != nil {
+					log.Debugf("Error matching %s", relFilePath, err)
+					return err
+				}
+
+				if skip {
+					if f.IsDir() {
+						return filepath.SkipDir
 					}
+					return nil
 				}
 
-				if err := addTarFile(filePath, relFilePath, tw); err != nil {
-					utils.Debugf("Can't add file %s to tar: %s\n", srcPath, err)
+				if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil {
+					log.Debugf("Can't add file %s to tar: %s", srcPath, err)
 				}
 				return nil
 			})
@@ -364,13 +373,13 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 
 		// Make sure to check the error on Close.
 		if err := tw.Close(); err != nil {
-			utils.Debugf("Can't close tar writer: %s\n", err)
+			log.Debugf("Can't close tar writer: %s", err)
 		}
 		if err := compressWriter.Close(); err != nil {
-			utils.Debugf("Can't close compress writer: %s\n", err)
+			log.Debugf("Can't close compress writer: %s", err)
 		}
 		if err := pipeWriter.Close(); err != nil {
-			utils.Debugf("Can't close pipe writer: %s\n", err)
+			log.Debugf("Can't close pipe writer: %s", err)
 		}
 	}()
 
@@ -383,10 +392,18 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 //  identity (uncompressed), gzip, bzip2, xz.
 // FIXME: specify behavior when target path exists vs. doesn't exist.
 func Untar(archive io.Reader, dest string, options *TarOptions) error {
+	if options == nil {
+		options = &TarOptions{}
+	}
+
 	if archive == nil {
 		return fmt.Errorf("Empty archive")
 	}
 
+	if options.Excludes == nil {
+		options.Excludes = []string{}
+	}
+
 	decompressedArchive, err := DecompressStream(archive)
 	if err != nil {
 		return err
@@ -394,10 +411,12 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
 	defer decompressedArchive.Close()
 
 	tr := tar.NewReader(decompressedArchive)
+	trBuf := bufio.NewReaderSize(nil, trBufSize)
 
 	var dirs []*tar.Header
 
 	// Iterate through the files in the archive.
+loop:
 	for {
 		hdr, err := tr.Next()
 		if err == io.EOF {
@@ -411,6 +430,12 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
 		// Normalize name, for safety and for a simple is-root check
 		hdr.Name = filepath.Clean(hdr.Name)
 
+		for _, exclude := range options.Excludes {
+			if strings.HasPrefix(hdr.Name, exclude) {
+				continue loop
+			}
+		}
+
 		if !strings.HasSuffix(hdr.Name, "/") {
 			// Not the root directory, ensure that the parent directory exists
 			parent := filepath.Dir(hdr.Name)
@@ -439,7 +464,8 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
 				}
 			}
 		}
-		if err := createTarFile(path, dest, hdr, tr, options == nil || !options.NoLchown); err != nil {
+		trBuf.Reset(tr)
+		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil {
 			return err
 		}
 
@@ -465,7 +491,7 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
 // the output of one piped into the other. If either Tar or Untar fails,
 // TarUntar aborts and returns the error.
 func TarUntar(src string, dst string) error {
-	utils.Debugf("TarUntar(%s %s)", src, dst)
+	log.Debugf("TarUntar(%s %s)", src, dst)
 	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
 	if err != nil {
 		return err
@@ -502,11 +528,11 @@ func CopyWithTar(src, dst string) error {
 		return CopyFileWithTar(src, dst)
 	}
 	// Create dst, copy src's content into it
-	utils.Debugf("Creating dest directory: %s", dst)
+	log.Debugf("Creating dest directory: %s", dst)
 	if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
 		return err
 	}
-	utils.Debugf("Calling TarUntar(%s, %s)", src, dst)
+	log.Debugf("Calling TarUntar(%s, %s)", src, dst)
 	return TarUntar(src, dst)
 }
 
@@ -517,7 +543,7 @@ func CopyWithTar(src, dst string) error {
 // If `dst` ends with a trailing slash '/', the final destination path
 // will be `dst/base(src)`.
 func CopyFileWithTar(src, dst string) (err error) {
-	utils.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+	log.Debugf("CopyFileWithTar(%s, %s)", src, dst)
 	srcSt, err := os.Stat(src)
 	if err != nil {
 		return err
@@ -544,19 +570,19 @@ func CopyFileWithTar(src, dst string) (err error) {
 		}
 		defer srcF.Close()
 
-		tw := tar.NewWriter(w)
 		hdr, err := tar.FileInfoHeader(srcSt, "")
 		if err != nil {
 			return err
 		}
 		hdr.Name = filepath.Base(dst)
+		tw := tar.NewWriter(w)
+		defer tw.Close()
 		if err := tw.WriteHeader(hdr); err != nil {
 			return err
 		}
 		if _, err := io.Copy(tw, srcF); err != nil {
 			return err
 		}
-		tw.Close()
 		return nil
 	})
 	defer func() {

+ 45 - 2
archive/archive_test.go

@@ -11,7 +11,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 )
 
 func TestCmdStreamLargeStderr(t *testing.T) {
@@ -109,6 +109,9 @@ func TestTarUntar(t *testing.T) {
 	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
 		t.Fatal(err)
 	}
+	if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
+		t.Fatal(err)
+	}
 
 	for _, c := range []Compression{
 		Uncompressed,
@@ -116,13 +119,14 @@ func TestTarUntar(t *testing.T) {
 	} {
 		changes, err := tarUntar(t, origin, &TarOptions{
 			Compression: c,
+			Excludes:    []string{"3"},
 		})
 
 		if err != nil {
 			t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
 		}
 
-		if len(changes) != 0 {
+		if len(changes) != 1 || changes[0].Path != "/3" {
 			t.Fatalf("Unexpected differences after tarUntar: %v", changes)
 		}
 	}
@@ -199,3 +203,42 @@ func TestUntarUstarGnuConflict(t *testing.T) {
 		t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm")
 	}
 }
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string) (int, error) {
+	fileData := []byte("fooo")
+	for n := 0; n < numberOfFiles; n++ {
+		fileName := fmt.Sprintf("file-%d", n)
+		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+			return 0, err
+		}
+	}
+	totalSize := numberOfFiles * len(fileData)
+	return totalSize, nil
+}
+
+func BenchmarkTarUntar(b *testing.B) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		b.Fatal(err)
+	}
+	tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
+	if err != nil {
+		b.Fatal(err)
+	}
+	target := path.Join(tempDir, "dest")
+	n, err := prepareUntarSourceDirectory(100, origin)
+	if err != nil {
+		b.Fatal(err)
+	}
+	b.ResetTimer()
+	b.SetBytes(int64(n))
+	defer os.RemoveAll(origin)
+	defer os.RemoveAll(tempDir)
+	for n := 0; n < b.N; n++ {
+		err := TarUntar(origin, target)
+		if err != nil {
+			b.Fatal(err)
+		}
+		os.RemoveAll(target)
+	}
+}

+ 10 - 7
archive/changes.go

@@ -1,6 +1,7 @@
 package archive
 
 import (
+	"bufio"
 	"bytes"
 	"fmt"
 	"io"
@@ -10,9 +11,10 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/dotcloud/docker/pkg/system"
-	"github.com/dotcloud/docker/utils"
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/system"
 )
 
 type ChangeType int
@@ -343,6 +345,7 @@ func ExportChanges(dir string, changes []Change) (Archive, error) {
 	tw := tar.NewWriter(writer)
 
 	go func() {
+		twBuf := bufio.NewWriterSize(nil, twBufSize)
 		// In general we log errors here but ignore them because
 		// during e.g. a diff operation the container can continue
 		// mutating the filesystem and we can see transient errors
@@ -361,19 +364,19 @@ func ExportChanges(dir string, changes []Change) (Archive, error) {
 					ChangeTime: timestamp,
 				}
 				if err := tw.WriteHeader(hdr); err != nil {
-					utils.Debugf("Can't write whiteout header: %s\n", err)
+					log.Debugf("Can't write whiteout header: %s", err)
 				}
 			} else {
 				path := filepath.Join(dir, change.Path)
-				if err := addTarFile(path, change.Path[1:], tw); err != nil {
-					utils.Debugf("Can't add file %s to tar: %s\n", path, err)
+				if err := addTarFile(path, change.Path[1:], tw, twBuf); err != nil {
+					log.Debugf("Can't add file %s to tar: %s", path, err)
 				}
 			}
 		}
 
 		// Make sure to check the error on Close.
 		if err := tw.Close(); err != nil {
-			utils.Debugf("Can't close layer: %s\n", err)
+			log.Debugf("Can't close layer: %s", err)
 		}
 		writer.Close()
 	}()

+ 4 - 0
archive/common.go

@@ -0,0 +1,4 @@
+package archive
+
+const twBufSize = 32 * 1024
+const trBufSize = 32 * 1024

+ 5 - 2
archive/diff.go

@@ -1,6 +1,7 @@
 package archive
 
 import (
+	"bufio"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -9,7 +10,7 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 )
 
 // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
@@ -32,6 +33,7 @@ func ApplyLayer(dest string, layer ArchiveReader) error {
 	}
 
 	tr := tar.NewReader(layer)
+	trBuf := bufio.NewReaderSize(nil, trBufSize)
 
 	var dirs []*tar.Header
 
@@ -108,7 +110,8 @@ func ApplyLayer(dest string, layer ArchiveReader) error {
 				}
 			}
 
-			srcData := io.Reader(tr)
+			trBuf.Reset(tr)
+			srcData := io.Reader(trBuf)
 			srcHdr := hdr
 
 			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so

+ 1 - 1
archive/wrap.go

@@ -2,7 +2,7 @@ package archive
 
 import (
 	"bytes"
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 	"io/ioutil"
 )
 

+ 12 - 12
builtins/builtins.go

@@ -3,14 +3,14 @@ package builtins
 import (
 	"runtime"
 
-	"github.com/dotcloud/docker/api"
-	apiserver "github.com/dotcloud/docker/api/server"
-	"github.com/dotcloud/docker/daemon/networkdriver/bridge"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/registry"
-	"github.com/dotcloud/docker/server"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/api"
+	apiserver "github.com/docker/docker/api/server"
+	"github.com/docker/docker/daemon/networkdriver/bridge"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/events"
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/registry"
 )
 
 func Register(eng *engine.Engine) error {
@@ -20,6 +20,9 @@ func Register(eng *engine.Engine) error {
 	if err := remote(eng); err != nil {
 		return err
 	}
+	if err := events.New().Install(eng); err != nil {
+		return err
+	}
 	if err := eng.Register("version", dockerVersion); err != nil {
 		return err
 	}
@@ -50,9 +53,6 @@ func remote(eng *engine.Engine) error {
 // These components should be broken off into plugins of their own.
 //
 func daemon(eng *engine.Engine) error {
-	if err := eng.Register("initserver", server.InitServer); err != nil {
-		return err
-	}
 	return eng.Register("init_networkdriver", bridge.InitDriver)
 }
 
@@ -65,7 +65,7 @@ func dockerVersion(job *engine.Job) engine.Status {
 	v.Set("GoVersion", runtime.Version())
 	v.Set("Os", runtime.GOOS)
 	v.Set("Arch", runtime.GOARCH)
-	if kernelVersion, err := utils.GetKernelVersion(); err == nil {
+	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
 		v.Set("KernelVersion", kernelVersion.String())
 	}
 	if _, err := v.WriteTo(job.Stdout); err != nil {

+ 17 - 0
contrib/check-config.sh

@@ -113,6 +113,23 @@ else
 	echo "    $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
 fi
 
+if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then
+	echo -n '- '
+	if command -v apparmor_parser &> /dev/null; then
+		echo "$(wrap_good 'apparmor' 'enabled and tools installed')"
+	else
+		echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')"
+		echo -n '    '
+		if command -v apt-get &> /dev/null; then
+			echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')"
+		elif command -v yum &> /dev/null; then
+			echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')"
+		else
+			echo "$(wrap_color '(look for an "apparmor" package for your distribution)')"
+		fi
+	fi
+fi
+
 flags=(
 	NAMESPACES {NET,PID,IPC,UTS}_NS
 	DEVPTS_MULTIPLE_INSTANCES

+ 35 - 4
contrib/completion/bash/docker

@@ -156,7 +156,7 @@ _docker_build()
 		*)
 			local counter="$(__docker_pos_first_nonflag '-t|--tag')"
 			if [ $cword -eq $counter ]; then
-				_filedir
+				_filedir -d
 			fi
 			;;
 	esac
@@ -485,21 +485,52 @@ _docker_rmi()
 _docker_run()
 {
 	case "$prev" in
-		--cidfile)
+		-a|--attach)
+			COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) )
+			return
+			;;
+		--cidfile|--env-file)
 			_filedir
+			return
 			;;
 		--volumes-from)
 			__docker_containers_all
+			return
 			;;
 		-v|--volume)
-			# TODO something magical with colons and _filedir ?
+			case "$cur" in
+				*:*)
+					# TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
+					;;
+				'')
+					COMPREPLY=( $( compgen -W '/' -- "$cur" ) )
+					compopt -o nospace
+					;;
+				/*)
+					_filedir
+					compopt -o nospace
+					;;
+			esac
 			return
 			;;
 		-e|--env)
 			COMPREPLY=( $( compgen -e -- "$cur" ) )
+			compopt -o nospace
+			return
+			;;
+		--link)
+			case "$cur" in
+				*:*)
+					;;
+				*)
+					__docker_containers_running
+					COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) )
+					compopt -o nospace
+					;;
+			esac
 			return
 			;;
-		--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf)
+		--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf)
 			return
 			;;
 		*)

+ 1 - 1
contrib/completion/fish/docker.fish

@@ -85,7 +85,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Conf
 complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container"
 
 # cp
-complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d 'Copy files/folders from a container's filesystem to the host path'
+complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders from a container's filesystem to the host path"
 
 # diff
 complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem"

+ 234 - 61
contrib/completion/zsh/_docker

@@ -1,58 +1,118 @@
-#compdef docker 
+#compdef docker
 #
 # zsh completion for docker (http://docker.com)
 #
-# version:  0.2.2
-# author:   Felix Riedel
-# license:  BSD License
+# version:  0.3.0
 # github:   https://github.com/felixr/docker-zsh-completion
 #
+# contributors:
+#   - Felix Riedel
+#   - Vincent Bernat
+#
+# license:
+#
+# Copyright (c) 2013, Felix Riedel
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#     * Neither the name of the <organization> nor the
+#       names of its contributors may be used to endorse or promote products
+#       derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
 
 __parse_docker_list() {
-    sed -e '/^ID/d' -e 's/[ ]\{2,\}/|/g' -e 's/ \([hdwm]\)\(inutes\|ays\|ours\|eeks\)/\1/' | awk ' BEGIN {FS="|"} { printf("%s:%7s, %s\n", $1, $4, $2)}'
+        awk '
+NR == 1 {
+    idx=1;i=0;f[i]=0
+    header=$0
+    while ( match(header, /  ([A-Z]+|[A-Z]+ [A-Z]+)/) ) {
+        idx += RSTART+1
+        f[++i]=idx
+        header = substr($0,idx)
+    }
+    f[++i]=999
+}
+
+NR > 1 '"$1"' {
+    for(j=0;j<i;j++) {
+        x[j] = substr($0, f[j], f[j+1]-f[j]-1)
+        gsub(/[ ]+$/, "", x[j])
+    }
+    printf("%s:%7s, %s\n", x[0], x[3], x[1])
+    if (x[6] != "") {
+       split(x[6], names, /,/)
+       for (name in names) printf("%s:%7s, %s\n", names[name], x[3], x[1])
+    }
+}
+'| sed -e 's/ \([hdwm]\)\(inutes\|ays\|ours\|eeks\)/\1/'
 }
 
 __docker_stoppedcontainers() {
     local expl
-    declare -a stoppedcontainers 
-    stoppedcontainers=(${(f)"$(docker ps -a | grep --color=never 'Exit' |  __parse_docker_list )"})
-    _describe -t containers-stopped "Stopped Containers" stoppedcontainers 
+    declare -a stoppedcontainers
+    stoppedcontainers=(${(f)"$(_call_program commands docker ps -a |  __parse_docker_list '&& / Exit/')"})
+    _describe -t containers-stopped "Stopped Containers" stoppedcontainers "$@"
 }
 
 __docker_runningcontainers() {
     local expl
-    declare -a containers 
+    declare -a containers
 
-    containers=(${(f)"$(docker ps | __parse_docker_list)"})
-    _describe -t containers-active "Running Containers" containers 
+    containers=(${(f)"$(_call_program commands docker ps | __parse_docker_list)"})
+    _describe -t containers-active "Running Containers" containers "$@"
 }
 
 __docker_containers () {
-    __docker_stoppedcontainers 
-    __docker_runningcontainers
+    __docker_stoppedcontainers "$@"
+    __docker_runningcontainers "$@"
 }
 
 __docker_images () {
     local expl
     declare -a images
-    images=(${(f)"$(docker images | awk '(NR > 1){printf("%s\\:%s\n", $1,$2)}')"})
-    images=($images ${(f)"$(docker images | awk '(NR > 1){printf("%s:%-15s in %s\n", $3,$2,$1)}')"})
+    images=(${(f)"$(_call_program commands docker images | awk '(NR > 1 && $1 != "<none>"){printf("%s", $1);if ($2 != "<none>") printf("\\:%s", $2); printf("\n")}')"})
+    images=($images ${(f)"$(_call_program commands docker images | awk '(NR > 1){printf("%s:%-15s in %s\n", $3,$2,$1)}')"})
     _describe -t docker-images "Images" images
 }
 
 __docker_tags() {
     local expl
     declare -a tags
-    tags=(${(f)"$(docker images | awk '(NR>1){print $2}'| sort | uniq)"})
+    tags=(${(f)"$(_call_program commands docker images | awk '(NR>1){print $2}'| sort | uniq)"})
     _describe -t docker-tags "tags" tags
 }
 
+__docker_repositories_with_tags() {
+    if compset -P '*:'; then
+        __docker_tags
+    else
+        __docker_repositories -qS ":"
+    fi
+}
+
 __docker_search() {
     # declare -a dockersearch
     local cache_policy
     zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
     if [[ -z "$cache_policy" ]]; then
-        zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy 
+        zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy
     fi
 
     local searchterm cachename
@@ -60,14 +120,14 @@ __docker_search() {
     cachename=_docker-search-$searchterm
 
     local expl
-    local -a result 
+    local -a result
     if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \
         && ! _retrieve_cache ${cachename#_}; then
         _message "Searching for ${searchterm}..."
-        result=(${(f)"$(docker search ${searchterm} | awk '(NR>2){print $1}')"})
+        result=(${(f)"$(_call_program commands docker search ${searchterm} | awk '(NR>2){print $1}')"})
         _store_cache ${cachename#_} result
-    fi 
-    _wanted dockersearch expl 'Available images' compadd -a result 
+    fi
+    _wanted dockersearch expl 'Available images' compadd -a result
 }
 
 __docker_caching_policy()
@@ -81,8 +141,8 @@ __docker_caching_policy()
 __docker_repositories () {
     local expl
     declare -a repos
-    repos=(${(f)"$(docker images | sed -e '1d' -e 's/[ ].*//' | sort | uniq)"})
-    _describe -t docker-repos "Repositories" repos
+    repos=(${(f)"$(_call_program commands docker images | sed -e '1d' -e 's/[ ].*//' | sort | uniq)"})
+    _describe -t docker-repos "Repositories" repos "$@"
 }
 
 __docker_commands () {
@@ -91,15 +151,15 @@ __docker_commands () {
 
     zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
     if [[ -z "$cache_policy" ]]; then
-        zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy 
+        zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy
     fi
 
     if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \
-        && ! _retrieve_cache docker_subcommands; 
+        && ! _retrieve_cache docker_subcommands;
     then
-        _docker_subcommands=(${${(f)"$(_call_program commands 
+        _docker_subcommands=(${${(f)"$(_call_program commands
         docker 2>&1 | sed -e '1,6d' -e '/^[ ]*$/d' -e 's/[ ]*\([^ ]\+\)\s*\([^ ].*\)/\1:\2/' )"}})
-        _docker_subcommands=($_docker_subcommands 'help:Show help for a command') 
+        _docker_subcommands=($_docker_subcommands 'help:Show help for a command')
         _store_cache docker_subcommands _docker_subcommands
     fi
     _describe -t docker-commands "docker command" _docker_subcommands
@@ -108,100 +168,206 @@ __docker_commands () {
 __docker_subcommand () {
     local -a _command_args
     case "$words[1]" in
-        (attach|wait)
-            _arguments ':containers:__docker_runningcontainers'
+        (attach)
+            _arguments \
+                '--no-stdin[Do not attach stdin]' \
+                '--sig-proxy[Proxify all received signal]' \
+                ':containers:__docker_runningcontainers'
             ;;
         (build)
             _arguments \
-                '-t=-:repository:__docker_repositories' \
+                '--no-cache[Do not use cache when building the image]' \
+                '-q[Suppress verbose build output]' \
+                '--rm[Remove intermediate containers after a successful build]' \
+                '-t=-:repository:__docker_repositories_with_tags' \
                 ':path or URL:_directories'
             ;;
         (commit)
             _arguments \
+                '--author=-[Author]:author: ' \
+                '-m=-[Commit message]:message: ' \
+                '--run=-[Configuration automatically applied when the image is run]:configuration: ' \
                 ':container:__docker_containers' \
-                ':repository:__docker_repositories' \
-                ':tag: '
+                ':repository:__docker_repositories_with_tags'
+            ;;
+        (cp)
+            _arguments \
+                ':container:->container' \
+                ':hostpath:_files'
+            case $state in
+                (container)
+                    if compset -P '*:'; then
+                        _files
+                    else
+                        __docker_containers -qS ":"
+                    fi
+                    ;;
+            esac
             ;;
-        (diff|export|logs)
+        (diff|export)
             _arguments '*:containers:__docker_containers'
             ;;
         (history)
-            _arguments '*:images:__docker_images'
+            _arguments \
+                '--no-trunc[Do not truncate output]' \
+                '-q[Only show numeric IDs]' \
+                '*:images:__docker_images'
             ;;
         (images)
             _arguments \
                 '-a[Show all images]' \
+                '--no-trunc[Do not truncate output]' \
+                '-q[Only show numeric IDs]' \
+                '--tree[Output graph in tree format]' \
+                '--viz[Output graph in graphviz format]' \
                 ':repository:__docker_repositories'
             ;;
         (inspect)
-            _arguments '*:containers:__docker_containers'
+            _arguments \
+                '--format=-[Format the output using the given go template]:template: ' \
+                '*:containers:__docker_containers'
             ;;
-        (history)
-            _arguments ':images:__docker_images'
+        (import)
+            _arguments \
+                ':URL:(- http:// file://)' \
+                ':repository:__docker_repositories_with_tags'
+            ;;
+        (info)
+            ;;
+        (import)
+            _arguments \
+                ':URL:(- http:// file://)' \
+                ':repository:__docker_repositories_with_tags'
+            ;;
+        (insert)
+            _arguments '1:containers:__docker_containers' \
+                       '2:URL:(http:// file://)' \
+                       '3:file:_files'
             ;;
         (kill)
             _arguments '*:containers:__docker_runningcontainers'
             ;;
+        (load)
+            ;;
+        (login)
+            _arguments \
+                '-e=-[Email]:email: ' \
+                '-p=-[Password]:password: ' \
+                '-u=-[Username]:username: ' \
+                ':server: '
+            ;;
+        (logs)
+            _arguments \
+                '-f[Follow log output]' \
+                '*:containers:__docker_containers'
+            ;;
         (port)
-            _arguments '1:containers:__docker_runningcontainers'
+            _arguments \
+                '1:containers:__docker_runningcontainers' \
+                '2:port:_ports'
             ;;
         (start)
-            _arguments '*:containers:__docker_stoppedcontainers'
+            _arguments \
+                '-a[Attach container'"'"'s stdout/stderr and forward all signals]' \
+                '-i[Attach container'"'"'s stding]' \
+                '*:containers:__docker_stoppedcontainers'
             ;;
         (rm)
-            _arguments '-v[Remove the volumes associated to the container]' \
+            _arguments \
+                '--link[Remove the specified link and not the underlying container]' \
+                '-v[Remove the volumes associated to the container]' \
                 '*:containers:__docker_stoppedcontainers'
             ;;
         (rmi)
-            _arguments '-v[Remove the volumes associated to the container]' \
+            _arguments \
                 '*:images:__docker_images'
             ;;
-        (top)
-            _arguments '1:containers:__docker_runningcontainers'
-            ;;
         (restart|stop)
             _arguments '-t=-[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)' \
                 '*:containers:__docker_runningcontainers'
             ;;
         (top)
-            _arguments ':containers:__docker_runningcontainers'
+            _arguments \
+                '1:containers:__docker_runningcontainers' \
+                '(-)*:: :->ps-arguments'
+            case $state in
+                (ps-arguments)
+                    _ps
+                    ;;
+            esac
+
             ;;
         (ps)
-            _arguments '-a[Show all containers. Only running containers are shown by default]' \
-                '-h[Show help]' \
-                '--before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \
-            '-n=-[Show n last created containers, include non-running one]:n:(1 5 10 25 50)'
+            _arguments \
+                '-a[Show all containers]' \
+                '--before=-[Show only container created before...]:containers:__docker_containers' \
+                '-l[Show only the latest created container]' \
+                '-n=-[Show n last created containers, include non-running one]:n:(1 5 10 25 50)' \
+                '--no-trunc[Do not truncate output]' \
+                '-q[Only show numeric IDs]' \
+                '-s[Display sizes]' \
+                '--since=-[Show only containers created since...]:containers:__docker_containers'
             ;;
         (tag)
             _arguments \
                 '-f[force]'\
                 ':image:__docker_images'\
-                ':repository:__docker_repositories' \
-                ':tag:__docker_tags'
+                ':repository:__docker_repositories_with_tags'
             ;;
         (run)
             _arguments \
-                '-a=-[Attach to stdin, stdout or stderr]:toggle:(true false)' \
-                '-c=-[CPU shares (relative weight)]:CPU shares: ' \
+                '-P[Publish all exposed ports to the host]' \
+                '-a[Attach to stdin, stdout or stderr]' \
+                '-c=-[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \
+                '--cidfile=-[Write the container ID to the file]:CID file:_files' \
                 '-d[Detached mode: leave the container running in the background]' \
-                '*--dns=[Set custom dns servers]:dns server: ' \
-                '*-e=[Set environment variables]:environment variable: ' \
+                '*--dns=-[Set custom dns servers]:dns server: ' \
+                '*-e=-[Set environment variables]:environment variable: ' \
                 '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \
+                '*--expose=-[Expose a port from the container without publishing it]: ' \
                 '-h=-[Container host name]:hostname:_hosts' \
                 '-i[Keep stdin open even if not attached]' \
+                '--link=-[Add link to another container]:link:->link' \
+                '--lxc-conf=-[Add custom lxc options]:lxc options: ' \
                 '-m=-[Memory limit (in bytes)]:limit: ' \
-                '*-p=-[Expose a container''s port to the host]:port:_ports' \
-                '-t=-[Allocate a pseudo-tty]:toggle:(true false)' \
+                '--name=-[Container name]:name: ' \
+                '*-p=-[Expose a container'"'"'s port to the host]:port:_ports' \
+                '--privileged[Give extended privileges to this container]' \
+                '--rm[Remove intermediate containers when it exits]' \
+                '--sig-proxy[Proxify all received signal]' \
+                '-t[Allocate a pseudo-tty]' \
                 '-u=-[Username or UID]:user:_users' \
                 '*-v=-[Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)]:volume: '\
                 '--volumes-from=-[Mount volumes from the specified container]:volume: ' \
+                '-w=-[Working directory inside the container]:directory:_directories' \
                 '(-):images:__docker_images' \
                 '(-):command: _command_names -e' \
                 '*::arguments: _normal'
-                ;;
+
+            case $state in
+                (link)
+                    if compset -P '*:'; then
+                        _wanted alias expl 'Alias' compadd -E ""
+                    else
+                        __docker_runningcontainers -qS ":"
+                    fi
+                    ;;
+            esac
+
+            ;;
         (pull|search)
             _arguments ':name:__docker_search'
             ;;
+        (push)
+            _arguments ':repository:__docker_repositories_with_tags'
+            ;;
+        (save)
+            _arguments \
+                ':images:__docker_images'
+            ;;
+        (wait)
+            _arguments ':containers:__docker_runningcontainers'
+            ;;
         (help)
             _arguments ':subcommand:__docker_commands'
             ;;
@@ -212,24 +378,31 @@ __docker_subcommand () {
 }
 
 _docker () {
+    # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`.
+    # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`.
+    if [[ $service != docker ]]; then
+        _call_function - _$service
+        return
+    fi
+
     local curcontext="$curcontext" state line
     typeset -A opt_args
 
     _arguments -C \
       '-H=-[tcp://host:port to bind/connect to]:socket: ' \
          '(-): :->command' \
-         '(-)*:: :->option-or-argument' 
+         '(-)*:: :->option-or-argument'
 
     if (( CURRENT == 1 )); then
 
     fi
-    case $state in 
+    case $state in
         (command)
             __docker_commands
             ;;
         (option-or-argument)
             curcontext=${curcontext%:*:*}:docker-$words[1]:
-            __docker_subcommand 
+            __docker_subcommand
             ;;
     esac
 }

+ 0 - 125
contrib/crashTest.go

@@ -1,125 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"io"
-	"log"
-	"net"
-	"os"
-	"os/exec"
-	"path"
-	"time"
-)
-
-var DOCKERPATH = path.Join(os.Getenv("DOCKERPATH"), "docker")
-
-// WARNING: this crashTest will 1) crash your host, 2) remove all containers
-func runDaemon() (*exec.Cmd, error) {
-	os.Remove("/var/run/docker.pid")
-	exec.Command("rm", "-rf", "/var/lib/docker/containers").Run()
-	cmd := exec.Command(DOCKERPATH, "-d")
-	outPipe, err := cmd.StdoutPipe()
-	if err != nil {
-		return nil, err
-	}
-	errPipe, err := cmd.StderrPipe()
-	if err != nil {
-		return nil, err
-	}
-	if err := cmd.Start(); err != nil {
-		return nil, err
-	}
-	go func() {
-		io.Copy(os.Stdout, outPipe)
-	}()
-	go func() {
-		io.Copy(os.Stderr, errPipe)
-	}()
-	return cmd, nil
-}
-
-func crashTest() error {
-	if err := exec.Command("/bin/bash", "-c", "while true; do true; done").Start(); err != nil {
-		return err
-	}
-
-	var endpoint string
-	if ep := os.Getenv("TEST_ENDPOINT"); ep == "" {
-		endpoint = "192.168.56.1:7979"
-	} else {
-		endpoint = ep
-	}
-
-	c := make(chan bool)
-	var conn io.Writer
-
-	go func() {
-		conn, _ = net.Dial("tcp", endpoint)
-		c <- false
-	}()
-	go func() {
-		time.Sleep(2 * time.Second)
-		c <- true
-	}()
-	<-c
-
-	restartCount := 0
-	totalTestCount := 1
-	for {
-		daemon, err := runDaemon()
-		if err != nil {
-			return err
-		}
-		restartCount++
-		//		time.Sleep(5000 * time.Millisecond)
-		var stop bool
-		go func() error {
-			stop = false
-			for i := 0; i < 100 && !stop; {
-				func() error {
-					cmd := exec.Command(DOCKERPATH, "run", "ubuntu", "echo", fmt.Sprintf("%d", totalTestCount))
-					i++
-					totalTestCount++
-					outPipe, err := cmd.StdoutPipe()
-					if err != nil {
-						return err
-					}
-					inPipe, err := cmd.StdinPipe()
-					if err != nil {
-						return err
-					}
-					if err := cmd.Start(); err != nil {
-						return err
-					}
-					if conn != nil {
-						go io.Copy(conn, outPipe)
-					}
-
-					// Expecting error, do not check
-					inPipe.Write([]byte("hello world!!!!!\n"))
-					go inPipe.Write([]byte("hello world!!!!!\n"))
-					go inPipe.Write([]byte("hello world!!!!!\n"))
-					inPipe.Close()
-
-					if err := cmd.Wait(); err != nil {
-						return err
-					}
-					outPipe.Close()
-					return nil
-				}()
-			}
-			return nil
-		}()
-		time.Sleep(20 * time.Second)
-		stop = true
-		if err := daemon.Process.Kill(); err != nil {
-			return err
-		}
-	}
-}
-
-func main() {
-	if err := crashTest(); err != nil {
-		log.Println(err)
-	}
-}

+ 1 - 1
contrib/desktop-integration/iceweasel/Dockerfile

@@ -29,7 +29,7 @@ FROM debian:wheezy
 MAINTAINER Daniel Mizyrycki <daniel@docker.com>
 
 # Install Iceweasel and "sudo"
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq iceweasel sudo
+RUN apt-get update && apt-get install -y iceweasel sudo
 
 # create sysadmin account
 RUN useradd -m -d /data -p saIVpsc0EVTwA sysadmin

+ 1 - 1
contrib/docker-device-tool/device_tool.go

@@ -3,7 +3,7 @@ package main
 import (
 	"flag"
 	"fmt"
-	"github.com/dotcloud/docker/daemon/graphdriver/devmapper"
+	"github.com/docker/docker/daemon/graphdriver/devmapper"
 	"os"
 	"path"
 	"sort"

+ 1 - 1
contrib/host-integration/Dockerfile.dev

@@ -19,7 +19,7 @@ ENV		GOROOT	  /goroot
 ENV		GOPATH	  /go
 ENV		PATH	  $GOROOT/bin:$PATH
 
-RUN		go get github.com/dotcloud/docker && cd /go/src/github.com/dotcloud/docker && git checkout v0.6.3
+RUN		go get github.com/docker/docker && cd /go/src/github.com/docker/docker && git checkout v0.6.3
 ADD		manager.go	/manager/
 RUN		cd /manager && go build -o /usr/bin/manager
 

+ 1 - 1
contrib/host-integration/manager.go

@@ -5,7 +5,7 @@ import (
 	"encoding/json"
 	"flag"
 	"fmt"
-	"github.com/dotcloud/docker"
+	"github.com/docker/docker"
 	"os"
 	"strings"
 	"text/template"

+ 1 - 1
contrib/host-integration/manager.sh

@@ -37,7 +37,7 @@ if [ ! -e "manager/$script" ]; then
 	exit 1
 fi
 
-# TODO https://github.com/dotcloud/docker/issues/734 (docker inspect formatting)
+# TODO https://github.com/docker/docker/issues/734 (docker inspect formatting)
 #if command -v docker > /dev/null 2>&1; then
 #	image="$(docker inspect -f '{{.Image}}' "$cid")"
 #	if [ "$image" ]; then

+ 2 - 0
contrib/init/systemd/MAINTAINERS

@@ -0,0 +1,2 @@
+Lokesh Mandvekar <lsm5@fedoraproject.org> (@lsm5)
+Brandon Philips <brandon.philips@coreos.com> (@philips)

+ 4 - 4
contrib/init/systemd/docker.service

@@ -1,13 +1,13 @@
 [Unit]
 Description=Docker Application Container Engine
 Documentation=http://docs.docker.com
-After=network.target
+After=network.target docker.socket
+Requires=docker.socket
 
 [Service]
-ExecStart=/usr/bin/docker -d
-Restart=on-failure
+ExecStart=/usr/bin/docker -d -H fd://
 LimitNOFILE=1048576
 LimitNPROC=1048576
 
 [Install]
-WantedBy=multi-user.target
+Also=docker.socket

+ 3 - 0
contrib/init/systemd/socket-activation/docker.socket → contrib/init/systemd/docker.socket

@@ -3,6 +3,9 @@ Description=Docker Socket for the API
 
 [Socket]
 ListenStream=/var/run/docker.sock
+SocketMode=0660
+SocketUser=root
+SocketGroup=docker
 
 [Install]
 WantedBy=sockets.target

+ 0 - 13
contrib/init/systemd/socket-activation/docker.service

@@ -1,13 +0,0 @@
-[Unit]
-Description=Docker Application Container Engine
-Documentation=http://docs.docker.com
-After=network.target
-
-[Service]
-ExecStart=/usr/bin/docker -d -H fd://
-Restart=on-failure
-LimitNOFILE=1048576
-LimitNPROC=1048576
-
-[Install]
-WantedBy=multi-user.target

+ 2 - 3
contrib/init/sysvinit-debian/docker

@@ -1,4 +1,5 @@
 #!/bin/sh
+set -e
 
 ### BEGIN INIT INFO
 # Provides:           docker
@@ -130,7 +131,7 @@ case "$1" in
 		;;
 
 	status)
-		status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" docker
+		status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" "$DOCKER_DESC"
 		;;
 
 	*)
@@ -138,5 +139,3 @@ case "$1" in
 		exit 1
 		;;
 esac
-
-exit 0

+ 1 - 1
contrib/init/sysvinit-redhat/docker

@@ -50,7 +50,7 @@ start() {
         pid=$!
         touch $lockfile
         # wait up to 10 seconds for the pidfile to exist.  see
-        # https://github.com/dotcloud/docker/issues/5359
+        # https://github.com/docker/docker/issues/5359
         tries=0
         while [ ! -f $pidfile -a $tries -lt 10 ]; do
             sleep 1

+ 2 - 2
contrib/mkimage-alpine.sh

@@ -19,12 +19,12 @@ tmp() {
 }
 
 apkv() {
-	curl -s $REPO/$ARCH/APKINDEX.tar.gz | tar -Oxz |
+	curl -sSL $REPO/$ARCH/APKINDEX.tar.gz | tar -Oxz |
 		grep '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2
 }
 
 getapk() {
-	curl -s $REPO/$ARCH/apk-tools-static-$(apkv).apk |
+	curl -sSL $REPO/$ARCH/apk-tools-static-$(apkv).apk |
 		tar -xz -C $TMP sbin/apk.static
 }
 

+ 22 - 3
contrib/mkimage-arch.sh

@@ -5,8 +5,13 @@
 set -e
 
 hash pacstrap &>/dev/null || {
-    echo "Could not find pacstrap. Run pacman -S arch-install-scripts"
-    exit 1
+	echo "Could not find pacstrap. Run pacman -S arch-install-scripts"
+	exit 1
+}
+
+hash expect &>/dev/null || {
+	echo "Could not find expect. Run pacman -S expect"
+	exit 1
 }
 
 ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX)
@@ -15,7 +20,21 @@ chmod 755 $ROOTFS
 # packages to ignore for space savings
 PKGIGNORE=linux,jfsutils,lvm2,cryptsetup,groff,man-db,man-pages,mdadm,pciutils,pcmciautils,reiserfsprogs,s-nail,xfsprogs
 
-pacstrap -C ./mkimage-arch-pacman.conf -c -d -G -i $ROOTFS base haveged --ignore $PKGIGNORE
+expect <<EOF
+	set send_slow {1 .1}
+	proc send {ignore arg} {
+		sleep .1
+		exp_send -s -- \$arg
+	}
+	set timeout 60
+
+	spawn pacstrap -C ./mkimage-arch-pacman.conf -c -d -G -i $ROOTFS base haveged --ignore $PKGIGNORE
+	expect {
+		-exact "anyway? \[Y/n\] " { send -- "n\r"; exp_continue }
+		-exact "(default=all): " { send -- "\r"; exp_continue }
+		-exact "installation? \[Y/n\]" { send -- "y\r"; exp_continue }
+	}
+EOF
 
 arch-chroot $ROOTFS /bin/sh -c "haveged -w 1024; pacman-key --init; pkill haveged; pacman -Rs --noconfirm haveged; pacman-key --populate archlinux"
 arch-chroot $ROOTFS /bin/sh -c "ln -s /usr/share/zoneinfo/UTC /etc/localtime"

+ 1 - 1
contrib/mkimage-debootstrap.sh

@@ -144,7 +144,7 @@ if [ -z "$strictDebootstrap" ]; then
 	#  initctl (for some pesky upstart scripts)
 	sudo chroot . dpkg-divert --local --rename --add /sbin/initctl
 	sudo ln -sf /bin/true sbin/initctl
-	# see https://github.com/dotcloud/docker/issues/446#issuecomment-16953173
+	# see https://github.com/docker/docker/issues/446#issuecomment-16953173
 	
 	# shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB)
 	sudo chroot . apt-get clean

+ 20 - 3
contrib/mkimage/debootstrap

@@ -83,7 +83,7 @@ if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then
 		Dir::Cache::srcpkgcache "";
 
 		# Note that we do realize this isn't the ideal way to do this, and are always
-		# open to better suggestions (https://github.com/dotcloud/docker/issues).
+		# open to better suggestions (https://github.com/docker/docker/issues).
 	EOF
 
 	# remove apt-cache translations for fast "apt-get update"
@@ -95,6 +95,21 @@ if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then
 
 	Acquire::Languages "none";
 	EOF
+
+	echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'"
+	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF'
+	# Since Docker users using "RUN apt-get update && apt-get install -y ..." in
+	# their Dockerfiles don't go delete the lists files afterwards, we want them to
+	# be as small as possible on-disk, so we explicitly request "gz" versions and
+	# tell Apt to keep them gzipped on-disk.
+
+	# For comparison, an "apt-get update" layer without this on a pristine
+	# "debian:wheezy" base image was "29.88 MB", where with this it was only
+	# "8.273 MB".
+
+	Acquire::GzipIndexes "true";
+	Acquire::CompressionTypes::Order:: "gz";
+	EOF
 fi
 
 if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then
@@ -123,9 +138,9 @@ if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then
 					" "$rootfsDir/etc/apt/sources.list"
 					echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list"
 					# LTS
-					if [ "$suite" = 'squeeze' ]; then
+					if [ "$suite" = 'squeeze' -o "$suite" = 'oldstable' ]; then
 						head -1 "$rootfsDir/etc/apt/sources.list" \
-							| sed "s/ $suite / ${suite}-lts /" \
+							| sed "s/ $suite / squeeze-lts /" \
 								>> "$rootfsDir/etc/apt/sources.list"
 					fi
 				)
@@ -173,4 +188,6 @@ fi
 	# delete all the apt list files since they're big and get stale quickly
 	rm -rf "$rootfsDir/var/lib/apt/lists"/*
 	# this forces "apt-get update" in dependent images, which is also good
+	
+	mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing."
 )

+ 64 - 0
contrib/nuke-graph-directory.sh

@@ -0,0 +1,64 @@
+#!/bin/sh
+set -e
+
+dir="$1"
+
+if [ -z "$dir" ]; then
+	{
+		echo 'This script is for destroying old /var/lib/docker directories more safely than'
+		echo '  "rm -rf", which can cause data loss or other serious issues.'
+		echo
+		echo "usage: $0 directory"
+		echo "   ie: $0 /var/lib/docker"
+	} >&2
+	exit 1
+fi
+
+if [ "$(id -u)" != 0 ]; then
+	echo >&2 "error: $0 must be run as root"
+	exit 1
+fi
+
+if [ ! -d "$dir" ]; then
+	echo >&2 "error: $dir is not a directory"
+	exit 1
+fi
+
+dir="$(readlink -f "$dir")"
+
+echo
+echo "Nuking $dir ..."
+echo '  (if this is wrong, press Ctrl+C NOW!)'
+echo
+
+( set -x; sleep 10 )
+echo
+
+dir_in_dir() {
+	inner="$1"
+	outer="$2"
+	[ "${inner#$outer}" != "$inner" ]
+}
+
+# let's start by unmounting any submounts in $dir
+#   (like -v /home:... for example - DON'T DELETE MY HOME DIRECTORY BRU!)
+for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do
+	mount="$(readlink -f "$mount" || true)"
+	if dir_in_dir "$mount" "$dir"; then
+		( set -x; umount -f "$mount" )
+	fi
+done
+
+# now, let's go destroy individual btrfs subvolumes, if any exist
+if command -v btrfs &> /dev/null; then
+	root="$(df "$dir" | awk 'NR>1 { print $NF }')"
+	for subvol in $(btrfs subvolume list -o "$root" 2>/dev/null | awk -F' path ' '{ print $2 }'); do
+		subvolDir="$root/$subvol"
+		if dir_in_dir "$subvolDir" "$dir"; then
+			( set -x; btrfs subvolume delete "$subvolDir" )
+		fi
+	done
+fi
+
+# finally, DESTROY ALL THINGS
+( set -x; rm -rf "$dir" )

+ 0 - 10
contrib/prepare-commit-msg.hook

@@ -1,10 +0,0 @@
-#!/bin/sh
-#       Auto sign all commits to allow them to be used by the Docker project.
-#       see https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md#sign-your-work
-#
-GH_USER=$(git config --get github.user)
-SOB=$(git var GIT_AUTHOR_IDENT | sed -n "s/^\(.*>\).*$/Docker-DCO-1.1-Signed-off-by: \1 \(github: $GH_USER\)/p")
-grep -qs "^$SOB" "$1" || { 
-	echo 
-	echo "$SOB" 
-} >> "$1"

+ 6 - 0
daemon/MAINTAINERS

@@ -0,0 +1,6 @@
+Solomon Hykes <solomon@docker.com> (@shykes)
+Victor Vieux <vieux@docker.com> (@vieux)
+Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
+Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)
+Tibor Vass <teabee89@gmail.com> (@tiborvass)
+volumes.go: Brian Goff <cpuguy83@gmail.com> (@cpuguy83)

+ 129 - 16
daemon/attach.go

@@ -1,11 +1,124 @@
 package daemon
 
 import (
+	"encoding/json"
+	"fmt"
 	"io"
+	"os"
+	"time"
 
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/utils"
 )
 
+func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
+	}
+
+	var (
+		name   = job.Args[0]
+		logs   = job.GetenvBool("logs")
+		stream = job.GetenvBool("stream")
+		stdin  = job.GetenvBool("stdin")
+		stdout = job.GetenvBool("stdout")
+		stderr = job.GetenvBool("stderr")
+	)
+
+	container := daemon.Get(name)
+	if container == nil {
+		return job.Errorf("No such container: %s", name)
+	}
+
+	//logs
+	if logs {
+		cLog, err := container.ReadLog("json")
+		if err != nil && os.IsNotExist(err) {
+			// Legacy logs
+			log.Debugf("Old logs format")
+			if stdout {
+				cLog, err := container.ReadLog("stdout")
+				if err != nil {
+					log.Errorf("Error reading logs (stdout): %s", err)
+				} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
+					log.Errorf("Error streaming logs (stdout): %s", err)
+				}
+			}
+			if stderr {
+				cLog, err := container.ReadLog("stderr")
+				if err != nil {
+					log.Errorf("Error reading logs (stderr): %s", err)
+				} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
+					log.Errorf("Error streaming logs (stderr): %s", err)
+				}
+			}
+		} else if err != nil {
+			log.Errorf("Error reading logs (json): %s", err)
+		} else {
+			dec := json.NewDecoder(cLog)
+			for {
+				l := &jsonlog.JSONLog{}
+
+				if err := dec.Decode(l); err == io.EOF {
+					break
+				} else if err != nil {
+					log.Errorf("Error streaming logs: %s", err)
+					break
+				}
+				if l.Stream == "stdout" && stdout {
+					fmt.Fprintf(job.Stdout, "%s", l.Log)
+				}
+				if l.Stream == "stderr" && stderr {
+					fmt.Fprintf(job.Stderr, "%s", l.Log)
+				}
+			}
+		}
+	}
+
+	//stream
+	if stream {
+		var (
+			cStdin           io.ReadCloser
+			cStdout, cStderr io.Writer
+			cStdinCloser     io.Closer
+		)
+
+		if stdin {
+			r, w := io.Pipe()
+			go func() {
+				defer w.Close()
+				defer log.Debugf("Closing buffered stdin pipe")
+				io.Copy(w, job.Stdin)
+			}()
+			cStdin = r
+			cStdinCloser = job.Stdin
+		}
+		if stdout {
+			cStdout = job.Stdout
+		}
+		if stderr {
+			cStderr = job.Stderr
+		}
+
+		<-daemon.Attach(container, cStdin, cStdinCloser, cStdout, cStderr)
+
+		// If we are in stdinonce mode, wait for the process to end
+		// otherwise, simply return
+		if container.Config.StdinOnce && !container.Config.Tty {
+			container.State.WaitStop(-1 * time.Second)
+		}
+	}
+	return engine.StatusOK
+}
+
+// FIXME: this should be private, and every outside subsystem
+// should go through the "container_attach" job. But that would require
+// that job to be properly documented, as well as the relationship betweem
+// Attach and ContainerAttach.
+//
+// This method is in use by builder/builder.go.
 func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
 	var (
 		cStdout, cStderr io.ReadCloser
@@ -19,8 +132,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 			errors <- err
 		} else {
 			go func() {
-				utils.Debugf("attach: stdin: begin")
-				defer utils.Debugf("attach: stdin: end")
+				log.Debugf("attach: stdin: begin")
+				defer log.Debugf("attach: stdin: end")
 				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
 				if container.Config.StdinOnce && !container.Config.Tty {
 					defer cStdin.Close()
@@ -43,7 +156,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 					err = nil
 				}
 				if err != nil {
-					utils.Errorf("attach: stdin: %s", err)
+					log.Errorf("attach: stdin: %s", err)
 				}
 				errors <- err
 			}()
@@ -56,8 +169,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 		} else {
 			cStdout = p
 			go func() {
-				utils.Debugf("attach: stdout: begin")
-				defer utils.Debugf("attach: stdout: end")
+				log.Debugf("attach: stdout: begin")
+				defer log.Debugf("attach: stdout: end")
 				// If we are in StdinOnce mode, then close stdin
 				if container.Config.StdinOnce && stdin != nil {
 					defer stdin.Close()
@@ -70,7 +183,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 					err = nil
 				}
 				if err != nil {
-					utils.Errorf("attach: stdout: %s", err)
+					log.Errorf("attach: stdout: %s", err)
 				}
 				errors <- err
 			}()
@@ -81,7 +194,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 				defer stdinCloser.Close()
 			}
 			if cStdout, err := container.StdoutPipe(); err != nil {
-				utils.Errorf("attach: stdout pipe: %s", err)
+				log.Errorf("attach: stdout pipe: %s", err)
 			} else {
 				io.Copy(&utils.NopWriter{}, cStdout)
 			}
@@ -94,8 +207,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 		} else {
 			cStderr = p
 			go func() {
-				utils.Debugf("attach: stderr: begin")
-				defer utils.Debugf("attach: stderr: end")
+				log.Debugf("attach: stderr: begin")
+				defer log.Debugf("attach: stderr: end")
 				// If we are in StdinOnce mode, then close stdin
 				if container.Config.StdinOnce && stdin != nil {
 					defer stdin.Close()
@@ -108,7 +221,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 					err = nil
 				}
 				if err != nil {
-					utils.Errorf("attach: stderr: %s", err)
+					log.Errorf("attach: stderr: %s", err)
 				}
 				errors <- err
 			}()
@@ -120,7 +233,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 			}
 
 			if cStderr, err := container.StderrPipe(); err != nil {
-				utils.Errorf("attach: stdout pipe: %s", err)
+				log.Errorf("attach: stdout pipe: %s", err)
 			} else {
 				io.Copy(&utils.NopWriter{}, cStderr)
 			}
@@ -140,14 +253,14 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 		// FIXME: how to clean up the stdin goroutine without the unwanted side effect
 		// of closing the passed stdin? Add an intermediary io.Pipe?
 		for i := 0; i < nJobs; i += 1 {
-			utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
+			log.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
 			if err := <-errors; err != nil {
-				utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err)
+				log.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err)
 				return err
 			}
-			utils.Debugf("attach: job %d completed successfully", i+1)
+			log.Debugf("attach: job %d completed successfully", i+1)
 		}
-		utils.Debugf("attach: all jobs completed successfully")
+		log.Debugf("attach: all jobs completed successfully")
 		return nil
 	})
 }

+ 129 - 31
server/buildfile.go → daemon/build.go

@@ -1,4 +1,4 @@
-package server
+package daemon
 
 import (
 	"crypto/sha256"
@@ -10,6 +10,7 @@ import (
 	"io/ioutil"
 	"net/url"
 	"os"
+	"os/exec"
 	"path"
 	"path/filepath"
 	"reflect"
@@ -19,16 +20,99 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon"
-	"github.com/dotcloud/docker/nat"
-	"github.com/dotcloud/docker/pkg/symlink"
-	"github.com/dotcloud/docker/pkg/system"
-	"github.com/dotcloud/docker/registry"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/pkg/tarsum"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
 )
 
+func (daemon *Daemon) CmdBuild(job *engine.Job) engine.Status {
+	if len(job.Args) != 0 {
+		return job.Errorf("Usage: %s\n", job.Name)
+	}
+	var (
+		remoteURL      = job.Getenv("remote")
+		repoName       = job.Getenv("t")
+		suppressOutput = job.GetenvBool("q")
+		noCache        = job.GetenvBool("nocache")
+		rm             = job.GetenvBool("rm")
+		forceRm        = job.GetenvBool("forcerm")
+		authConfig     = &registry.AuthConfig{}
+		configFile     = &registry.ConfigFile{}
+		tag            string
+		context        io.ReadCloser
+	)
+	job.GetenvJson("authConfig", authConfig)
+	job.GetenvJson("configFile", configFile)
+	repoName, tag = parsers.ParseRepositoryTag(repoName)
+
+	if remoteURL == "" {
+		context = ioutil.NopCloser(job.Stdin)
+	} else if utils.IsGIT(remoteURL) {
+		if !strings.HasPrefix(remoteURL, "git://") {
+			remoteURL = "https://" + remoteURL
+		}
+		root, err := ioutil.TempDir("", "docker-build-git")
+		if err != nil {
+			return job.Error(err)
+		}
+		defer os.RemoveAll(root)
+
+		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
+			return job.Errorf("Error trying to use git: %s (%s)", err, output)
+		}
+
+		c, err := archive.Tar(root, archive.Uncompressed)
+		if err != nil {
+			return job.Error(err)
+		}
+		context = c
+	} else if utils.IsURL(remoteURL) {
+		f, err := utils.Download(remoteURL)
+		if err != nil {
+			return job.Error(err)
+		}
+		defer f.Body.Close()
+		dockerFile, err := ioutil.ReadAll(f.Body)
+		if err != nil {
+			return job.Error(err)
+		}
+		c, err := archive.Generate("Dockerfile", string(dockerFile))
+		if err != nil {
+			return job.Error(err)
+		}
+		context = c
+	}
+	defer context.Close()
+
+	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
+	b := NewBuildFile(daemon, daemon.eng,
+		&utils.StdoutFormater{
+			Writer:          job.Stdout,
+			StreamFormatter: sf,
+		},
+		&utils.StderrFormater{
+			Writer:          job.Stdout,
+			StreamFormatter: sf,
+		},
+		!suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile)
+	id, err := b.Build(context)
+	if err != nil {
+		return job.Error(err)
+	}
+	if repoName != "" {
+		daemon.Repositories().Set(repoName, tag, id, false)
+	}
+	return engine.StatusOK
+}
+
 var (
 	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
 )
@@ -40,15 +124,15 @@ type BuildFile interface {
 }
 
 type buildFile struct {
-	daemon *daemon.Daemon
-	srv    *Server
+	daemon *Daemon
+	eng    *engine.Engine
 
 	image      string
 	maintainer string
 	config     *runconfig.Config
 
 	contextPath string
-	context     *utils.TarSum
+	context     *tarsum.TarSum
 
 	verbose      bool
 	utilizeCache bool
@@ -67,6 +151,9 @@ type buildFile struct {
 	// Deprecated, original writer used for ImagePull. To be removed.
 	outOld io.Writer
 	sf     *utils.StreamFormatter
+
+	// cmdSet indicates if CMD was set in current Dockerfile
+	cmdSet bool
 }
 
 func (b *buildFile) clearTmp(containers map[string]struct{}) {
@@ -85,7 +172,7 @@ func (b *buildFile) CmdFrom(name string) error {
 	image, err := b.daemon.Repositories().LookupImage(name)
 	if err != nil {
 		if b.daemon.Graph().IsNotExist(err) {
-			remote, tag := utils.ParseRepositoryTag(name)
+			remote, tag := parsers.ParseRepositoryTag(name)
 			pullRegistryAuth := b.authConfig
 			if len(b.configFile.Configs) > 0 {
 				// The request came with a full auth config file, we prefer to use that
@@ -96,7 +183,7 @@ func (b *buildFile) CmdFrom(name string) error {
 				resolvedAuth := b.configFile.ResolveAuthConfig(endpoint)
 				pullRegistryAuth = &resolvedAuth
 			}
-			job := b.srv.Eng.Job("pull", remote, tag)
+			job := b.eng.Job("pull", remote, tag)
 			job.SetenvBool("json", b.sf.Json())
 			job.SetenvBool("parallel", true)
 			job.SetenvJson("authConfig", pullRegistryAuth)
@@ -118,7 +205,7 @@ func (b *buildFile) CmdFrom(name string) error {
 		b.config = image.Config
 	}
 	if b.config.Env == nil || len(b.config.Env) == 0 {
-		b.config.Env = append(b.config.Env, "HOME=/", "PATH="+daemon.DefaultPathEnv)
+		b.config.Env = append(b.config.Env, "PATH="+DefaultPathEnv)
 	}
 	// Process ONBUILD triggers if they exist
 	if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
@@ -167,20 +254,20 @@ func (b *buildFile) CmdMaintainer(name string) error {
 
 // probeCache checks to see if image-caching is enabled (`b.utilizeCache`)
 // and if so attempts to look up the current `b.image` and `b.config` pair
-// in the current server `b.srv`. If an image is found, probeCache returns
+// in the current server `b.daemon`. If an image is found, probeCache returns
 // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
 // is any error, it returns `(false, err)`.
 func (b *buildFile) probeCache() (bool, error) {
 	if b.utilizeCache {
-		if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
+		if cache, err := b.daemon.ImageGetCached(b.image, b.config); err != nil {
 			return false, err
 		} else if cache != nil {
 			fmt.Fprintf(b.outStream, " ---> Using cache\n")
-			utils.Debugf("[BUILDER] Use cached version")
+			log.Debugf("[BUILDER] Use cached version")
 			b.image = cache.ID
 			return true, nil
 		} else {
-			utils.Debugf("[BUILDER] Cache miss")
+			log.Debugf("[BUILDER] Cache miss")
 		}
 	}
 	return false, nil
@@ -196,12 +283,13 @@ func (b *buildFile) CmdRun(args string) error {
 	}
 
 	cmd := b.config.Cmd
-	b.config.Cmd = nil
+	// set Cmd manually, this is a special case only for Dockerfiles
+	b.config.Cmd = config.Cmd
 	runconfig.Merge(b.config, config)
 
 	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
 
-	utils.Debugf("Command to be executed: %v", b.config.Cmd)
+	log.Debugf("Command to be executed: %v", b.config.Cmd)
 
 	hit, err := b.probeCache()
 	if err != nil {
@@ -291,7 +379,7 @@ func (b *buildFile) CmdEnv(args string) error {
 func (b *buildFile) buildCmdFromJson(args string) []string {
 	var cmd []string
 	if err := json.Unmarshal([]byte(args), &cmd); err != nil {
-		utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
+		log.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
 		cmd = []string{"/bin/sh", "-c", args}
 	}
 	return cmd
@@ -303,12 +391,17 @@ func (b *buildFile) CmdCmd(args string) error {
 	if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
 		return err
 	}
+	b.cmdSet = true
 	return nil
 }
 
 func (b *buildFile) CmdEntrypoint(args string) error {
 	entrypoint := b.buildCmdFromJson(args)
 	b.config.Entrypoint = entrypoint
+	// if there is no cmd in current Dockerfile - cleanup cmd
+	if !b.cmdSet {
+		b.config.Cmd = nil
+	}
 	if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
 		return err
 	}
@@ -404,7 +497,7 @@ func (b *buildFile) checkPathForAddition(orig string) error {
 	return nil
 }
 
-func (b *buildFile) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
+func (b *buildFile) addContext(container *Container, orig, dest string, decompress bool) error {
 	var (
 		err        error
 		destExists = true
@@ -459,7 +552,7 @@ func (b *buildFile) addContext(container *daemon.Container, orig, dest string, d
 		if err := archive.UntarPath(origPath, tarDest); err == nil {
 			return nil
 		} else if err != io.EOF {
-			utils.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
+			log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
 		}
 	}
 
@@ -553,7 +646,7 @@ func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecomp
 		if err != nil {
 			return err
 		}
-		tarSum := &utils.TarSum{Reader: r, DisableCompression: true}
+		tarSum := &tarsum.TarSum{Reader: r, DisableCompression: true}
 		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
 			return err
 		}
@@ -656,7 +749,7 @@ func (b *buildFile) CmdAdd(args string) error {
 	return b.runContextCommand(args, true, true, "ADD")
 }
 
-func (b *buildFile) create() (*daemon.Container, error) {
+func (b *buildFile) create() (*Container, error) {
 	if b.image == "" {
 		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
 	}
@@ -677,10 +770,15 @@ func (b *buildFile) create() (*daemon.Container, error) {
 	return c, nil
 }
 
-func (b *buildFile) run(c *daemon.Container) error {
+func (b *buildFile) run(c *Container) error {
 	var errCh chan error
 	if b.verbose {
 		errCh = utils.Go(func() error {
+			// FIXME: call the 'attach' job so that daemon.Attach can be made private
+			//
+			// FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach
+			// but without hijacking for stdin. Also, with attach there can be race
+			// condition because of some output already was printed before it.
 			return <-b.daemon.Attach(c, nil, nil, b.outStream, b.errStream)
 		})
 	}
@@ -775,7 +873,7 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
 		return "", err
 	}
 
-	b.context = &utils.TarSum{Reader: decompressedStream, DisableCompression: true}
+	b.context = &tarsum.TarSum{Reader: decompressedStream, DisableCompression: true}
 	if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
 		return "", err
 	}
@@ -889,10 +987,10 @@ func fixPermissions(destination string, uid, gid int) error {
 	})
 }
 
-func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, forceRm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile {
+func NewBuildFile(d *Daemon, eng *engine.Engine, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, forceRm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile {
 	return &buildFile{
-		daemon:        srv.daemon,
-		srv:           srv,
+		daemon:        d,
+		eng:           eng,
 		config:        &runconfig.Config{},
 		outStream:     outStream,
 		errStream:     errStream,

+ 32 - 0
daemon/changes.go

@@ -0,0 +1,32 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status {
+	if n := len(job.Args); n != 1 {
+		return job.Errorf("Usage: %s CONTAINER", job.Name)
+	}
+	name := job.Args[0]
+	if container := daemon.Get(name); container != nil {
+		outs := engine.NewTable("", 0)
+		changes, err := container.Changes()
+		if err != nil {
+			return job.Error(err)
+		}
+		for _, change := range changes {
+			out := &engine.Env{}
+			if err := out.Import(change); err != nil {
+				return job.Error(err)
+			}
+			outs.Add(out)
+		}
+		if _, err := outs.WriteListTo(job.Stdout); err != nil {
+			return job.Error(err)
+		}
+	} else {
+		return job.Errorf("No such container: %s", name)
+	}
+	return engine.StatusOK
+}

+ 84 - 0
daemon/commit.go

@@ -0,0 +1,84 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/runconfig"
+)
+
+func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
+	}
+	name := job.Args[0]
+
+	container := daemon.Get(name)
+	if container == nil {
+		return job.Errorf("No such container: %s", name)
+	}
+
+	var (
+		config    = container.Config
+		newConfig runconfig.Config
+	)
+
+	if err := job.GetenvJson("config", &newConfig); err != nil {
+		return job.Error(err)
+	}
+
+	if err := runconfig.Merge(&newConfig, config); err != nil {
+		return job.Error(err)
+	}
+
+	img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
+	if err != nil {
+		return job.Error(err)
+	}
+	job.Printf("%s\n", img.ID)
+	return engine.StatusOK
+}
+
+// Commit creates a new filesystem image from the current state of a container.
+// The image can optionally be tagged into a repository
+func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, pause bool, config *runconfig.Config) (*image.Image, error) {
+	if pause {
+		container.Pause()
+		defer container.Unpause()
+	}
+
+	if err := container.Mount(); err != nil {
+		return nil, err
+	}
+	defer container.Unmount()
+
+	rwTar, err := container.ExportRw()
+	if err != nil {
+		return nil, err
+	}
+	defer rwTar.Close()
+
+	// Create a new image from the container's base layers + a new layer from container changes
+	var (
+		containerID, containerImage string
+		containerConfig             *runconfig.Config
+	)
+
+	if container != nil {
+		containerID = container.ID
+		containerImage = container.Image
+		containerConfig = container.Config
+	}
+
+	img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
+	if err != nil {
+		return nil, err
+	}
+
+	// Register the image if needed
+	if repository != "" {
+		if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil {
+			return img, err
+		}
+	}
+	return img, nil
+}

+ 70 - 0
daemon/config.go

@@ -0,0 +1,70 @@
+package daemon
+
+import (
+	"net"
+
+	"github.com/docker/docker/daemon/networkdriver"
+	"github.com/docker/docker/opts"
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+const (
+	defaultNetworkMtu    = 1500
+	DisableNetworkBridge = "none"
+)
+
+// Config define the configuration of a docker daemon
+// These are the configuration settings that you pass
+// to the docker daemon when you launch it with say: `docker -d -e lxc`
+// FIXME: separate runtime configuration from http api configuration
+type Config struct {
+	Pidfile                     string
+	Root                        string
+	AutoRestart                 bool
+	Dns                         []string
+	DnsSearch                   []string
+	EnableIptables              bool
+	EnableIpForward             bool
+	DefaultIp                   net.IP
+	BridgeIface                 string
+	BridgeIP                    string
+	InterContainerCommunication bool
+	GraphDriver                 string
+	GraphOptions                []string
+	ExecDriver                  string
+	Mtu                         int
+	DisableNetwork              bool
+	EnableSelinuxSupport        bool
+	Context                     map[string][]string
+}
+
+// InstallFlags adds command-line options to the top-level flag parser for
+// the current process.
+// Subsequent calls to `flag.Parse` will populate config with values parsed
+// from the command-line.
+func (config *Config) InstallFlags() {
+	flag.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
+	flag.StringVar(&config.Root, []string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the Docker runtime")
+	flag.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, "--restart on the daemon has been deprecated infavor of --restart policies on docker run")
+	flag.BoolVar(&config.EnableIptables, []string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules")
+	flag.BoolVar(&config.EnableIpForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
+	flag.StringVar(&config.BridgeIP, []string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
+	flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking")
+	flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication")
+	flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver")
+	flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver")
+	flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support. SELinux does not presently support the BTRFS storage driver")
+	flag.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, "Set the containers network MTU\nif no value is provided: default to the default route MTU or 1500 if no default route is available")
+	opts.IPVar(&config.DefaultIp, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports")
+	opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options")
+	// FIXME: why the inconsistency between "hosts" and "sockets"?
+	opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers")
+	opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
+}
+
+func GetDefaultNetworkMtu() int {
+	if iface, err := networkdriver.GetDefaultRouteIface(); err == nil {
+		return iface.MTU
+	}
+	return defaultNetworkMtu
+}

+ 94 - 128
daemon/container.go

@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"log"
 	"os"
 	"path"
 	"path/filepath"
@@ -17,18 +16,21 @@ import (
 
 	"github.com/docker/libcontainer/devices"
 	"github.com/docker/libcontainer/label"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/links"
-	"github.com/dotcloud/docker/nat"
-	"github.com/dotcloud/docker/pkg/networkfs/etchosts"
-	"github.com/dotcloud/docker/pkg/networkfs/resolvconf"
-	"github.com/dotcloud/docker/pkg/symlink"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
+
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/links"
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/pkg/broadcastwriter"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/networkfs/etchosts"
+	"github.com/docker/docker/pkg/networkfs/resolvconf"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
 )
 
 const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
@@ -66,13 +68,14 @@ type Container struct {
 	ExecDriver     string
 
 	command   *execdriver.Command
-	stdout    *utils.WriteBroadcaster
-	stderr    *utils.WriteBroadcaster
+	stdout    *broadcastwriter.BroadcastWriter
+	stderr    *broadcastwriter.BroadcastWriter
 	stdin     io.ReadCloser
 	stdinPipe io.WriteCloser
 
 	daemon                   *Daemon
 	MountLabel, ProcessLabel string
+	RestartCount             int
 
 	Volumes map[string]string
 	// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
@@ -81,6 +84,7 @@ type Container struct {
 	hostConfig *runconfig.HostConfig
 
 	activeLinks map[string]*links.Link
+	monitor     *containerMonitor
 }
 
 func (container *Container) FromDisk() error {
@@ -105,7 +109,7 @@ func (container *Container) FromDisk() error {
 	return container.readHostConfig()
 }
 
-func (container *Container) ToDisk() error {
+func (container *Container) toDisk() error {
 	data, err := json.Marshal(container)
 	if err != nil {
 		return err
@@ -124,6 +128,13 @@ func (container *Container) ToDisk() error {
 	return container.WriteHostConfig()
 }
 
+func (container *Container) ToDisk() error {
+	container.Lock()
+	err := container.toDisk()
+	container.Unlock()
+	return err
+}
+
 func (container *Container) readHostConfig() error {
 	container.hostConfig = &runconfig.HostConfig{}
 	// If the hostconfig file does not exist, do not read it.
@@ -160,6 +171,13 @@ func (container *Container) WriteHostConfig() error {
 	return ioutil.WriteFile(pth, data, 0666)
 }
 
+func (container *Container) LogEvent(action string) {
+	d := container.daemon
+	if err := d.eng.Job("log", action, container.ID, d.Repositories().ImageName(container.Image)).Run(); err != nil {
+		log.Errorf("Error logging event %s for %s: %s", action, container.ID, err)
+	}
+}
+
 func (container *Container) getResourcePath(path string) (string, error) {
 	cleanPath := filepath.Join("/", path)
 	return symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs)
@@ -208,6 +226,20 @@ func populateCommand(c *Container, env []string) error {
 		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
 	}
 
+	// Build lists of devices allowed and created within the container.
+	userSpecifiedDevices := make([]*devices.Device, len(c.hostConfig.Devices))
+	for i, deviceMapping := range c.hostConfig.Devices {
+		device, err := devices.GetDevice(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
+		device.Path = deviceMapping.PathInContainer
+		if err != nil {
+			return fmt.Errorf("error gathering device information while adding custom device %s", err)
+		}
+		userSpecifiedDevices[i] = device
+	}
+	allowedDevices := append(devices.DefaultAllowedDevices, userSpecifiedDevices...)
+
+	autoCreatedDevices := append(devices.DefaultAutoCreatedDevices, userSpecifiedDevices...)
+
 	// TODO: this can be removed after lxc-conf is fully deprecated
 	mergeLxcConfIntoOptions(c.hostConfig, context)
 
@@ -230,8 +262,10 @@ func populateCommand(c *Container, env []string) error {
 		User:               c.Config.User,
 		Config:             context,
 		Resources:          resources,
-		AllowedDevices:     devices.DefaultAllowedDevices,
-		AutoCreatedDevices: devices.DefaultAutoCreatedDevices,
+		AllowedDevices:     allowedDevices,
+		AutoCreatedDevices: autoCreatedDevices,
+		CapAdd:             c.hostConfig.CapAdd,
+		CapDrop:            c.hostConfig.CapDrop,
 	}
 	c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
 	c.command.Env = env
@@ -245,6 +279,7 @@ func (container *Container) Start() (err error) {
 	if container.State.IsRunning() {
 		return nil
 	}
+
 	// if we encounter and error during start we need to ensure that any other
 	// setup has been cleaned up properly
 	defer func() {
@@ -280,9 +315,6 @@ func (container *Container) Start() (err error) {
 	if err := setupMountsForContainer(container); err != nil {
 		return err
 	}
-	if err := container.startLoggingToDisk(); err != nil {
-		return err
-	}
 
 	return container.waitForStart()
 }
@@ -463,40 +495,8 @@ func (container *Container) releaseNetwork() {
 	container.NetworkSettings = &NetworkSettings{}
 }
 
-func (container *Container) monitor(callback execdriver.StartCallback) error {
-	var (
-		err      error
-		exitCode int
-	)
-
-	pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
-	exitCode, err = container.daemon.Run(container, pipes, callback)
-	if err != nil {
-		utils.Errorf("Error running container: %s", err)
-	}
-	container.State.SetStopped(exitCode)
-
-	// Cleanup
-	container.cleanup()
-
-	// Re-create a brand new stdin pipe once the container exited
-	if container.Config.OpenStdin {
-		container.stdin, container.stdinPipe = io.Pipe()
-	}
-	if container.daemon != nil && container.daemon.srv != nil {
-		container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image))
-	}
-	if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() {
-		// FIXME: here is race condition between two RUN instructions in Dockerfile
-		// because they share same runconfig and change image. Must be fixed
-		// in server/buildfile.go
-		if err := container.ToDisk(); err != nil {
-			utils.Errorf("Error dumping container %s state to disk: %s\n", container.ID, err)
-		}
-	}
-	return err
-}
-
+// cleanup releases any network resources allocated to the container along with any rules
+// around how containers are linked together.  It also unmounts the container's root filesystem.
 func (container *Container) cleanup() {
 	container.releaseNetwork()
 
@@ -506,30 +506,14 @@ func (container *Container) cleanup() {
 			link.Disable()
 		}
 	}
-	if container.Config.OpenStdin {
-		if err := container.stdin.Close(); err != nil {
-			utils.Errorf("%s: Error close stdin: %s", container.ID, err)
-		}
-	}
-	if err := container.stdout.CloseWriters(); err != nil {
-		utils.Errorf("%s: Error close stdout: %s", container.ID, err)
-	}
-	if err := container.stderr.CloseWriters(); err != nil {
-		utils.Errorf("%s: Error close stderr: %s", container.ID, err)
-	}
-	if container.command != nil && container.command.Terminal != nil {
-		if err := container.command.Terminal.Close(); err != nil {
-			utils.Errorf("%s: Error closing terminal: %s", container.ID, err)
-		}
-	}
 
 	if err := container.Unmount(); err != nil {
-		log.Printf("%v: Failed to umount filesystem: %v", container.ID, err)
+		log.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
 	}
 }
 
 func (container *Container) KillSig(sig int) error {
-	utils.Debugf("Sending %d to %s", sig, container.ID)
+	log.Debugf("Sending %d to %s", sig, container.ID)
 	container.Lock()
 	defer container.Unlock()
 
@@ -541,6 +525,18 @@ func (container *Container) KillSig(sig int) error {
 	if !container.State.IsRunning() {
 		return nil
 	}
+
+	// signal to the monitor that it should not restart the container
+	// after we send the kill signal
+	container.monitor.ExitOnNext()
+
+	// if the container is currently restarting we do not need to send the signal
+	// to the process.  Telling the monitor that it should exit on it's next event
+	// loop is enough
+	if container.State.IsRestarting() {
+		return nil
+	}
+
 	return container.daemon.Kill(container, sig)
 }
 
@@ -578,7 +574,7 @@ func (container *Container) Kill() error {
 	if _, err := container.State.WaitStop(10 * time.Second); err != nil {
 		// Ensure that we don't kill ourselves
 		if pid := container.State.GetPid(); pid != 0 {
-			log.Printf("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
+			log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
 			if err := syscall.Kill(pid, 9); err != nil {
 				return err
 			}
@@ -596,7 +592,7 @@ func (container *Container) Stop(seconds int) error {
 
 	// 1. Send a SIGTERM
 	if err := container.KillSig(15); err != nil {
-		log.Print("Failed to send SIGTERM to the process, force killing")
+		log.Infof("Failed to send SIGTERM to the process, force killing")
 		if err := container.KillSig(9); err != nil {
 			return err
 		}
@@ -604,7 +600,7 @@ func (container *Container) Stop(seconds int) error {
 
 	// 2. Wait for the process to exit on its own
 	if _, err := container.State.WaitStop(time.Duration(seconds) * time.Second); err != nil {
-		log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
+		log.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
 		// 3. If it doesn't, then send SIGKILL
 		if err := container.Kill(); err != nil {
 			container.State.WaitStop(-1 * time.Second)
@@ -733,7 +729,7 @@ func (container *Container) GetSize() (int64, int64) {
 	)
 
 	if err := container.Mount(); err != nil {
-		utils.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err)
+		log.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err)
 		return sizeRw, sizeRootfs
 	}
 	defer container.Unmount()
@@ -741,7 +737,7 @@ func (container *Container) GetSize() (int64, int64) {
 	if differ, ok := container.daemon.driver.(graphdriver.Differ); ok {
 		sizeRw, err = differ.DiffSize(container.ID)
 		if err != nil {
-			utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
+			log.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
 			// FIXME: GetSize should return an error. Not changing it now in case
 			// there is a side-effect.
 			sizeRw = -1
@@ -838,7 +834,7 @@ func (container *Container) DisableLink(name string) {
 		if link, exists := container.activeLinks[name]; exists {
 			link.Disable()
 		} else {
-			utils.Debugf("Could not find active link for %s", name)
+			log.Debugf("Could not find active link for %s", name)
 		}
 	}
 }
@@ -853,18 +849,16 @@ func (container *Container) setupContainerDns() error {
 		daemon = container.daemon
 	)
 
-	if config.NetworkMode == "host" {
-		container.ResolvConfPath = "/etc/resolv.conf"
-		return nil
-	}
-
 	resolvConf, err := resolvconf.Get()
 	if err != nil {
 		return err
 	}
+	container.ResolvConfPath, err = container.getRootResourcePath("resolv.conf")
+	if err != nil {
+		return err
+	}
 
-	// If custom dns exists, then create a resolv.conf for the container
-	if len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0 {
+	if config.NetworkMode != "host" && (len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0) {
 		var (
 			dns       = resolvconf.GetNameservers(resolvConf)
 			dnsSearch = resolvconf.GetSearchDomains(resolvConf)
@@ -879,18 +873,9 @@ func (container *Container) setupContainerDns() error {
 		} else if len(daemon.config.DnsSearch) > 0 {
 			dnsSearch = daemon.config.DnsSearch
 		}
-
-		resolvConfPath, err := container.getRootResourcePath("resolv.conf")
-		if err != nil {
-			return err
-		}
-		container.ResolvConfPath = resolvConfPath
-
 		return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch)
-	} else {
-		container.ResolvConfPath = "/etc/resolv.conf"
 	}
-	return nil
+	return ioutil.WriteFile(container.ResolvConfPath, resolvConf, 0644)
 }
 
 func (container *Container) initializeNetworking() error {
@@ -950,15 +935,15 @@ func (container *Container) initializeNetworking() error {
 // Make sure the config is compatible with the current kernel
 func (container *Container) verifyDaemonSettings() {
 	if container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
-		log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
+		log.Infof("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
 		container.Config.Memory = 0
 	}
 	if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit {
-		log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
+		log.Infof("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.")
 		container.Config.MemorySwap = -1
 	}
 	if container.daemon.sysInfo.IPv4ForwardingDisabled {
-		log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
+		log.Infof("WARNING: IPv4 forwarding is disabled. Networking will not work")
 	}
 }
 
@@ -1019,9 +1004,12 @@ func (container *Container) setupLinkedContainers() ([]string, error) {
 func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
 	// Setup environment
 	env := []string{
-		"HOME=/",
 		"PATH=" + DefaultPathEnv,
 		"HOSTNAME=" + container.Config.Hostname,
+		// Note: we don't set HOME here because it'll get autoset intelligently
+		// based on the value of USER inside dockerinit, but only if it isn't
+		// set already (ie, that can be overridden by setting HOME via -e or ENV
+		// in a Dockerfile).
 	}
 	if container.Config.Tty {
 		env = append(env, "TERM=xterm")
@@ -1080,38 +1068,16 @@ func (container *Container) startLoggingToDisk() error {
 }
 
 func (container *Container) waitForStart() error {
-	callback := func(command *execdriver.Command) {
-		if command.Tty {
-			// The callback is called after the process Start()
-			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlace
-			// which we close here.
-			if c, ok := command.Stdout.(io.Closer); ok {
-				c.Close()
-			}
-		}
-		container.State.SetRunning(command.Pid())
-		if err := container.ToDisk(); err != nil {
-			utils.Debugf("%s", err)
-		}
-	}
-
-	// We use a callback here instead of a goroutine and an chan for
-	// syncronization purposes
-	cErr := utils.Go(func() error { return container.monitor(callback) })
+	container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy)
 
-	waitStart := make(chan struct{})
-
-	go func() {
-		container.State.WaitRunning(-1 * time.Second)
-		close(waitStart)
-	}()
-
-	// Start should not return until the process is actually running
+	// block until we either receive an error from the initial start of the container's
+	// process or until the process is running in the container
 	select {
-	case <-waitStart:
-	case err := <-cErr:
+	case <-container.monitor.startSignal:
+	case err := <-utils.Go(container.monitor.Start):
 		return err
 	}
+
 	return nil
 }
 

+ 36 - 1
daemon/container_unit_test.go

@@ -1,7 +1,7 @@
 package daemon
 
 import (
-	"github.com/dotcloud/docker/nat"
+	"github.com/docker/docker/nat"
 	"testing"
 )
 
@@ -89,6 +89,41 @@ func TestParseNetworkOptsPublic(t *testing.T) {
 	}
 }
 
+func TestParseNetworkOptsPublicNoPort(t *testing.T) {
+	ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100"})
+
+	if err == nil {
+		t.Logf("Expected error Invalid containerPort")
+		t.Fail()
+	}
+	if ports != nil {
+		t.Logf("Expected nil got %s", ports)
+		t.Fail()
+	}
+	if bindings != nil {
+		t.Logf("Expected nil got %s", bindings)
+		t.Fail()
+	}
+}
+
+func TestParseNetworkOptsNegativePorts(t *testing.T) {
+	ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100:-1:-1"})
+
+	if err == nil {
+		t.Fail()
+	}
+	t.Logf("%v", len(ports))
+	t.Logf("%v", bindings)
+	if len(ports) != 0 {
+		t.Logf("Expected nil got %s", len(ports))
+		t.Fail()
+	}
+	if len(bindings) != 0 {
+		t.Logf("Expected 0 got %s", len(bindings))
+		t.Fail()
+	}
+}
+
 func TestParseNetworkOptsUdp(t *testing.T) {
 	ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::6000/udp"})
 	if err != nil {

+ 33 - 0
daemon/copy.go

@@ -0,0 +1,33 @@
+package daemon
+
+import (
+	"io"
+
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status {
+	if len(job.Args) != 2 {
+		return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
+	}
+
+	var (
+		name     = job.Args[0]
+		resource = job.Args[1]
+	)
+
+	if container := daemon.Get(name); container != nil {
+
+		data, err := container.Copy(resource)
+		if err != nil {
+			return job.Error(err)
+		}
+		defer data.Close()
+
+		if _, err := io.Copy(job.Stdout, data); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	}
+	return job.Errorf("No such container: %s", name)
+}

+ 86 - 0
daemon/create.go

@@ -0,0 +1,86 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/graph"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/runconfig"
+)
+
+func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
+	var name string
+	if len(job.Args) == 1 {
+		name = job.Args[0]
+	} else if len(job.Args) > 1 {
+		return job.Errorf("Usage: %s", job.Name)
+	}
+	config := runconfig.ContainerConfigFromJob(job)
+	if config.Memory != 0 && config.Memory < 524288 {
+		return job.Errorf("Minimum memory limit allowed is 512k")
+	}
+	if config.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
+		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
+		config.Memory = 0
+	}
+	if config.Memory > 0 && !daemon.SystemConfig().SwapLimit {
+		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
+		config.MemorySwap = -1
+	}
+	container, buildWarnings, err := daemon.Create(config, name)
+	if err != nil {
+		if daemon.Graph().IsNotExist(err) {
+			_, tag := parsers.ParseRepositoryTag(config.Image)
+			if tag == "" {
+				tag = graph.DEFAULTTAG
+			}
+			return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
+		}
+		return job.Error(err)
+	}
+	if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
+		job.Errorf("IPv4 forwarding is disabled.\n")
+	}
+	container.LogEvent("create")
+	// FIXME: this is necessary because daemon.Create might return a nil container
+	// with a non-nil error. This should not happen! Once it's fixed we
+	// can remove this workaround.
+	if container != nil {
+		job.Printf("%s\n", container.ID)
+	}
+	for _, warning := range buildWarnings {
+		job.Errorf("%s\n", warning)
+	}
+	return engine.StatusOK
+}
+
+// Create creates a new container from the given configuration with a given name.
+func (daemon *Daemon) Create(config *runconfig.Config, name string) (*Container, []string, error) {
+	var (
+		container *Container
+		warnings  []string
+	)
+
+	img, err := daemon.repositories.LookupImage(config.Image)
+	if err != nil {
+		return nil, nil, err
+	}
+	if err := img.CheckDepth(); err != nil {
+		return nil, nil, err
+	}
+	if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
+		return nil, nil, err
+	}
+	if container, err = daemon.newContainer(name, config, img); err != nil {
+		return nil, nil, err
+	}
+	if err := daemon.createRootfs(container, img); err != nil {
+		return nil, nil, err
+	}
+	if err := container.ToDisk(); err != nil {
+		return nil, nil, err
+	}
+	if err := daemon.Register(container); err != nil {
+		return nil, nil, err
+	}
+	return container, warnings, nil
+}

+ 280 - 256
daemon/daemon.go

@@ -4,42 +4,40 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"log"
 	"os"
 	"path"
 	"regexp"
+	"runtime"
 	"strings"
 	"sync"
 	"time"
 
 	"github.com/docker/libcontainer/label"
-	"github.com/docker/libcontainer/selinux"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/daemon/execdriver/execdrivers"
-	"github.com/dotcloud/docker/daemon/execdriver/lxc"
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	_ "github.com/dotcloud/docker/daemon/graphdriver/vfs"
-	_ "github.com/dotcloud/docker/daemon/networkdriver/bridge"
-	"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
-	"github.com/dotcloud/docker/daemonconfig"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/graph"
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/pkg/graphdb"
-	"github.com/dotcloud/docker/pkg/namesgenerator"
-	"github.com/dotcloud/docker/pkg/networkfs/resolvconf"
-	"github.com/dotcloud/docker/pkg/sysinfo"
-	"github.com/dotcloud/docker/pkg/truncindex"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
-)
 
-// Set the max depth to the aufs default that most
-// kernels are compiled with
-// For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk
-const MaxImageDepth = 127
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/execdriver/execdrivers"
+	"github.com/docker/docker/daemon/execdriver/lxc"
+	"github.com/docker/docker/daemon/graphdriver"
+	_ "github.com/docker/docker/daemon/graphdriver/vfs"
+	_ "github.com/docker/docker/daemon/networkdriver/bridge"
+	"github.com/docker/docker/daemon/networkdriver/portallocator"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/graph"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/broadcastwriter"
+	"github.com/docker/docker/pkg/graphdb"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/namesgenerator"
+	"github.com/docker/docker/pkg/networkfs/resolvconf"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/pkg/sysinfo"
+	"github.com/docker/docker/pkg/truncindex"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
+)
 
 var (
 	DefaultDns                = []string{"8.8.8.8", "8.8.4.4"}
@@ -91,38 +89,65 @@ type Daemon struct {
 	idIndex        *truncindex.TruncIndex
 	sysInfo        *sysinfo.SysInfo
 	volumes        *graph.Graph
-	srv            Server
 	eng            *engine.Engine
-	config         *daemonconfig.Config
+	config         *Config
 	containerGraph *graphdb.Database
 	driver         graphdriver.Driver
 	execDriver     execdriver.Driver
-	Sockets        []string
 }
 
 // Install installs daemon capabilities to eng.
 func (daemon *Daemon) Install(eng *engine.Engine) error {
-	return eng.Register("container_inspect", daemon.ContainerInspect)
-}
-
-// List returns an array of all containers registered in the daemon.
-func (daemon *Daemon) List() []*Container {
-	return daemon.containers.List()
+	// FIXME: rename "delete" to "rm" for consistency with the CLI command
+	// FIXME: rename ContainerDestroy to ContainerRm for consistency with the CLI command
+	// FIXME: remove ImageDelete's dependency on Daemon, then move to graph/
+	for name, method := range map[string]engine.Handler{
+		"attach":            daemon.ContainerAttach,
+		"build":             daemon.CmdBuild,
+		"commit":            daemon.ContainerCommit,
+		"container_changes": daemon.ContainerChanges,
+		"container_copy":    daemon.ContainerCopy,
+		"container_inspect": daemon.ContainerInspect,
+		"containers":        daemon.Containers,
+		"create":            daemon.ContainerCreate,
+		"delete":            daemon.ContainerDestroy,
+		"export":            daemon.ContainerExport,
+		"info":              daemon.CmdInfo,
+		"kill":              daemon.ContainerKill,
+		"logs":              daemon.ContainerLogs,
+		"pause":             daemon.ContainerPause,
+		"resize":            daemon.ContainerResize,
+		"restart":           daemon.ContainerRestart,
+		"start":             daemon.ContainerStart,
+		"stop":              daemon.ContainerStop,
+		"top":               daemon.ContainerTop,
+		"unpause":           daemon.ContainerUnpause,
+		"wait":              daemon.ContainerWait,
+		"image_delete":      daemon.ImageDelete, // FIXME: see above
+	} {
+		if err := eng.Register(name, method); err != nil {
+			return err
+		}
+	}
+	if err := daemon.Repositories().Install(eng); err != nil {
+		return err
+	}
+	// FIXME: this hack is necessary for legacy integration tests to access
+	// the daemon object.
+	eng.Hack_SetGlobalVar("httpapi.daemon", daemon)
+	return nil
 }
 
 // Get looks for a container by the specified ID or name, and returns it.
 // If the container is not found, or if an error occurs, nil is returned.
 func (daemon *Daemon) Get(name string) *Container {
+	if id, err := daemon.idIndex.Get(name); err == nil {
+		return daemon.containers.Get(id)
+	}
 	if c, _ := daemon.GetByName(name); c != nil {
 		return c
 	}
-
-	id, err := daemon.idIndex.Get(name)
-	if err != nil {
-		return nil
-	}
-
-	return daemon.containers.Get(id)
+	return nil
 }
 
 // Exists returns a true if a container of the specified ID or name exists,
@@ -142,20 +167,24 @@ func (daemon *Daemon) load(id string) (*Container, error) {
 	if err := container.FromDisk(); err != nil {
 		return nil, err
 	}
+
 	if container.ID != id {
 		return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
 	}
+
+	container.readHostConfig()
+
 	return container, nil
 }
 
 // Register makes a container object usable by the daemon as <container.ID>
 // This is a wrapper for register
 func (daemon *Daemon) Register(container *Container) error {
-	return daemon.register(container, true, nil)
+	return daemon.register(container, true)
 }
 
 // register makes a container object usable by the daemon as <container.ID>
-func (daemon *Daemon) register(container *Container, updateSuffixarray bool, containersToStart *[]*Container) error {
+func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error {
 	if container.daemon != nil || daemon.Exists(container.ID) {
 		return fmt.Errorf("Container is already loaded")
 	}
@@ -169,8 +198,8 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con
 	container.daemon = daemon
 
 	// Attach to stdout and stderr
-	container.stderr = utils.NewWriteBroadcaster()
-	container.stdout = utils.NewWriteBroadcaster()
+	container.stderr = broadcastwriter.New()
+	container.stdout = broadcastwriter.New()
 	// Attach to stdin
 	if container.Config.OpenStdin {
 		container.stdin, container.stdinPipe = io.Pipe()
@@ -188,7 +217,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con
 	//        if so, then we need to restart monitor and init a new lock
 	// If the container is supposed to be running, make sure of it
 	if container.State.IsRunning() {
-		utils.Debugf("killing old running container %s", container.ID)
+		log.Debugf("killing old running container %s", container.ID)
 
 		existingPid := container.State.Pid
 		container.State.SetStopped(0)
@@ -205,36 +234,28 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con
 			var err error
 			cmd.Process, err = os.FindProcess(existingPid)
 			if err != nil {
-				utils.Debugf("cannot find existing process for %d", existingPid)
+				log.Debugf("cannot find existing process for %d", existingPid)
 			}
 			daemon.execDriver.Terminate(cmd)
 		}
 
 		if err := container.Unmount(); err != nil {
-			utils.Debugf("unmount error %s", err)
+			log.Debugf("unmount error %s", err)
 		}
 		if err := container.ToDisk(); err != nil {
-			utils.Debugf("saving stopped state to disk %s", err)
+			log.Debugf("saving stopped state to disk %s", err)
 		}
 
 		info := daemon.execDriver.Info(container.ID)
 		if !info.IsRunning() {
-			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
+			log.Debugf("Container %s was supposed to be running but is not.", container.ID)
 
-			utils.Debugf("Marking as stopped")
+			log.Debugf("Marking as stopped")
 
 			container.State.SetStopped(-127)
 			if err := container.ToDisk(); err != nil {
 				return err
 			}
-
-			if daemon.config.AutoRestart {
-				utils.Debugf("Marking as restarting")
-
-				if containersToStart != nil {
-					*containersToStart = append(*containersToStart, container)
-				}
-			}
 		}
 	}
 	return nil
@@ -249,13 +270,13 @@ func (daemon *Daemon) ensureName(container *Container) error {
 		container.Name = name
 
 		if err := container.ToDisk(); err != nil {
-			utils.Debugf("Error saving container name %s", err)
+			log.Debugf("Error saving container name %s", err)
 		}
 	}
 	return nil
 }
 
-func (daemon *Daemon) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error {
+func (daemon *Daemon) LogToDisk(src *broadcastwriter.BroadcastWriter, dst, stream string) error {
 	log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
 	if err != nil {
 		return err
@@ -264,56 +285,15 @@ func (daemon *Daemon) LogToDisk(src *utils.WriteBroadcaster, dst, stream string)
 	return nil
 }
 
-// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
-func (daemon *Daemon) Destroy(container *Container) error {
-	if container == nil {
-		return fmt.Errorf("The given container is <nil>")
-	}
-
-	element := daemon.containers.Get(container.ID)
-	if element == nil {
-		return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID)
-	}
-
-	if err := container.Stop(3); err != nil {
-		return err
-	}
-
-	// Deregister the container before removing its directory, to avoid race conditions
-	daemon.idIndex.Delete(container.ID)
-	daemon.containers.Delete(container.ID)
-
-	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
-		utils.Debugf("Unable to remove container from link graph: %s", err)
-	}
-
-	if err := daemon.driver.Remove(container.ID); err != nil {
-		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err)
-	}
-
-	initID := fmt.Sprintf("%s-init", container.ID)
-	if err := daemon.driver.Remove(initID); err != nil {
-		return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err)
-	}
-
-	if err := os.RemoveAll(container.root); err != nil {
-		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
-	}
-	selinux.FreeLxcContexts(container.ProcessLabel)
-
-	return nil
-}
-
 func (daemon *Daemon) restore() error {
 	var (
-		debug             = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "")
-		containers        = make(map[string]*Container)
-		currentDriver     = daemon.driver.String()
-		containersToStart = []*Container{}
+		debug         = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "")
+		containers    = make(map[string]*Container)
+		currentDriver = daemon.driver.String()
 	)
 
 	if !debug {
-		fmt.Printf("Loading containers: ")
+		log.Infof("Loading containers: ")
 	}
 	dir, err := ioutil.ReadDir(daemon.repository)
 	if err != nil {
@@ -327,29 +307,38 @@ func (daemon *Daemon) restore() error {
 			fmt.Print(".")
 		}
 		if err != nil {
-			utils.Errorf("Failed to load container %v: %v", id, err)
+			log.Errorf("Failed to load container %v: %v", id, err)
 			continue
 		}
 
 		// Ignore the container if it does not support the current driver being used by the graph
-		if container.Driver == "" && currentDriver == "aufs" || container.Driver == currentDriver {
-			utils.Debugf("Loaded container %v", container.ID)
+		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
+			log.Debugf("Loaded container %v", container.ID)
+
 			containers[container.ID] = container
 		} else {
-			utils.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
+			log.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
 		}
 	}
 
+	registeredContainers := []*Container{}
+
 	if entities := daemon.containerGraph.List("/", -1); entities != nil {
 		for _, p := range entities.Paths() {
 			if !debug {
 				fmt.Print(".")
 			}
+
 			e := entities[p]
+
 			if container, ok := containers[e.ID()]; ok {
-				if err := daemon.register(container, false, &containersToStart); err != nil {
-					utils.Debugf("Failed to register container %s: %s", container.ID, err)
+				if err := daemon.register(container, false); err != nil {
+					log.Debugf("Failed to register container %s: %s", container.ID, err)
 				}
+
+				registeredContainers = append(registeredContainers, container)
+
+				// delete from the map so that a new name is not automatically generated
 				delete(containers, e.ID())
 			}
 		}
@@ -360,69 +349,37 @@ func (daemon *Daemon) restore() error {
 		// Try to set the default name for a container if it exists prior to links
 		container.Name, err = daemon.generateNewName(container.ID)
 		if err != nil {
-			utils.Debugf("Setting default id - %s", err)
-		}
-		if err := daemon.register(container, false, &containersToStart); err != nil {
-			utils.Debugf("Failed to register container %s: %s", container.ID, err)
+			log.Debugf("Setting default id - %s", err)
 		}
-	}
 
-	for _, container := range containersToStart {
-		utils.Debugf("Starting container %d", container.ID)
-		if err := container.Start(); err != nil {
-			utils.Debugf("Failed to start container %s: %s", container.ID, err)
+		if err := daemon.register(container, false); err != nil {
+			log.Debugf("Failed to register container %s: %s", container.ID, err)
 		}
-	}
 
-	if !debug {
-		fmt.Printf(": done.\n")
+		registeredContainers = append(registeredContainers, container)
 	}
 
-	return nil
-}
+	// check the restart policy on the containers and restart any container with
+	// the restart policy of "always"
+	if daemon.config.AutoRestart {
+		log.Debugf("Restarting containers...")
 
-// Create creates a new container from the given configuration with a given name.
-func (daemon *Daemon) Create(config *runconfig.Config, name string) (*Container, []string, error) {
-	var (
-		container *Container
-		warnings  []string
-	)
+		for _, container := range registeredContainers {
+			if container.hostConfig.RestartPolicy.Name == "always" ||
+				(container.hostConfig.RestartPolicy.Name == "on-failure" && container.State.ExitCode != 0) {
+				log.Debugf("Starting container %s", container.ID)
 
-	img, err := daemon.repositories.LookupImage(config.Image)
-	if err != nil {
-		return nil, nil, err
-	}
-	if err := daemon.checkImageDepth(img); err != nil {
-		return nil, nil, err
-	}
-	if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
-		return nil, nil, err
-	}
-	if container, err = daemon.newContainer(name, config, img); err != nil {
-		return nil, nil, err
-	}
-	if err := daemon.createRootfs(container, img); err != nil {
-		return nil, nil, err
-	}
-	if err := container.ToDisk(); err != nil {
-		return nil, nil, err
-	}
-	if err := daemon.Register(container); err != nil {
-		return nil, nil, err
+				if err := container.Start(); err != nil {
+					log.Debugf("Failed to start container %s: %s", container.ID, err)
+				}
+			}
+		}
 	}
-	return container, warnings, nil
-}
 
-func (daemon *Daemon) checkImageDepth(img *image.Image) error {
-	// We add 2 layers to the depth because the container's rw and
-	// init layer add to the restriction
-	depth, err := img.Depth()
-	if err != nil {
-		return err
-	}
-	if depth+2 >= MaxImageDepth {
-		return fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth)
+	if !debug {
+		log.Infof(": done.")
 	}
+
 	return nil
 }
 
@@ -618,51 +575,6 @@ func (daemon *Daemon) createRootfs(container *Container, img *image.Image) error
 	return nil
 }
 
-// Commit creates a new filesystem image from the current state of a container.
-// The image can optionally be tagged into a repository
-func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, pause bool, config *runconfig.Config) (*image.Image, error) {
-	if pause {
-		container.Pause()
-		defer container.Unpause()
-	}
-
-	if err := container.Mount(); err != nil {
-		return nil, err
-	}
-	defer container.Unmount()
-
-	rwTar, err := container.ExportRw()
-	if err != nil {
-		return nil, err
-	}
-	defer rwTar.Close()
-
-	// Create a new image from the container's base layers + a new layer from container changes
-	var (
-		containerID, containerImage string
-		containerConfig             *runconfig.Config
-	)
-
-	if container != nil {
-		containerID = container.ID
-		containerImage = container.Image
-		containerConfig = container.Config
-	}
-
-	img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
-	if err != nil {
-		return nil, err
-	}
-
-	// Register the image if needed
-	if repository != "" {
-		if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil {
-			return img, err
-		}
-	}
-	return img, nil
-}
-
 func GetFullContainerName(name string) (string, error) {
 	if name == "" {
 		return "", fmt.Errorf("Container name cannot be empty")
@@ -723,7 +635,7 @@ func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error
 func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error {
 	if hostConfig != nil && hostConfig.Links != nil {
 		for _, l := range hostConfig.Links {
-			parts, err := utils.PartParser("name:alias", l)
+			parts, err := parsers.PartParser("name:alias", l)
 			if err != nil {
 				return err
 			}
@@ -750,7 +662,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
 }
 
 // FIXME: harmonize with NewGraph()
-func NewDaemon(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) {
+func NewDaemon(config *Config, eng *engine.Engine) (*Daemon, error) {
 	daemon, err := NewDaemonFromDirectory(config, eng)
 	if err != nil {
 		return nil, err
@@ -758,11 +670,71 @@ func NewDaemon(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error)
 	return daemon, nil
 }
 
-func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) {
+func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) {
+	// Apply configuration defaults
+	if config.Mtu == 0 {
+		// FIXME: GetDefaultNetwork Mtu doesn't need to be public anymore
+		config.Mtu = GetDefaultNetworkMtu()
+	}
+	// Check for mutually incompatible config options
+	if config.BridgeIface != "" && config.BridgeIP != "" {
+		return nil, fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one.")
+	}
+	if !config.EnableIptables && !config.InterContainerCommunication {
+		return nil, fmt.Errorf("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.")
+	}
+	// FIXME: DisableNetworkBidge doesn't need to be public anymore
+	config.DisableNetwork = config.BridgeIface == DisableNetworkBridge
+
+	// Claim the pidfile first, to avoid any and all unexpected race conditions.
+	// Some of the init doesn't need a pidfile lock - but let's not try to be smart.
+	if config.Pidfile != "" {
+		if err := utils.CreatePidFile(config.Pidfile); err != nil {
+			return nil, err
+		}
+		eng.OnShutdown(func() {
+			// Always release the pidfile last, just in case
+			utils.RemovePidFile(config.Pidfile)
+		})
+	}
+
+	// Check that the system is supported and we have sufficient privileges
+	// FIXME: return errors instead of calling Fatal
+	if runtime.GOOS != "linux" {
+		log.Fatalf("The Docker daemon is only supported on linux")
+	}
+	if os.Geteuid() != 0 {
+		log.Fatalf("The Docker daemon needs to be run as root")
+	}
+	if err := checkKernelAndArch(); err != nil {
+		log.Fatalf(err.Error())
+	}
+
+	// set up the TempDir to use a canonical path
+	tmp, err := utils.TempDir(config.Root)
+	if err != nil {
+		log.Fatalf("Unable to get the TempDir under %s: %s", config.Root, err)
+	}
+	realTmp, err := utils.ReadSymlinkedDirectory(tmp)
+	if err != nil {
+		log.Fatalf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
+	}
+	os.Setenv("TMPDIR", realTmp)
 	if !config.EnableSelinuxSupport {
-		selinux.SetDisabled()
+		selinuxSetDisabled()
 	}
 
+	// get the canonical path to the Docker root directory
+	var realRoot string
+	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
+		realRoot = config.Root
+	} else {
+		realRoot, err = utils.ReadSymlinkedDirectory(config.Root)
+		if err != nil {
+			log.Fatalf("Unable to get the full path to root (%s): %s", config.Root, err)
+		}
+	}
+	config.Root = realRoot
 	// Create the root directory if it doesn't exists
 	if err := os.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) {
 		return nil, err
@@ -776,7 +748,12 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D
 	if err != nil {
 		return nil, err
 	}
-	utils.Debugf("Using graph driver %s", driver)
+	log.Debugf("Using graph driver %s", driver)
+
+	// As Docker on btrfs and SELinux are incompatible at present, error on both being enabled
+	if config.EnableSelinuxSupport && driver.String() == "btrfs" {
+		return nil, fmt.Errorf("SELinux is not supported with the BTRFS graph driver!")
+	}
 
 	daemonRepo := path.Join(config.Root, "containers")
 
@@ -789,7 +766,7 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D
 		return nil, err
 	}
 
-	utils.Debugf("Creating images graph")
+	log.Debugf("Creating images graph")
 	g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver)
 	if err != nil {
 		return nil, err
@@ -801,12 +778,12 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D
 	if err != nil {
 		return nil, err
 	}
-	utils.Debugf("Creating volumes graph")
+	log.Debugf("Creating volumes graph")
 	volumes, err := graph.NewGraph(path.Join(config.Root, "volumes"), volumesDriver)
 	if err != nil {
 		return nil, err
 	}
-	utils.Debugf("Creating repository list")
+	log.Debugf("Creating repository list")
 	repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g)
 	if err != nil {
 		return nil, fmt.Errorf("Couldn't create Tag store: %s", err)
@@ -873,34 +850,52 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D
 		sysInitPath:    sysInitPath,
 		execDriver:     ed,
 		eng:            eng,
-		Sockets:        config.Sockets,
 	}
-
 	if err := daemon.checkLocaldns(); err != nil {
 		return nil, err
 	}
 	if err := daemon.restore(); err != nil {
 		return nil, err
 	}
+	// Setup shutdown handlers
+	// FIXME: can these shutdown handlers be registered closer to their source?
+	eng.OnShutdown(func() {
+		// FIXME: if these cleanup steps can be called concurrently, register
+		// them as separate handlers to speed up total shutdown time
+		// FIXME: use engine logging instead of log.Errorf
+		if err := daemon.shutdown(); err != nil {
+			log.Errorf("daemon.shutdown(): %s", err)
+		}
+		if err := portallocator.ReleaseAll(); err != nil {
+			log.Errorf("portallocator.ReleaseAll(): %s", err)
+		}
+		if err := daemon.driver.Cleanup(); err != nil {
+			log.Errorf("daemon.driver.Cleanup(): %s", err.Error())
+		}
+		if err := daemon.containerGraph.Close(); err != nil {
+			log.Errorf("daemon.containerGraph.Close(): %s", err.Error())
+		}
+	})
+
 	return daemon, nil
 }
 
 func (daemon *Daemon) shutdown() error {
 	group := sync.WaitGroup{}
-	utils.Debugf("starting clean shutdown of all containers...")
+	log.Debugf("starting clean shutdown of all containers...")
 	for _, container := range daemon.List() {
 		c := container
 		if c.State.IsRunning() {
-			utils.Debugf("stopping %s", c.ID)
+			log.Debugf("stopping %s", c.ID)
 			group.Add(1)
 
 			go func() {
 				defer group.Done()
 				if err := c.KillSig(15); err != nil {
-					utils.Debugf("kill 15 error for %s - %s", c.ID, err)
+					log.Debugf("kill 15 error for %s - %s", c.ID, err)
 				}
 				c.State.WaitStop(-1 * time.Second)
-				utils.Debugf("container stopped %s", c.ID)
+				log.Debugf("container stopped %s", c.ID)
 			}()
 		}
 	}
@@ -909,30 +904,6 @@ func (daemon *Daemon) shutdown() error {
 	return nil
 }
 
-func (daemon *Daemon) Close() error {
-	errorsStrings := []string{}
-	if err := daemon.shutdown(); err != nil {
-		utils.Errorf("daemon.shutdown(): %s", err)
-		errorsStrings = append(errorsStrings, err.Error())
-	}
-	if err := portallocator.ReleaseAll(); err != nil {
-		utils.Errorf("portallocator.ReleaseAll(): %s", err)
-		errorsStrings = append(errorsStrings, err.Error())
-	}
-	if err := daemon.driver.Cleanup(); err != nil {
-		utils.Errorf("daemon.driver.Cleanup(): %s", err.Error())
-		errorsStrings = append(errorsStrings, err.Error())
-	}
-	if err := daemon.containerGraph.Close(); err != nil {
-		utils.Errorf("daemon.containerGraph.Close(): %s", err.Error())
-		errorsStrings = append(errorsStrings, err.Error())
-	}
-	if len(errorsStrings) > 0 {
-		return fmt.Errorf("%s", strings.Join(errorsStrings, ", "))
-	}
-	return nil
-}
-
 func (daemon *Daemon) Mount(container *Container) error {
 	dir, err := daemon.driver.Get(container.ID, container.GetMountLabel())
 	if err != nil {
@@ -1023,6 +994,8 @@ func (daemon *Daemon) Kill(c *Container, sig int) error {
 // from the content root, including images, volumes and
 // container filesystems.
 // Again: this will remove your entire docker daemon!
+// FIXME: this is deprecated, and only used in legacy
+// tests. Please remove.
 func (daemon *Daemon) Nuke() error {
 	var wg sync.WaitGroup
 	for _, container := range daemon.List() {
@@ -1033,7 +1006,6 @@ func (daemon *Daemon) Nuke() error {
 		}(container)
 	}
 	wg.Wait()
-	daemon.Close()
 
 	return os.RemoveAll(daemon.config.Root)
 }
@@ -1050,7 +1022,7 @@ func (daemon *Daemon) Repositories() *graph.TagStore {
 	return daemon.repositories
 }
 
-func (daemon *Daemon) Config() *daemonconfig.Config {
+func (daemon *Daemon) Config() *Config {
 	return daemon.config
 }
 
@@ -1078,18 +1050,70 @@ func (daemon *Daemon) ContainerGraph() *graphdb.Database {
 	return daemon.containerGraph
 }
 
-func (daemon *Daemon) SetServer(server Server) {
-	daemon.srv = server
-}
-
 func (daemon *Daemon) checkLocaldns() error {
 	resolvConf, err := resolvconf.Get()
 	if err != nil {
 		return err
 	}
 	if len(daemon.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
-		log.Printf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", DefaultDns)
+		log.Infof("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v", DefaultDns)
 		daemon.config.Dns = DefaultDns
 	}
 	return nil
 }
+
+func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
+	// Retrieve all images
+	images, err := daemon.Graph().Map()
+	if err != nil {
+		return nil, err
+	}
+
+	// Store the tree in a map of map (map[parentId][childId])
+	imageMap := make(map[string]map[string]struct{})
+	for _, img := range images {
+		if _, exists := imageMap[img.Parent]; !exists {
+			imageMap[img.Parent] = make(map[string]struct{})
+		}
+		imageMap[img.Parent][img.ID] = struct{}{}
+	}
+
+	// Loop on the children of the given image and check the config
+	var match *image.Image
+	for elem := range imageMap[imgID] {
+		img, err := daemon.Graph().Get(elem)
+		if err != nil {
+			return nil, err
+		}
+		if runconfig.Compare(&img.ContainerConfig, config) {
+			if match == nil || match.Created.Before(img.Created) {
+				match = img
+			}
+		}
+	}
+	return match, nil
+}
+
+func checkKernelAndArch() error {
+	// Check for unsupported architectures
+	if runtime.GOARCH != "amd64" {
+		return fmt.Errorf("The Docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
+	}
+	// Check for unsupported kernel versions
+	// FIXME: it would be cleaner to not test for specific versions, but rather
+	// test for specific functionalities.
+	// Unfortunately we can't test for the feature "does not cause a kernel panic"
+	// without actually causing a kernel panic, so we need this workaround until
+	// the circumstances of pre-3.8 crashes are clearer.
+	// For details see http://github.com/docker/docker/issues/407
+	if k, err := kernel.GetKernelVersion(); err != nil {
+		log.Infof("WARNING: %s", err)
+	} else {
+		if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
+			if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
+				log.Infof("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
+			}
+		}
+	}
+	return nil
+}

+ 5 - 5
daemon/daemon_aufs.go

@@ -3,17 +3,17 @@
 package daemon
 
 import (
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	"github.com/dotcloud/docker/daemon/graphdriver/aufs"
-	"github.com/dotcloud/docker/graph"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/daemon/graphdriver/aufs"
+	"github.com/docker/docker/graph"
+	"github.com/docker/docker/pkg/log"
 )
 
 // Given the graphdriver ad, if it is aufs, then migrate it.
 // If aufs driver is not built, this func is a noop.
 func migrateIfAufs(driver graphdriver.Driver, root string) error {
 	if ad, ok := driver.(*aufs.Driver); ok {
-		utils.Debugf("Migrating existing containers")
+		log.Debugf("Migrating existing containers")
 		if err := ad.Migrate(root, graph.SetupInitLayer); err != nil {
 			return err
 		}

+ 1 - 1
daemon/daemon_btrfs.go

@@ -3,5 +3,5 @@
 package daemon
 
 import (
-	_ "github.com/dotcloud/docker/daemon/graphdriver/btrfs"
+	_ "github.com/docker/docker/daemon/graphdriver/btrfs"
 )

+ 1 - 1
daemon/daemon_devicemapper.go

@@ -3,5 +3,5 @@
 package daemon
 
 import (
-	_ "github.com/dotcloud/docker/daemon/graphdriver/devmapper"
+	_ "github.com/docker/docker/daemon/graphdriver/devmapper"
 )

+ 1 - 1
daemon/daemon_no_aufs.go

@@ -3,7 +3,7 @@
 package daemon
 
 import (
-	"github.com/dotcloud/docker/daemon/graphdriver"
+	"github.com/docker/docker/daemon/graphdriver"
 )
 
 func migrateIfAufs(driver graphdriver.Driver, root string) error {

+ 174 - 0
daemon/delete.go

@@ -0,0 +1,174 @@
+package daemon
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/log"
+)
+
+// FIXME: rename to ContainerRemove for consistency with the CLI command.
+func (daemon *Daemon) ContainerDestroy(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
+	}
+	name := job.Args[0]
+	removeVolume := job.GetenvBool("removeVolume")
+	removeLink := job.GetenvBool("removeLink")
+	forceRemove := job.GetenvBool("forceRemove")
+	container := daemon.Get(name)
+
+	if removeLink {
+		if container == nil {
+			return job.Errorf("No such link: %s", name)
+		}
+		name, err := GetFullContainerName(name)
+		if err != nil {
+			job.Error(err)
+		}
+		parent, n := path.Split(name)
+		if parent == "/" {
+			return job.Errorf("Conflict, cannot remove the default name of the container")
+		}
+		pe := daemon.ContainerGraph().Get(parent)
+		if pe == nil {
+			return job.Errorf("Cannot get parent %s for name %s", parent, name)
+		}
+		parentContainer := daemon.Get(pe.ID())
+
+		if parentContainer != nil {
+			parentContainer.DisableLink(n)
+		}
+
+		if err := daemon.ContainerGraph().Delete(name); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	}
+
+	if container != nil {
+		if container.State.IsRunning() {
+			if forceRemove {
+				if err := container.Kill(); err != nil {
+					return job.Errorf("Could not kill running container, cannot remove - %v", err)
+				}
+			} else {
+				return job.Errorf("You cannot remove a running container. Stop the container before attempting removal or use -f")
+			}
+		}
+		if err := daemon.Destroy(container); err != nil {
+			return job.Errorf("Cannot destroy container %s: %s", name, err)
+		}
+		container.LogEvent("destroy")
+
+		if removeVolume {
+			var (
+				volumes     = make(map[string]struct{})
+				binds       = make(map[string]struct{})
+				usedVolumes = make(map[string]*Container)
+			)
+
+			// the volume id is always the base of the path
+			getVolumeId := func(p string) string {
+				return filepath.Base(strings.TrimSuffix(p, "/layer"))
+			}
+
+			// populate bind map so that they can be skipped and not removed
+			for _, bind := range container.HostConfig().Binds {
+				source := strings.Split(bind, ":")[0]
+				// TODO: refactor all volume stuff, all of it
+				// it is very important that we eval the link or comparing the keys to container.Volumes will not work
+				//
+				// EvalSymlinks can fail (ref #5244); if we receive a does-not-exist error we can safely ignore it
+				p, err := filepath.EvalSymlinks(source)
+				if err != nil && !os.IsNotExist(err) {
+					return job.Error(err)
+				}
+				if p != "" {
+					source = p
+				}
+				binds[source] = struct{}{}
+			}
+
+			// Store all the deleted containers volumes
+			for _, volumeId := range container.Volumes {
+				// Skip the volumes mounted from external
+				// bind mounts here will be evaluated for a symlink
+				if _, exists := binds[volumeId]; exists {
+					continue
+				}
+
+				volumeId = getVolumeId(volumeId)
+				volumes[volumeId] = struct{}{}
+			}
+
+			// Retrieve all volumes from all remaining containers
+			for _, container := range daemon.List() {
+				for _, containerVolumeId := range container.Volumes {
+					containerVolumeId = getVolumeId(containerVolumeId)
+					usedVolumes[containerVolumeId] = container
+				}
+			}
+
+			for volumeId := range volumes {
+				// If the requested volume is still used by another container, skip it and log a message.
+				if c, exists := usedVolumes[volumeId]; exists {
+					log.Infof("The volume %s is used by the container %s. Impossible to remove it. Skipping.", volumeId, c.ID)
+					continue
+				}
+				if err := daemon.Volumes().Delete(volumeId); err != nil {
+					return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
+				}
+			}
+		}
+	} else {
+		return job.Errorf("No such container: %s", name)
+	}
+	return engine.StatusOK
+}
+
+// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
+// FIXME: rename to Rm for consistency with the CLI command
+func (daemon *Daemon) Destroy(container *Container) error {
+	if container == nil {
+		return fmt.Errorf("The given container is <nil>")
+	}
+
+	element := daemon.containers.Get(container.ID)
+	if element == nil {
+		return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID)
+	}
+
+	if err := container.Stop(3); err != nil {
+		return err
+	}
+
+	// Deregister the container before removing its directory, to avoid race conditions
+	daemon.idIndex.Delete(container.ID)
+	daemon.containers.Delete(container.ID)
+
+	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
+		log.Debugf("Unable to remove container from link graph: %s", err)
+	}
+
+	if err := daemon.driver.Remove(container.ID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err)
+	}
+
+	initID := fmt.Sprintf("%s-init", container.ID)
+	if err := daemon.driver.Remove(initID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err)
+	}
+
+	if err := os.RemoveAll(container.root); err != nil {
+		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
+	}
+
+	selinuxFreeLxcContexts(container.ProcessLabel)
+
+	return nil
+}

+ 3 - 41
daemon/execdriver/driver.go

@@ -20,47 +20,7 @@ var (
 	ErrDriverNotFound          = errors.New("The requested docker init has not been found")
 )
 
-var dockerInitFcts map[string]InitFunc
-
-type (
-	StartCallback func(*Command)
-	InitFunc      func(i *InitArgs) error
-)
-
-func RegisterInitFunc(name string, fct InitFunc) error {
-	if dockerInitFcts == nil {
-		dockerInitFcts = make(map[string]InitFunc)
-	}
-	if _, ok := dockerInitFcts[name]; ok {
-		return ErrDriverAlreadyRegistered
-	}
-	dockerInitFcts[name] = fct
-	return nil
-}
-
-func GetInitFunc(name string) (InitFunc, error) {
-	fct, ok := dockerInitFcts[name]
-	if !ok {
-		return nil, ErrDriverNotFound
-	}
-	return fct, nil
-}
-
-// Args provided to the init function for a driver
-type InitArgs struct {
-	User       string
-	Gateway    string
-	Ip         string
-	WorkDir    string
-	Privileged bool
-	Env        []string
-	Args       []string
-	Mtu        int
-	Driver     string
-	Console    string
-	Pipe       int
-	Root       string
-}
+type StartCallback func(*Command)
 
 // Driver specific information based on
 // processes registered with the driver
@@ -140,6 +100,8 @@ type Command struct {
 	Mounts             []Mount             `json:"mounts"`
 	AllowedDevices     []*devices.Device   `json:"allowed_devices"`
 	AutoCreatedDevices []*devices.Device   `json:"autocreated_devices"`
+	CapAdd             []string            `json:"cap_add"`
+	CapDrop            []string            `json:"cap_drop"`
 
 	Terminal     Terminal `json:"-"`             // standard or tty terminal
 	Console      string   `json:"-"`             // dev/console path

+ 5 - 5
daemon/execdriver/execdrivers/execdrivers.go

@@ -2,10 +2,10 @@ package execdrivers
 
 import (
 	"fmt"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/daemon/execdriver/lxc"
-	"github.com/dotcloud/docker/daemon/execdriver/native"
-	"github.com/dotcloud/docker/pkg/sysinfo"
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/execdriver/lxc"
+	"github.com/docker/docker/daemon/execdriver/native"
+	"github.com/docker/docker/pkg/sysinfo"
 	"path"
 )
 
@@ -15,7 +15,7 @@ func NewDriver(name, root, initPath string, sysInfo *sysinfo.SysInfo) (execdrive
 		// we want to give the lxc driver the full docker root because it needs
 		// to access and write config and template files in /var/lib/docker/containers/*
 		// to be backwards compatible
-		return lxc.NewDriver(root, sysInfo.AppArmor)
+		return lxc.NewDriver(root, initPath, sysInfo.AppArmor)
 	case "native":
 		return native.NewDriver(path.Join(root, "execdriver", "native"), initPath)
 	}

+ 115 - 39
daemon/execdriver/lxc/driver.go

@@ -3,69 +3,47 @@ package lxc
 import (
 	"encoding/json"
 	"fmt"
+	"io"
 	"io/ioutil"
-	"log"
 	"os"
 	"os/exec"
 	"path"
 	"path/filepath"
-	"runtime"
 	"strconv"
 	"strings"
 	"syscall"
 	"time"
 
+	"github.com/kr/pty"
+
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/utils"
 	"github.com/docker/libcontainer/cgroups"
 	"github.com/docker/libcontainer/label"
 	"github.com/docker/libcontainer/mount/nodes"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/utils"
 )
 
 const DriverName = "lxc"
 
-func init() {
-	execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error {
-		runtime.LockOSThread()
-		if err := setupEnv(args); err != nil {
-			return err
-		}
-		if err := setupHostname(args); err != nil {
-			return err
-		}
-		if err := setupNetworking(args); err != nil {
-			return err
-		}
-		if err := finalizeNamespace(args); err != nil {
-			return err
-		}
-
-		path, err := exec.LookPath(args.Args[0])
-		if err != nil {
-			log.Printf("Unable to locate %v", args.Args[0])
-			os.Exit(127)
-		}
-		if err := syscall.Exec(path, args.Args, os.Environ()); err != nil {
-			return fmt.Errorf("dockerinit unable to execute %s - %s", path, err)
-		}
-		panic("Unreachable")
-	})
-}
-
 type driver struct {
 	root       string // root path for the driver to use
+	initPath   string
 	apparmor   bool
 	sharedRoot bool
 }
 
-func NewDriver(root string, apparmor bool) (*driver, error) {
+func NewDriver(root, initPath string, apparmor bool) (*driver, error) {
 	// setup unconfined symlink
 	if err := linkLxcStart(root); err != nil {
 		return nil, err
 	}
+
 	return &driver{
 		apparmor:   apparmor,
 		root:       root,
+		initPath:   initPath,
 		sharedRoot: rootIsShared(),
 	}, nil
 }
@@ -76,9 +54,25 @@ func (d *driver) Name() string {
 }
 
 func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
-	if err := execdriver.SetTerminal(c, pipes); err != nil {
-		return -1, err
+	var (
+		term execdriver.Terminal
+		err  error
+	)
+
+	if c.Tty {
+		term, err = NewTtyConsole(c, pipes)
+	} else {
+		term, err = execdriver.NewStdConsole(c, pipes)
 	}
+	c.Terminal = term
+
+	c.Mounts = append(c.Mounts, execdriver.Mount{
+		Source:      d.initPath,
+		Destination: c.InitPath,
+		Writable:    false,
+		Private:     true,
+	})
+
 	if err := d.generateEnvConfig(c); err != nil {
 		return -1, err
 	}
@@ -92,8 +86,6 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		"-f", configPath,
 		"--",
 		c.InitPath,
-		"-driver",
-		DriverName,
 	}
 
 	if c.Network.Interface != nil {
@@ -122,6 +114,14 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		params = append(params, "-w", c.WorkingDir)
 	}
 
+	if len(c.CapAdd) > 0 {
+		params = append(params, fmt.Sprintf("-cap-add=%s", strings.Join(c.CapAdd, ":")))
+	}
+
+	if len(c.CapDrop) > 0 {
+		params = append(params, fmt.Sprintf("-cap-drop=%s", strings.Join(c.CapDrop, ":")))
+	}
+
 	params = append(params, "--", c.Entrypoint)
 	params = append(params, c.Arguments...)
 
@@ -320,7 +320,7 @@ func (i *info) IsRunning() bool {
 
 	output, err := i.driver.getInfo(i.ID)
 	if err != nil {
-		utils.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output)
+		log.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output)
 		return false
 	}
 	if strings.Contains(string(output), "RUNNING") {
@@ -447,7 +447,83 @@ func (d *driver) generateEnvConfig(c *execdriver.Command) error {
 		return err
 	}
 	p := path.Join(d.root, "containers", c.ID, "config.env")
-	c.Mounts = append(c.Mounts, execdriver.Mount{p, "/.dockerenv", false, true})
+	c.Mounts = append(c.Mounts, execdriver.Mount{
+		Source:      p,
+		Destination: "/.dockerenv",
+		Writable:    false,
+		Private:     true,
+	})
 
 	return ioutil.WriteFile(p, data, 0600)
 }
+
+type TtyConsole struct {
+	MasterPty *os.File
+	SlavePty  *os.File
+}
+
+func NewTtyConsole(command *execdriver.Command, pipes *execdriver.Pipes) (*TtyConsole, error) {
+	// lxc is special in that we cannot create the master outside of the container without
+	// opening the slave because we have nothing to provide to the cmd.  We have to open both then do
+	// the crazy setup on command right now instead of passing the console path to lxc and telling it
+	// to open up that console.  We save a couple of open file descriptors in the native driver because we can do
+	// this.
+	ptyMaster, ptySlave, err := pty.Open()
+	if err != nil {
+		return nil, err
+	}
+
+	tty := &TtyConsole{
+		MasterPty: ptyMaster,
+		SlavePty:  ptySlave,
+	}
+
+	if err := tty.AttachPipes(&command.Cmd, pipes); err != nil {
+		tty.Close()
+		return nil, err
+	}
+
+	command.Console = tty.SlavePty.Name()
+
+	return tty, nil
+}
+
+func (t *TtyConsole) Master() *os.File {
+	return t.MasterPty
+}
+
+func (t *TtyConsole) Resize(h, w int) error {
+	return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
+}
+
+func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error {
+	command.Stdout = t.SlavePty
+	command.Stderr = t.SlavePty
+
+	go func() {
+		if wb, ok := pipes.Stdout.(interface {
+			CloseWriters() error
+		}); ok {
+			defer wb.CloseWriters()
+		}
+
+		io.Copy(pipes.Stdout, t.MasterPty)
+	}()
+
+	if pipes.Stdin != nil {
+		command.Stdin = t.SlavePty
+		command.SysProcAttr.Setctty = true
+
+		go func() {
+			io.Copy(t.MasterPty, pipes.Stdin)
+
+			pipes.Stdin.Close()
+		}()
+	}
+	return nil
+}
+
+func (t *TtyConsole) Close() error {
+	t.SlavePty.Close()
+	return t.MasterPty.Close()
+}

+ 103 - 6
daemon/execdriver/lxc/init.go

@@ -2,19 +2,116 @@ package lxc
 
 import (
 	"encoding/json"
+	"flag"
 	"fmt"
 	"io/ioutil"
+	"log"
 	"net"
 	"os"
+	"os/exec"
+	"runtime"
 	"strings"
 	"syscall"
 
+	"github.com/docker/docker/reexec"
 	"github.com/docker/libcontainer/netlink"
-	"github.com/dotcloud/docker/daemon/execdriver"
 )
 
+// Args provided to the init function for a driver
+type InitArgs struct {
+	User       string
+	Gateway    string
+	Ip         string
+	WorkDir    string
+	Privileged bool
+	Env        []string
+	Args       []string
+	Mtu        int
+	Console    string
+	Pipe       int
+	Root       string
+	CapAdd     string
+	CapDrop    string
+}
+
+func init() {
+	// like always lxc requires a hack to get this to work
+	reexec.Register("/.dockerinit", dockerInititalizer)
+}
+
+func dockerInititalizer() {
+	initializer()
+}
+
+// initializer is the lxc driver's init function that is run inside the namespace to setup
+// additional configurations
+func initializer() {
+	runtime.LockOSThread()
+
+	args := getArgs()
+
+	if err := setupNamespace(args); err != nil {
+		log.Fatal(err)
+	}
+}
+
+func setupNamespace(args *InitArgs) error {
+	if err := setupEnv(args); err != nil {
+		return err
+	}
+	if err := setupHostname(args); err != nil {
+		return err
+	}
+	if err := setupNetworking(args); err != nil {
+		return err
+	}
+	if err := finalizeNamespace(args); err != nil {
+		return err
+	}
+
+	path, err := exec.LookPath(args.Args[0])
+	if err != nil {
+		log.Printf("Unable to locate %v", args.Args[0])
+		os.Exit(127)
+	}
+
+	if err := syscall.Exec(path, args.Args, os.Environ()); err != nil {
+		return fmt.Errorf("dockerinit unable to execute %s - %s", path, err)
+	}
+
+	return nil
+}
+
+func getArgs() *InitArgs {
+	var (
+		// Get cmdline arguments
+		user       = flag.String("u", "", "username or uid")
+		gateway    = flag.String("g", "", "gateway address")
+		ip         = flag.String("i", "", "ip address")
+		workDir    = flag.String("w", "", "workdir")
+		privileged = flag.Bool("privileged", false, "privileged mode")
+		mtu        = flag.Int("mtu", 1500, "interface mtu")
+		capAdd     = flag.String("cap-add", "", "capabilities to add")
+		capDrop    = flag.String("cap-drop", "", "capabilities to drop")
+	)
+
+	flag.Parse()
+
+	return &InitArgs{
+		User:       *user,
+		Gateway:    *gateway,
+		Ip:         *ip,
+		WorkDir:    *workDir,
+		Privileged: *privileged,
+		Args:       flag.Args(),
+		Mtu:        *mtu,
+		CapAdd:     *capAdd,
+		CapDrop:    *capDrop,
+	}
+}
+
 // Clear environment pollution introduced by lxc-start
-func setupEnv(args *execdriver.InitArgs) error {
+func setupEnv(args *InitArgs) error {
 	// Get env
 	var env []string
 	content, err := ioutil.ReadFile(".dockerenv")
@@ -41,7 +138,7 @@ func setupEnv(args *execdriver.InitArgs) error {
 	return nil
 }
 
-func setupHostname(args *execdriver.InitArgs) error {
+func setupHostname(args *InitArgs) error {
 	hostname := getEnv(args, "HOSTNAME")
 	if hostname == "" {
 		return nil
@@ -50,7 +147,7 @@ func setupHostname(args *execdriver.InitArgs) error {
 }
 
 // Setup networking
-func setupNetworking(args *execdriver.InitArgs) error {
+func setupNetworking(args *InitArgs) error {
 	if args.Ip != "" {
 		// eth0
 		iface, err := net.InterfaceByName("eth0")
@@ -95,7 +192,7 @@ func setupNetworking(args *execdriver.InitArgs) error {
 }
 
 // Setup working directory
-func setupWorkingDirectory(args *execdriver.InitArgs) error {
+func setupWorkingDirectory(args *InitArgs) error {
 	if args.WorkDir == "" {
 		return nil
 	}
@@ -105,7 +202,7 @@ func setupWorkingDirectory(args *execdriver.InitArgs) error {
 	return nil
 }
 
-func getEnv(args *execdriver.InitArgs, key string) string {
+func getEnv(args *InitArgs, key string) string {
 	for _, kv := range args.Env {
 		parts := strings.SplitN(kv, "=", 2)
 		if parts[0] == key && len(parts) == 2 {

+ 23 - 7
daemon/execdriver/lxc/lxc_init_linux.go

@@ -1,24 +1,23 @@
-// +build amd64
-
 package lxc
 
 import (
 	"fmt"
+	"strings"
 	"syscall"
 
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/execdriver/native/template"
 	"github.com/docker/libcontainer/namespaces"
 	"github.com/docker/libcontainer/security/capabilities"
+	"github.com/docker/libcontainer/system"
 	"github.com/docker/libcontainer/utils"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/daemon/execdriver/native/template"
-	"github.com/dotcloud/docker/pkg/system"
 )
 
 func setHostname(hostname string) error {
 	return syscall.Sethostname([]byte(hostname))
 }
 
-func finalizeNamespace(args *execdriver.InitArgs) error {
+func finalizeNamespace(args *InitArgs) error {
 	if err := utils.CloseExecFrom(3); err != nil {
 		return err
 	}
@@ -48,8 +47,25 @@ func finalizeNamespace(args *execdriver.InitArgs) error {
 			return fmt.Errorf("clear keep caps %s", err)
 		}
 
+		var (
+			adds  []string
+			drops []string
+		)
+
+		if args.CapAdd != "" {
+			adds = strings.Split(args.CapAdd, ":")
+		}
+		if args.CapDrop != "" {
+			drops = strings.Split(args.CapDrop, ":")
+		}
+
+		caps, err := execdriver.TweakCapabilities(container.Capabilities, adds, drops)
+		if err != nil {
+			return err
+		}
+
 		// drop all other capabilities
-		if err := capabilities.DropCapabilities(container.Capabilities); err != nil {
+		if err := capabilities.DropCapabilities(caps); err != nil {
 			return fmt.Errorf("drop capabilities %s", err)
 		}
 	}

+ 2 - 2
daemon/execdriver/lxc/lxc_init_unsupported.go

@@ -1,8 +1,8 @@
-// +build !linux !amd64
+// +build !linux
 
 package lxc
 
-import "github.com/dotcloud/docker/daemon/execdriver"
+import "github.com/docker/docker/daemon/execdriver"
 
 func setHostname(hostname string) error {
 	panic("Not supported on darwin")

+ 3 - 3
daemon/execdriver/lxc/lxc_template.go

@@ -4,8 +4,8 @@ import (
 	"strings"
 	"text/template"
 
+	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/libcontainer/label"
-	"github.com/dotcloud/docker/daemon/execdriver"
 )
 
 const LxcTemplate = `
@@ -75,9 +75,9 @@ lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountL
 
 {{range $value := .Mounts}}
 {{if $value.Writable}}
-lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none bind,rw 0 0
+lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,rw 0 0
 {{else}}
-lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none bind,ro 0 0
+lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,ro 0 0
 {{end}}
 {{end}}
 

+ 5 - 3
daemon/execdriver/lxc/lxc_template_unit_test.go

@@ -1,3 +1,5 @@
+// +build linux
+
 package lxc
 
 import (
@@ -11,8 +13,8 @@ import (
 	"testing"
 	"time"
 
+	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/libcontainer/devices"
-	"github.com/dotcloud/docker/daemon/execdriver"
 )
 
 func TestLXCConfig(t *testing.T) {
@@ -35,7 +37,7 @@ func TestLXCConfig(t *testing.T) {
 		cpu    = cpuMin + rand.Intn(cpuMax-cpuMin)
 	)
 
-	driver, err := NewDriver(root, false)
+	driver, err := NewDriver(root, "", false)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -71,7 +73,7 @@ func TestCustomLxcConfig(t *testing.T) {
 
 	os.MkdirAll(path.Join(root, "containers", "1"), 0777)
 
-	driver, err := NewDriver(root, false)
+	driver, err := NewDriver(root, "", false)
 	if err != nil {
 		t.Fatal(err)
 	}

+ 1 - 1
daemon/execdriver/native/configuration/parse.go

@@ -7,8 +7,8 @@ import (
 	"strconv"
 	"strings"
 
+	"github.com/docker/docker/pkg/units"
 	"github.com/docker/libcontainer"
-	"github.com/dotcloud/docker/pkg/units"
 )
 
 type Action func(*libcontainer.Config, interface{}, string) error

+ 1 - 1
daemon/execdriver/native/configuration/parse_test.go

@@ -3,8 +3,8 @@ package configuration
 import (
 	"testing"
 
+	"github.com/docker/docker/daemon/execdriver/native/template"
 	"github.com/docker/libcontainer/security/capabilities"
-	"github.com/dotcloud/docker/daemon/execdriver/native/template"
 )
 
 // Checks whether the expected capability is specified in the capabilities.

+ 14 - 3
daemon/execdriver/native/create.go

@@ -1,3 +1,5 @@
+// +build linux,cgo
+
 package native
 
 import (
@@ -6,14 +8,14 @@ import (
 	"os/exec"
 	"path/filepath"
 
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/execdriver/native/configuration"
+	"github.com/docker/docker/daemon/execdriver/native/template"
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/apparmor"
 	"github.com/docker/libcontainer/devices"
 	"github.com/docker/libcontainer/mount"
 	"github.com/docker/libcontainer/security/capabilities"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/daemon/execdriver/native/configuration"
-	"github.com/dotcloud/docker/daemon/execdriver/native/template"
 )
 
 // createContainer populates and configures the container type with the
@@ -42,6 +44,10 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, e
 		if err := d.setPrivileged(container); err != nil {
 			return nil, err
 		}
+	} else {
+		if err := d.setCapabilities(container, c); err != nil {
+			return nil, err
+		}
 	}
 
 	if err := d.setupCgroups(container, c); err != nil {
@@ -136,6 +142,11 @@ func (d *driver) setPrivileged(container *libcontainer.Config) (err error) {
 	return nil
 }
 
+func (d *driver) setCapabilities(container *libcontainer.Config, c *execdriver.Command) (err error) {
+	container.Capabilities, err = execdriver.TweakCapabilities(container.Capabilities, c.CapAdd, c.CapDrop)
+	return err
+}
+
 func (d *driver) setupCgroups(container *libcontainer.Config, c *execdriver.Command) error {
 	if c.Resources != nil {
 		container.Cgroups.CpuShares = c.Resources.CpuShares

+ 82 - 51
daemon/execdriver/native/driver.go

@@ -1,8 +1,11 @@
+// +build linux,cgo
+
 package native
 
 import (
 	"encoding/json"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
 	"os/exec"
@@ -11,13 +14,15 @@ import (
 	"sync"
 	"syscall"
 
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/term"
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/apparmor"
 	"github.com/docker/libcontainer/cgroups/fs"
 	"github.com/docker/libcontainer/cgroups/systemd"
+	consolepkg "github.com/docker/libcontainer/console"
 	"github.com/docker/libcontainer/namespaces"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/pkg/system"
+	"github.com/docker/libcontainer/system"
 )
 
 const (
@@ -25,34 +30,6 @@ const (
 	Version    = "0.2"
 )
 
-func init() {
-	execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error {
-		var container *libcontainer.Config
-		f, err := os.Open(filepath.Join(args.Root, "container.json"))
-		if err != nil {
-			return err
-		}
-		if err := json.NewDecoder(f).Decode(&container); err != nil {
-			f.Close()
-			return err
-		}
-		f.Close()
-
-		rootfs, err := os.Getwd()
-		if err != nil {
-			return err
-		}
-		syncPipe, err := namespaces.NewSyncPipeFromFd(0, uintptr(args.Pipe))
-		if err != nil {
-			return err
-		}
-		if err := namespaces.Init(container, rootfs, args.Console, syncPipe, args.Args); err != nil {
-			return err
-		}
-		return nil
-	})
-}
-
 type activeContainer struct {
 	container *libcontainer.Config
 	cmd       *exec.Cmd
@@ -88,6 +65,19 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	if err != nil {
 		return -1, err
 	}
+
+	var term execdriver.Terminal
+
+	if c.Tty {
+		term, err = NewTtyConsole(c, pipes)
+	} else {
+		term, err = execdriver.NewStdConsole(c, pipes)
+	}
+	if err != nil {
+		return -1, err
+	}
+	c.Terminal = term
+
 	d.Lock()
 	d.activeContainers[c.ID] = &activeContainer{
 		container: container,
@@ -99,6 +89,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		dataPath = filepath.Join(d.root, c.ID)
 		args     = append([]string{c.Entrypoint}, c.Arguments...)
 	)
+
 	if err := d.createContainerRoot(c.ID); err != nil {
 		return -1, err
 	}
@@ -108,16 +99,10 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		return -1, err
 	}
 
-	term := getTerminal(c, pipes)
-
-	return namespaces.Exec(container, term, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
-		// we need to join the rootfs because namespaces will setup the rootfs and chroot
-		initPath := filepath.Join(c.Rootfs, c.InitPath)
-
+	return namespaces.Exec(container, c.Stdin, c.Stdout, c.Stderr, c.Console, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
 		c.Path = d.initPath
 		c.Args = append([]string{
-			initPath,
-			"-driver", DriverName,
+			DriverName,
 			"-console", console,
 			"-pipe", "3",
 			"-root", filepath.Join(d.root, c.ID),
@@ -125,8 +110,9 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		}, args...)
 
 		// set this to nil so that when we set the clone flags anything else is reset
-		c.SysProcAttr = nil
-		system.SetCloneFlags(&c.Cmd, uintptr(namespaces.GetNamespaceFlags(container.Namespaces)))
+		c.SysProcAttr = &syscall.SysProcAttr{
+			Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
+		}
 		c.ExtraFiles = []*os.File{child}
 
 		c.Env = container.Env
@@ -194,11 +180,13 @@ func (d *driver) Terminate(p *execdriver.Command) error {
 	if err != nil {
 		return err
 	}
+
 	if state.InitStartTime == currentStartTime {
 		err = syscall.Kill(p.Process.Pid, 9)
 		syscall.Wait4(p.Process.Pid, nil, 0, nil)
 	}
 	d.removeContainerRoot(p.ID)
+
 	return err
 
 }
@@ -260,17 +248,60 @@ func getEnv(key string, env []string) string {
 	return ""
 }
 
-func getTerminal(c *execdriver.Command, pipes *execdriver.Pipes) namespaces.Terminal {
-	var term namespaces.Terminal
-	if c.Tty {
-		term = &dockerTtyTerm{
-			pipes: pipes,
-		}
-	} else {
-		term = &dockerStdTerm{
-			pipes: pipes,
+type TtyConsole struct {
+	MasterPty *os.File
+}
+
+func NewTtyConsole(command *execdriver.Command, pipes *execdriver.Pipes) (*TtyConsole, error) {
+	ptyMaster, console, err := consolepkg.CreateMasterAndConsole()
+	if err != nil {
+		return nil, err
+	}
+
+	tty := &TtyConsole{
+		MasterPty: ptyMaster,
+	}
+
+	if err := tty.AttachPipes(&command.Cmd, pipes); err != nil {
+		tty.Close()
+		return nil, err
+	}
+
+	command.Console = console
+
+	return tty, nil
+}
+
+func (t *TtyConsole) Master() *os.File {
+	return t.MasterPty
+}
+
+func (t *TtyConsole) Resize(h, w int) error {
+	return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
+}
+
+func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error {
+	go func() {
+		if wb, ok := pipes.Stdout.(interface {
+			CloseWriters() error
+		}); ok {
+			defer wb.CloseWriters()
 		}
+
+		io.Copy(pipes.Stdout, t.MasterPty)
+	}()
+
+	if pipes.Stdin != nil {
+		go func() {
+			io.Copy(t.MasterPty, pipes.Stdin)
+
+			pipes.Stdin.Close()
+		}()
 	}
-	c.Terminal = term
-	return term
+
+	return nil
+}
+
+func (t *TtyConsole) Close() error {
+	return t.MasterPty.Close()
 }

+ 13 - 0
daemon/execdriver/native/driver_unsupported.go

@@ -0,0 +1,13 @@
+// +build !linux
+
+package native
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/daemon/execdriver"
+)
+
+func NewDriver(root, initPath string) (execdriver.Driver, error) {
+	return nil, fmt.Errorf("native driver not supported on non-linux")
+}

+ 13 - 0
daemon/execdriver/native/driver_unsupported_nocgo.go

@@ -0,0 +1,13 @@
+// +build linux,!cgo
+
+package native
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/daemon/execdriver"
+)
+
+func NewDriver(root, initPath string) (execdriver.Driver, error) {
+	return nil, fmt.Errorf("native driver not supported on non-linux")
+}

+ 2 - 0
daemon/execdriver/native/info.go

@@ -1,3 +1,5 @@
+// +build linux,cgo
+
 package native
 
 import (

+ 66 - 0
daemon/execdriver/native/init.go

@@ -0,0 +1,66 @@
+// +build linux
+
+package native
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+
+	"github.com/docker/docker/reexec"
+	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/namespaces"
+	"github.com/docker/libcontainer/syncpipe"
+)
+
+func init() {
+	reexec.Register(DriverName, initializer)
+}
+
+func initializer() {
+	runtime.LockOSThread()
+
+	var (
+		pipe    = flag.Int("pipe", 0, "sync pipe fd")
+		console = flag.String("console", "", "console (pty slave) path")
+		root    = flag.String("root", ".", "root path for configuration files")
+	)
+
+	flag.Parse()
+
+	var container *libcontainer.Config
+	f, err := os.Open(filepath.Join(*root, "container.json"))
+	if err != nil {
+		writeError(err)
+	}
+
+	if err := json.NewDecoder(f).Decode(&container); err != nil {
+		f.Close()
+		writeError(err)
+	}
+	f.Close()
+
+	rootfs, err := os.Getwd()
+	if err != nil {
+		writeError(err)
+	}
+
+	syncPipe, err := syncpipe.NewSyncPipeFromFd(0, uintptr(*pipe))
+	if err != nil {
+		writeError(err)
+	}
+
+	if err := namespaces.Init(container, rootfs, *console, syncPipe, flag.Args()); err != nil {
+		writeError(err)
+	}
+
+	panic("Unreachable")
+}
+
+func writeError(err error) {
+	fmt.Fprint(os.Stderr, err)
+	os.Exit(1)
+}

+ 2 - 0
daemon/execdriver/native/template/default_template.go

@@ -12,6 +12,7 @@ func New() *libcontainer.Config {
 		Capabilities: []string{
 			"CHOWN",
 			"DAC_OVERRIDE",
+			"FSETID",
 			"FOWNER",
 			"MKNOD",
 			"NET_RAW",
@@ -22,6 +23,7 @@ func New() *libcontainer.Config {
 			"NET_BIND_SERVICE",
 			"SYS_CHROOT",
 			"KILL",
+			"AUDIT_WRITE",
 		},
 		Namespaces: map[string]bool{
 			"NEWNS":  true,

+ 0 - 42
daemon/execdriver/native/term.go

@@ -1,42 +0,0 @@
-/*
-   These types are wrappers around the libcontainer Terminal interface so that
-   we can resuse the docker implementations where possible.
-*/
-package native
-
-import (
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"io"
-	"os"
-	"os/exec"
-)
-
-type dockerStdTerm struct {
-	execdriver.StdConsole
-	pipes *execdriver.Pipes
-}
-
-func (d *dockerStdTerm) Attach(cmd *exec.Cmd) error {
-	return d.AttachPipes(cmd, d.pipes)
-}
-
-func (d *dockerStdTerm) SetMaster(master *os.File) {
-	// do nothing
-}
-
-type dockerTtyTerm struct {
-	execdriver.TtyConsole
-	pipes *execdriver.Pipes
-}
-
-func (t *dockerTtyTerm) Attach(cmd *exec.Cmd) error {
-	go io.Copy(t.pipes.Stdout, t.MasterPty)
-	if t.pipes.Stdin != nil {
-		go io.Copy(t.MasterPty, t.pipes.Stdin)
-	}
-	return nil
-}
-
-func (t *dockerTtyTerm) SetMaster(master *os.File) {
-	t.MasterPty = master
-}

+ 0 - 80
daemon/execdriver/termconsole.go

@@ -1,90 +1,10 @@
 package execdriver
 
 import (
-	"github.com/dotcloud/docker/pkg/term"
-	"github.com/kr/pty"
 	"io"
-	"os"
 	"os/exec"
 )
 
-func SetTerminal(command *Command, pipes *Pipes) error {
-	var (
-		term Terminal
-		err  error
-	)
-	if command.Tty {
-		term, err = NewTtyConsole(command, pipes)
-	} else {
-		term, err = NewStdConsole(command, pipes)
-	}
-	if err != nil {
-		return err
-	}
-	command.Terminal = term
-	return nil
-}
-
-type TtyConsole struct {
-	MasterPty *os.File
-	SlavePty  *os.File
-}
-
-func NewTtyConsole(command *Command, pipes *Pipes) (*TtyConsole, error) {
-	ptyMaster, ptySlave, err := pty.Open()
-	if err != nil {
-		return nil, err
-	}
-	tty := &TtyConsole{
-		MasterPty: ptyMaster,
-		SlavePty:  ptySlave,
-	}
-	if err := tty.AttachPipes(&command.Cmd, pipes); err != nil {
-		tty.Close()
-		return nil, err
-	}
-	command.Console = tty.SlavePty.Name()
-	return tty, nil
-}
-
-func (t *TtyConsole) Master() *os.File {
-	return t.MasterPty
-}
-
-func (t *TtyConsole) Resize(h, w int) error {
-	return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
-}
-
-func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error {
-	command.Stdout = t.SlavePty
-	command.Stderr = t.SlavePty
-
-	go func() {
-		if wb, ok := pipes.Stdout.(interface {
-			CloseWriters() error
-		}); ok {
-			defer wb.CloseWriters()
-		}
-		io.Copy(pipes.Stdout, t.MasterPty)
-	}()
-
-	if pipes.Stdin != nil {
-		command.Stdin = t.SlavePty
-		command.SysProcAttr.Setctty = true
-
-		go func() {
-			defer pipes.Stdin.Close()
-			io.Copy(t.MasterPty, pipes.Stdin)
-		}()
-	}
-	return nil
-}
-
-func (t *TtyConsole) Close() error {
-	t.SlavePty.Close()
-	return t.MasterPty.Close()
-}
-
 type StdConsole struct {
 }
 

+ 63 - 0
daemon/execdriver/utils.go

@@ -0,0 +1,63 @@
+package execdriver
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/utils"
+	"github.com/docker/libcontainer/security/capabilities"
+)
+
+func TweakCapabilities(basics, adds, drops []string) ([]string, error) {
+	var (
+		newCaps []string
+		allCaps = capabilities.GetAllCapabilities()
+	)
+
+	// look for invalid cap in the drop list
+	for _, cap := range drops {
+		if strings.ToLower(cap) == "all" {
+			continue
+		}
+		if !utils.StringsContainsNoCase(allCaps, cap) {
+			return nil, fmt.Errorf("Unknown capability drop: %q", cap)
+		}
+	}
+
+	// handle --cap-add=all
+	if utils.StringsContainsNoCase(adds, "all") {
+		basics = capabilities.GetAllCapabilities()
+	}
+
+	if !utils.StringsContainsNoCase(drops, "all") {
+		for _, cap := range basics {
+			// skip `all` already handled above
+			if strings.ToLower(cap) == "all" {
+				continue
+			}
+
+			// if we don't drop `all`, add back all the non-dropped caps
+			if !utils.StringsContainsNoCase(drops, cap) {
+				newCaps = append(newCaps, strings.ToUpper(cap))
+			}
+		}
+	}
+
+	for _, cap := range adds {
+		// skip `all` already handled above
+		if strings.ToLower(cap) == "all" {
+			continue
+		}
+
+		if !utils.StringsContainsNoCase(allCaps, cap) {
+			return nil, fmt.Errorf("Unknown capability to add: %q", cap)
+		}
+
+		// add cap if not already in the list
+		if !utils.StringsContainsNoCase(newCaps, cap) {
+			newCaps = append(newCaps, strings.ToUpper(cap))
+		}
+	}
+
+	return newCaps, nil
+}

+ 30 - 0
daemon/export.go

@@ -0,0 +1,30 @@
+package daemon
+
+import (
+	"io"
+
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s container_id", job.Name)
+	}
+	name := job.Args[0]
+	if container := daemon.Get(name); container != nil {
+		data, err := container.Export()
+		if err != nil {
+			return job.Errorf("%s: %s", name, err)
+		}
+		defer data.Close()
+
+		// Stream the entire contents of the container (basically a volatile snapshot)
+		if _, err := io.Copy(job.Stdout, data); err != nil {
+			return job.Errorf("%s: %s", name, err)
+		}
+		// FIXME: factor job-specific LogEvent to engine.Job.Run()
+		container.LogEvent("export")
+		return engine.StatusOK
+	}
+	return job.Errorf("No such container: %s", name)
+}

+ 7 - 6
daemon/graphdriver/aufs/aufs.go

@@ -30,11 +30,12 @@ import (
 	"sync"
 	"syscall"
 
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/log"
+	mountpk "github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/utils"
 	"github.com/docker/libcontainer/label"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	mountpk "github.com/dotcloud/docker/pkg/mount"
-	"github.com/dotcloud/docker/utils"
 )
 
 var (
@@ -209,7 +210,7 @@ func (a *Driver) Remove(id string) error {
 	defer a.Unlock()
 
 	if a.active[id] != 0 {
-		utils.Errorf("Warning: removing active id %s\n", id)
+		log.Errorf("Warning: removing active id %s", id)
 	}
 
 	// Make sure the dir is umounted first
@@ -378,7 +379,7 @@ func (a *Driver) Cleanup() error {
 
 	for _, id := range ids {
 		if err := a.unmount(id); err != nil {
-			utils.Errorf("Unmounting %s: %s", utils.TruncateID(id), err)
+			log.Errorf("Unmounting %s: %s", utils.TruncateID(id), err)
 		}
 	}
 

+ 2 - 2
daemon/graphdriver/aufs/aufs_test.go

@@ -4,8 +4,8 @@ import (
 	"crypto/sha256"
 	"encoding/hex"
 	"fmt"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon/graphdriver"
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon/graphdriver"
 	"io/ioutil"
 	"os"
 	"path"

+ 3 - 2
daemon/graphdriver/aufs/mount.go

@@ -1,14 +1,15 @@
 package aufs
 
 import (
-	"github.com/dotcloud/docker/utils"
 	"os/exec"
 	"syscall"
+
+	"github.com/docker/docker/pkg/log"
 )
 
 func Unmount(target string) error {
 	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
-		utils.Errorf("[warning]: couldn't run auplink before unmount: %s", err)
+		log.Errorf("[warning]: couldn't run auplink before unmount: %s", err)
 	}
 	if err := syscall.Unmount(target, 0); err != nil {
 		return err

+ 0 - 2
daemon/graphdriver/aufs/mount_linux.go

@@ -1,5 +1,3 @@
-// +build amd64
-
 package aufs
 
 import "syscall"

+ 1 - 1
daemon/graphdriver/aufs/mount_unsupported.go

@@ -1,4 +1,4 @@
-// +build !linux !amd64
+// +build !linux
 
 package aufs
 

+ 3 - 3
daemon/graphdriver/btrfs/btrfs.go

@@ -1,4 +1,4 @@
-// +build linux,amd64
+// +build linux
 
 package btrfs
 
@@ -16,8 +16,8 @@ import (
 	"syscall"
 	"unsafe"
 
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	"github.com/dotcloud/docker/pkg/mount"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/mount"
 )
 
 func init() {

+ 1 - 1
daemon/graphdriver/btrfs/btrfs_test.go

@@ -1,7 +1,7 @@
 package btrfs
 
 import (
-	"github.com/dotcloud/docker/daemon/graphdriver/graphtest"
+	"github.com/docker/docker/daemon/graphdriver/graphtest"
 	"testing"
 )
 

+ 1 - 1
daemon/graphdriver/btrfs/dummy_unsupported.go

@@ -1,3 +1,3 @@
-// +build !linux !amd64
+// +build !linux !cgo
 
 package btrfs

+ 22 - 9
daemon/graphdriver/devmapper/README.md

@@ -7,7 +7,7 @@ module (dm-thinp) to implement CoW snapshots. For each devicemapper
 graph location (typically `/var/lib/docker/devicemapper`, $graph below)
 a thin pool is created based on two block devices, one for data and
 one for metadata.  By default these block devices are created
-automatically by using loopback mounts of automatically creates sparse
+automatically by using loopback mounts of automatically created sparse
 files.
 
 The default loopback files used are `$graph/devicemapper/data` and
@@ -15,15 +15,15 @@ The default loopback files used are `$graph/devicemapper/data` and
 from docker entities to the corresponding devicemapper volumes is
 stored in the `$graph/devicemapper/json` file (encoded as Json).
 
-In order to support multiple devicemapper graphs on a system the thin
+In order to support multiple devicemapper graphs on a system, the thin
 pool will be named something like: `docker-0:33-19478248-pool`, where
-the `0:30` part is the minor/major device nr and `19478248` is the
+the `0:33` part is the minor/major device nr and `19478248` is the
 inode number of the $graph directory.
 
-On the thin pool docker automatically creates a base thin device,
+On the thin pool, docker automatically creates a base thin device,
 called something like `docker-0:33-19478248-base` of a fixed
-size. This is automatically formated on creation and contains just an
-empty filesystem. This device is the base of all docker images and
+size. This is automatically formatted with an empty filesystem on
+creation. This device is the base of all docker images and
 containers. All base images are snapshots of this device and those
 images are then in turn used as snapshots for other images and
 eventually containers.
@@ -31,8 +31,8 @@ eventually containers.
 ### options
 
 The devicemapper backend supports some options that you can specify
-when starting the docker daemon using the --storage-opt flags.
-This uses the `dm` prefix and would be used somthing like `docker -d --storage-opt dm.foo=bar`.
+when starting the docker daemon using the `--storage-opt` flags.
+This uses the `dm` prefix and would be used something like `docker -d --storage-opt dm.foo=bar`.
 
 Here is the list of supported options:
 
@@ -43,7 +43,11 @@ Here is the list of supported options:
     10G. Note, thin devices are inherently "sparse", so a 10G device
     which is mostly empty doesn't use 10 GB of space on the
     pool. However, the filesystem will use more space for the empty
-    case the larger the device is.
+    case the larger the device is. **Warning**: This value affects the
+    system-wide "base" empty filesystem that may already be
+    initialized and inherited by pulled images.  Typically, a change
+    to this value will require additional steps to take effect: 1)
+    stop `docker -d`, 2) `rm -rf /var/lib/docker`, 3) start `docker -d`.
 
     Example use:
 
@@ -126,6 +130,15 @@ Here is the list of supported options:
 
     ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1``
 
+ *  `dm.blocksize`
+
+    Specifies a custom blocksize to use for the thin pool.  The default
+    blocksize is 64K.
+
+    Example use:
+
+    ``docker -d --storage-opt dm.blocksize=512K``
+
  *  `dm.blkdiscard`
 
     Enables or disables the use of blkdiscard when removing

+ 11 - 11
daemon/graphdriver/devmapper/attach_loopback.go

@@ -1,4 +1,4 @@
-// +build linux,amd64
+// +build linux
 
 package devmapper
 
@@ -7,7 +7,7 @@ import (
 	"os"
 	"syscall"
 
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/pkg/log"
 )
 
 func stringToLoopName(src string) [LoNameSize]uint8 {
@@ -39,20 +39,20 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil
 		fi, err := os.Stat(target)
 		if err != nil {
 			if os.IsNotExist(err) {
-				utils.Errorf("There are no more loopback devices available.")
+				log.Errorf("There are no more loopback devices available.")
 			}
 			return nil, ErrAttachLoopbackDevice
 		}
 
 		if fi.Mode()&os.ModeDevice != os.ModeDevice {
-			utils.Errorf("Loopback device %s is not a block device.", target)
+			log.Errorf("Loopback device %s is not a block device.", target)
 			continue
 		}
 
 		// OpenFile adds O_CLOEXEC
 		loopFile, err = os.OpenFile(target, os.O_RDWR, 0644)
 		if err != nil {
-			utils.Errorf("Error openning loopback device: %s", err)
+			log.Errorf("Error openning loopback device: %s", err)
 			return nil, ErrAttachLoopbackDevice
 		}
 
@@ -62,7 +62,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil
 
 			// If the error is EBUSY, then try the next loopback
 			if err != syscall.EBUSY {
-				utils.Errorf("Cannot set up loopback device %s: %s", target, err)
+				log.Errorf("Cannot set up loopback device %s: %s", target, err)
 				return nil, ErrAttachLoopbackDevice
 			}
 
@@ -75,7 +75,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil
 
 	// This can't happen, but let's be sure
 	if loopFile == nil {
-		utils.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
+		log.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
 		return nil, ErrAttachLoopbackDevice
 	}
 
@@ -91,13 +91,13 @@ func attachLoopDevice(sparseName string) (loop *os.File, err error) {
 	// loopback from index 0.
 	startIndex, err := getNextFreeLoopbackIndex()
 	if err != nil {
-		utils.Debugf("Error retrieving the next available loopback: %s", err)
+		log.Debugf("Error retrieving the next available loopback: %s", err)
 	}
 
 	// OpenFile adds O_CLOEXEC
 	sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
 	if err != nil {
-		utils.Errorf("Error openning sparse file %s: %s", sparseName, err)
+		log.Errorf("Error openning sparse file %s: %s", sparseName, err)
 		return nil, ErrAttachLoopbackDevice
 	}
 	defer sparseFile.Close()
@@ -115,11 +115,11 @@ func attachLoopDevice(sparseName string) (loop *os.File, err error) {
 	}
 
 	if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
-		utils.Errorf("Cannot set up loopback device info: %s", err)
+		log.Errorf("Cannot set up loopback device info: %s", err)
 
 		// If the call failed, then free the loopback device
 		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
-			utils.Errorf("Error while cleaning up the loopback device")
+			log.Errorf("Error while cleaning up the loopback device")
 		}
 		loopFile.Close()
 		return nil, ErrAttachLoopbackDevice

+ 62 - 51
daemon/graphdriver/devmapper/deviceset.go

@@ -1,4 +1,4 @@
-// +build linux,amd64
+// +build linux
 
 package devmapper
 
@@ -18,16 +18,18 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/units"
 	"github.com/docker/libcontainer/label"
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	"github.com/dotcloud/docker/pkg/units"
-	"github.com/dotcloud/docker/utils"
 )
 
 var (
 	DefaultDataLoopbackSize     int64  = 100 * 1024 * 1024 * 1024
 	DefaultMetaDataLoopbackSize int64  = 2 * 1024 * 1024 * 1024
 	DefaultBaseFsSize           uint64 = 10 * 1024 * 1024 * 1024
+	DefaultThinpBlockSize       uint32 = 128 // 64K = 128 512b sectors
 )
 
 type DevInfo struct {
@@ -78,6 +80,7 @@ type DeviceSet struct {
 	dataDevice           string
 	metadataDevice       string
 	doBlkDiscard         bool
+	thinpBlockSize       uint32
 }
 
 type DiskUsage struct {
@@ -171,7 +174,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
 		if !os.IsNotExist(err) {
 			return "", err
 		}
-		utils.Debugf("Creating loopback file %s for device-manage use", filename)
+		log.Debugf("Creating loopback file %s for device-manage use", filename)
 		file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
 		if err != nil {
 			return "", err
@@ -249,7 +252,7 @@ func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) {
 }
 
 func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) {
-	utils.Debugf("registerDevice(%v, %v)", id, hash)
+	log.Debugf("registerDevice(%v, %v)", id, hash)
 	info := &DevInfo{
 		Hash:          hash,
 		DeviceId:      id,
@@ -275,7 +278,7 @@ func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*Dev
 }
 
 func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error {
-	utils.Debugf("activateDeviceIfNeeded(%v)", info.Hash)
+	log.Debugf("activateDeviceIfNeeded(%v)", info.Hash)
 
 	if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 {
 		return nil
@@ -382,13 +385,13 @@ func (devices *DeviceSet) setupBaseImage() error {
 	}
 
 	if oldInfo != nil && !oldInfo.Initialized {
-		utils.Debugf("Removing uninitialized base image")
+		log.Debugf("Removing uninitialized base image")
 		if err := devices.deleteDevice(oldInfo); err != nil {
 			return err
 		}
 	}
 
-	utils.Debugf("Initializing base device-manager snapshot")
+	log.Debugf("Initializing base device-manager snapshot")
 
 	id := devices.nextDeviceId
 
@@ -400,14 +403,14 @@ func (devices *DeviceSet) setupBaseImage() error {
 	// Ids are 24bit, so wrap around
 	devices.nextDeviceId = (id + 1) & 0xffffff
 
-	utils.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize)
+	log.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize)
 	info, err := devices.registerDevice(id, "", devices.baseFsSize)
 	if err != nil {
 		_ = deleteDevice(devices.getPoolDevName(), id)
 		return err
 	}
 
-	utils.Debugf("Creating filesystem on base device-manager snapshot")
+	log.Debugf("Creating filesystem on base device-manager snapshot")
 
 	if err = devices.activateDeviceIfNeeded(info); err != nil {
 		return err
@@ -445,7 +448,7 @@ func (devices *DeviceSet) log(level int, file string, line int, dmError int, mes
 		return // Ignore _LOG_DEBUG
 	}
 
-	utils.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
+	log.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
 }
 
 func major(device uint64) uint64 {
@@ -510,7 +513,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
 	}
 
 	// Reload with the new block sizes
-	if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback); err != nil {
+	if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil {
 		return fmt.Errorf("Unable to reload pool: %s", err)
 	}
 
@@ -549,13 +552,13 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 	//	- The target of this device is at major <maj> and minor <min>
 	//	- If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
 	devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
-	utils.Debugf("Generated prefix: %s", devices.devicePrefix)
+	log.Debugf("Generated prefix: %s", devices.devicePrefix)
 
 	// Check for the existence of the device <prefix>-pool
-	utils.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
+	log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
 	info, err := getInfo(devices.getPoolName())
 	if info == nil {
-		utils.Debugf("Error device getInfo: %s", err)
+		log.Debugf("Error device getInfo: %s", err)
 		return err
 	}
 
@@ -571,7 +574,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 
 	// If the pool doesn't exist, create it
 	if info.Exists == 0 {
-		utils.Debugf("Pool doesn't exist. Creating it.")
+		log.Debugf("Pool doesn't exist. Creating it.")
 
 		var (
 			dataFile     *os.File
@@ -593,7 +596,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 
 			data, err := devices.ensureImage("data", devices.dataLoopbackSize)
 			if err != nil {
-				utils.Debugf("Error device ensureImage (data): %s\n", err)
+				log.Debugf("Error device ensureImage (data): %s", err)
 				return err
 			}
 
@@ -624,7 +627,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 
 			metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize)
 			if err != nil {
-				utils.Debugf("Error device ensureImage (metadata): %s\n", err)
+				log.Debugf("Error device ensureImage (metadata): %s", err)
 				return err
 			}
 
@@ -640,7 +643,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 		}
 		defer metadataFile.Close()
 
-		if err := createPool(devices.getPoolName(), dataFile, metadataFile); err != nil {
+		if err := createPool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
 			return err
 		}
 	}
@@ -656,7 +659,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 	// Setup the base image
 	if doInit {
 		if err := devices.setupBaseImage(); err != nil {
-			utils.Debugf("Error device setupBaseImage: %s\n", err)
+			log.Debugf("Error device setupBaseImage: %s", err)
 			return err
 		}
 	}
@@ -683,7 +686,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
 	deviceId := devices.nextDeviceId
 
 	if err := createSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil {
-		utils.Debugf("Error creating snap device: %s\n", err)
+		log.Debugf("Error creating snap device: %s", err)
 		return err
 	}
 
@@ -692,7 +695,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
 
 	if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil {
 		deleteDevice(devices.getPoolDevName(), deviceId)
-		utils.Debugf("Error registering device: %s\n", err)
+		log.Debugf("Error registering device: %s", err)
 		return err
 	}
 	return nil
@@ -705,7 +708,7 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error {
 		// manually
 		if err := devices.activateDeviceIfNeeded(info); err == nil {
 			if err := BlockDeviceDiscard(info.DevName()); err != nil {
-				utils.Debugf("Error discarding block on device: %s (ignoring)\n", err)
+				log.Debugf("Error discarding block on device: %s (ignoring)", err)
 			}
 		}
 	}
@@ -713,13 +716,13 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error {
 	devinfo, _ := getInfo(info.Name())
 	if devinfo != nil && devinfo.Exists != 0 {
 		if err := devices.removeDeviceAndWait(info.Name()); err != nil {
-			utils.Debugf("Error removing device: %s\n", err)
+			log.Debugf("Error removing device: %s", err)
 			return err
 		}
 	}
 
 	if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil {
-		utils.Debugf("Error deleting device: %s\n", err)
+		log.Debugf("Error deleting device: %s", err)
 		return err
 	}
 
@@ -732,7 +735,7 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error {
 		devices.devicesLock.Lock()
 		devices.Devices[info.Hash] = info
 		devices.devicesLock.Unlock()
-		utils.Debugf("Error removing meta data: %s\n", err)
+		log.Debugf("Error removing meta data: %s", err)
 		return err
 	}
 
@@ -755,8 +758,8 @@ func (devices *DeviceSet) DeleteDevice(hash string) error {
 }
 
 func (devices *DeviceSet) deactivatePool() error {
-	utils.Debugf("[devmapper] deactivatePool()")
-	defer utils.Debugf("[devmapper] deactivatePool END")
+	log.Debugf("[devmapper] deactivatePool()")
+	defer log.Debugf("[devmapper] deactivatePool END")
 	devname := devices.getPoolDevName()
 	devinfo, err := getInfo(devname)
 	if err != nil {
@@ -770,13 +773,13 @@ func (devices *DeviceSet) deactivatePool() error {
 }
 
 func (devices *DeviceSet) deactivateDevice(info *DevInfo) error {
-	utils.Debugf("[devmapper] deactivateDevice(%s)", info.Hash)
-	defer utils.Debugf("[devmapper] deactivateDevice END")
+	log.Debugf("[devmapper] deactivateDevice(%s)", info.Hash)
+	defer log.Debugf("[devmapper] deactivateDevice END")
 
 	// Wait for the unmount to be effective,
 	// by watching the value of Info.OpenCount for the device
 	if err := devices.waitClose(info); err != nil {
-		utils.Errorf("Warning: error waiting for device %s to close: %s\n", info.Hash, err)
+		log.Errorf("Warning: error waiting for device %s to close: %s", info.Hash, err)
 	}
 
 	devinfo, err := getInfo(info.Name())
@@ -826,8 +829,8 @@ func (devices *DeviceSet) removeDeviceAndWait(devname string) error {
 // a) the device registered at <device_set_prefix>-<hash> is removed,
 // or b) the 10 second timeout expires.
 func (devices *DeviceSet) waitRemove(devname string) error {
-	utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname)
-	defer utils.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname)
+	log.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname)
+	defer log.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname)
 	i := 0
 	for ; i < 1000; i += 1 {
 		devinfo, err := getInfo(devname)
@@ -837,7 +840,7 @@ func (devices *DeviceSet) waitRemove(devname string) error {
 			return nil
 		}
 		if i%100 == 0 {
-			utils.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists)
+			log.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists)
 		}
 		if devinfo.Exists == 0 {
 			break
@@ -864,7 +867,7 @@ func (devices *DeviceSet) waitClose(info *DevInfo) error {
 			return err
 		}
 		if i%100 == 0 {
-			utils.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount)
+			log.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount)
 		}
 		if devinfo.OpenCount == 0 {
 			break
@@ -881,9 +884,9 @@ func (devices *DeviceSet) waitClose(info *DevInfo) error {
 
 func (devices *DeviceSet) Shutdown() error {
 
-	utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
-	utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
-	defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
+	log.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
+	log.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
+	defer log.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
 
 	var devs []*DevInfo
 
@@ -900,12 +903,12 @@ func (devices *DeviceSet) Shutdown() error {
 			// container. This means it'll go away from the global scope directly,
 			// and the device will be released when that container dies.
 			if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
-				utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err)
+				log.Debugf("Shutdown unmounting %s, error: %s", info.mountPath, err)
 			}
 
 			devices.Lock()
 			if err := devices.deactivateDevice(info); err != nil {
-				utils.Debugf("Shutdown deactivate %s , error: %s\n", info.Hash, err)
+				log.Debugf("Shutdown deactivate %s , error: %s", info.Hash, err)
 			}
 			devices.Unlock()
 		}
@@ -917,7 +920,7 @@ func (devices *DeviceSet) Shutdown() error {
 		info.lock.Lock()
 		devices.Lock()
 		if err := devices.deactivateDevice(info); err != nil {
-			utils.Debugf("Shutdown deactivate base , error: %s\n", err)
+			log.Debugf("Shutdown deactivate base , error: %s", err)
 		}
 		devices.Unlock()
 		info.lock.Unlock()
@@ -925,7 +928,7 @@ func (devices *DeviceSet) Shutdown() error {
 
 	devices.Lock()
 	if err := devices.deactivatePool(); err != nil {
-		utils.Debugf("Shutdown deactivate pool , error: %s\n", err)
+		log.Debugf("Shutdown deactivate pool , error: %s", err)
 	}
 	devices.Unlock()
 
@@ -989,8 +992,8 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
 }
 
 func (devices *DeviceSet) UnmountDevice(hash string) error {
-	utils.Debugf("[devmapper] UnmountDevice(hash=%s)", hash)
-	defer utils.Debugf("[devmapper] UnmountDevice END")
+	log.Debugf("[devmapper] UnmountDevice(hash=%s)", hash)
+	defer log.Debugf("[devmapper] UnmountDevice END")
 
 	info, err := devices.lookupDevice(hash)
 	if err != nil {
@@ -1012,11 +1015,11 @@ func (devices *DeviceSet) UnmountDevice(hash string) error {
 		return nil
 	}
 
-	utils.Debugf("[devmapper] Unmount(%s)", info.mountPath)
+	log.Debugf("[devmapper] Unmount(%s)", info.mountPath)
 	if err := syscall.Unmount(info.mountPath, 0); err != nil {
 		return err
 	}
-	utils.Debugf("[devmapper] Unmount done")
+	log.Debugf("[devmapper] Unmount done")
 
 	if err := devices.deactivateDevice(info); err != nil {
 		return err
@@ -1159,30 +1162,31 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error
 		baseFsSize:           DefaultBaseFsSize,
 		filesystem:           "ext4",
 		doBlkDiscard:         true,
+		thinpBlockSize:       DefaultThinpBlockSize,
 	}
 
 	foundBlkDiscard := false
 	for _, option := range options {
-		key, val, err := utils.ParseKeyValueOpt(option)
+		key, val, err := parsers.ParseKeyValueOpt(option)
 		if err != nil {
 			return nil, err
 		}
 		key = strings.ToLower(key)
 		switch key {
 		case "dm.basesize":
-			size, err := units.FromHumanSize(val)
+			size, err := units.RAMInBytes(val)
 			if err != nil {
 				return nil, err
 			}
 			devices.baseFsSize = uint64(size)
 		case "dm.loopdatasize":
-			size, err := units.FromHumanSize(val)
+			size, err := units.RAMInBytes(val)
 			if err != nil {
 				return nil, err
 			}
 			devices.dataLoopbackSize = size
 		case "dm.loopmetadatasize":
-			size, err := units.FromHumanSize(val)
+			size, err := units.RAMInBytes(val)
 			if err != nil {
 				return nil, err
 			}
@@ -1206,6 +1210,13 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error
 			if err != nil {
 				return nil, err
 			}
+		case "dm.blocksize":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return nil, err
+			}
+			// convert to 512b sectors
+			devices.thinpBlockSize = uint32(size) >> 9
 		default:
 			return nil, fmt.Errorf("Unknown option %s\n", key)
 		}

+ 19 - 19
daemon/graphdriver/devmapper/devmapper.go

@@ -1,4 +1,4 @@
-// +build linux,amd64
+// +build linux
 
 package devmapper
 
@@ -9,7 +9,7 @@ import (
 	"runtime"
 	"syscall"
 
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/pkg/log"
 )
 
 type DevmapperLogger interface {
@@ -198,7 +198,7 @@ func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64,
 func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
 	loopInfo, err := ioctlLoopGetStatus64(file.Fd())
 	if err != nil {
-		utils.Errorf("Error get loopback backing file: %s\n", err)
+		log.Errorf("Error get loopback backing file: %s", err)
 		return 0, 0, ErrGetLoopbackBackingFile
 	}
 	return loopInfo.loDevice, loopInfo.loInode, nil
@@ -206,7 +206,7 @@ func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
 
 func LoopbackSetCapacity(file *os.File) error {
 	if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
-		utils.Errorf("Error loopbackSetCapacity: %s", err)
+		log.Errorf("Error loopbackSetCapacity: %s", err)
 		return ErrLoopbackSetCapacity
 	}
 	return nil
@@ -246,7 +246,7 @@ func FindLoopDeviceFor(file *os.File) *os.File {
 
 func UdevWait(cookie uint) error {
 	if res := DmUdevWait(cookie); res != 1 {
-		utils.Debugf("Failed to wait on udev cookie %d", cookie)
+		log.Debugf("Failed to wait on udev cookie %d", cookie)
 		return ErrUdevWait
 	}
 	return nil
@@ -265,7 +265,7 @@ func logInit(logger DevmapperLogger) {
 
 func SetDevDir(dir string) error {
 	if res := DmSetDevDir(dir); res != 1 {
-		utils.Debugf("Error dm_set_dev_dir")
+		log.Debugf("Error dm_set_dev_dir")
 		return ErrSetDevDir
 	}
 	return nil
@@ -286,7 +286,7 @@ func RemoveDevice(name string) error {
 		return ErrCreateRemoveTask
 	}
 	if err := task.SetName(name); err != nil {
-		utils.Debugf("Can't set task name %s", name)
+		log.Debugf("Can't set task name %s", name)
 		return err
 	}
 	if err := task.Run(); err != nil {
@@ -298,7 +298,7 @@ func RemoveDevice(name string) error {
 func GetBlockDeviceSize(file *os.File) (uint64, error) {
 	size, err := ioctlBlkGetSize64(file.Fd())
 	if err != nil {
-		utils.Errorf("Error getblockdevicesize: %s", err)
+		log.Errorf("Error getblockdevicesize: %s", err)
 		return 0, ErrGetBlockSize
 	}
 	return uint64(size), nil
@@ -328,7 +328,7 @@ func BlockDeviceDiscard(path string) error {
 }
 
 // This is the programmatic example of "dmsetup create"
-func createPool(poolName string, dataFile, metadataFile *os.File) error {
+func createPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
 	task, err := createTask(DeviceCreate, poolName)
 	if task == nil {
 		return err
@@ -339,7 +339,7 @@ func createPool(poolName string, dataFile, metadataFile *os.File) error {
 		return fmt.Errorf("Can't get data size %s", err)
 	}
 
-	params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing"
+	params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
 	if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil {
 		return fmt.Errorf("Can't add target %s", err)
 	}
@@ -358,7 +358,7 @@ func createPool(poolName string, dataFile, metadataFile *os.File) error {
 	return nil
 }
 
-func reloadPool(poolName string, dataFile, metadataFile *os.File) error {
+func reloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
 	task, err := createTask(DeviceReload, poolName)
 	if task == nil {
 		return err
@@ -369,7 +369,7 @@ func reloadPool(poolName string, dataFile, metadataFile *os.File) error {
 		return fmt.Errorf("Can't get data size %s", err)
 	}
 
-	params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768"
+	params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
 	if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil {
 		return fmt.Errorf("Can't add target %s", err)
 	}
@@ -417,21 +417,21 @@ func getDriverVersion() (string, error) {
 func getStatus(name string) (uint64, uint64, string, string, error) {
 	task, err := createTask(DeviceStatus, name)
 	if task == nil {
-		utils.Debugf("getStatus: Error createTask: %s", err)
+		log.Debugf("getStatus: Error createTask: %s", err)
 		return 0, 0, "", "", err
 	}
 	if err := task.Run(); err != nil {
-		utils.Debugf("getStatus: Error Run: %s", err)
+		log.Debugf("getStatus: Error Run: %s", err)
 		return 0, 0, "", "", err
 	}
 
 	devinfo, err := task.GetInfo()
 	if err != nil {
-		utils.Debugf("getStatus: Error GetInfo: %s", err)
+		log.Debugf("getStatus: Error GetInfo: %s", err)
 		return 0, 0, "", "", err
 	}
 	if devinfo.Exists == 0 {
-		utils.Debugf("getStatus: Non existing device %s", name)
+		log.Debugf("getStatus: Non existing device %s", name)
 		return 0, 0, "", "", fmt.Errorf("Non existing device %s", name)
 	}
 
@@ -491,7 +491,7 @@ func resumeDevice(name string) error {
 }
 
 func createDevice(poolName string, deviceId *int) error {
-	utils.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId)
+	log.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId)
 
 	for {
 		task, err := createTask(DeviceTargetMsg, poolName)
@@ -542,8 +542,8 @@ func deleteDevice(poolName string, deviceId int) error {
 }
 
 func removeDevice(name string) error {
-	utils.Debugf("[devmapper] removeDevice START")
-	defer utils.Debugf("[devmapper] removeDevice END")
+	log.Debugf("[devmapper] removeDevice START")
+	defer log.Debugf("[devmapper] removeDevice END")
 	task, err := createTask(DeviceRemove, name)
 	if task == nil {
 		return err

+ 1 - 1
daemon/graphdriver/devmapper/devmapper_log.go

@@ -1,4 +1,4 @@
-// +build linux,amd64
+// +build linux
 
 package devmapper
 

+ 2 - 2
daemon/graphdriver/devmapper/devmapper_test.go

@@ -1,9 +1,9 @@
-// +build linux,amd64
+// +build linux
 
 package devmapper
 
 import (
-	"github.com/dotcloud/docker/daemon/graphdriver/graphtest"
+	"github.com/docker/docker/daemon/graphdriver/graphtest"
 	"testing"
 )
 

Some files were not shown because too many files changed in this diff