From d10d0e568e44a4e55293610a5cac84359719baff Mon Sep 17 00:00:00 2001 From: imre Fitos Date: Wed, 14 Jan 2015 23:06:13 -0500 Subject: [PATCH 001/653] docs: remove NAT rule when removing bridge Signed-off-by: imre Fitos --- docs/sources/articles/networking.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/sources/articles/networking.md b/docs/sources/articles/networking.md index 85e6222d8a..78dc599882 100644 --- a/docs/sources/articles/networking.md +++ b/docs/sources/articles/networking.md @@ -687,6 +687,7 @@ stopping the service and removing the interface: $ sudo service docker stop $ sudo ip link set dev docker0 down $ sudo brctl delbr docker0 + $ sudo iptables -t nat -F POSTROUTING Then, before starting the Docker service, create your own bridge and give it whatever configuration you want. Here we will create a simple @@ -708,6 +709,14 @@ illustrate the technique. inet 192.168.5.1/24 scope global bridge0 valid_lft forever preferred_lft forever + # Confirming outgoing NAT masquerade is setup + + $ sudo iptables -t nat -L -n + ... + Chain POSTROUTING (policy ACCEPT) + target prot opt source destination + MASQUERADE all -- 192.168.5.0/24 0.0.0.0/0 + # Tell Docker about it and restart (on Ubuntu) $ echo 'DOCKER_OPTS="-b=bridge0"' >> /etc/default/docker From 88905793add88c8d5ff93f0e9b1edca5f012da33 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Thu, 15 Jan 2015 11:34:59 -0800 Subject: [PATCH 002/653] Build CMD/ENTRYPOINT cache strings properly Make sure that as we build the CMD/ENTRYPOINT cache strings that we don't treat ["echo","hi"] and ["echo hi"] as the same thing due to the fact that we're just doing a strcat on the array. Closes #10097 Signed-off-by: Doug Davis --- builder/dispatchers.go | 4 +-- integration-cli/docker_cli_build_test.go | 38 ++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/builder/dispatchers.go b/builder/dispatchers.go index 6108967c3b..2bc56e46ee 100644 --- a/builder/dispatchers.go +++ b/builder/dispatchers.go @@ -272,7 +272,7 @@ func cmd(b *Builder, args []string, attributes map[string]bool, original string) b.Config.Cmd = append([]string{"/bin/sh", "-c"}, b.Config.Cmd...) 
} - if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %v", b.Config.Cmd)); err != nil { + if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", b.Config.Cmd)); err != nil { return err } @@ -312,7 +312,7 @@ func entrypoint(b *Builder, args []string, attributes map[string]bool, original b.Config.Cmd = nil } - if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %v", b.Config.Entrypoint)); err != nil { + if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil { return err } diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index ffc7594d0e..fe4167fdaf 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -3902,6 +3902,44 @@ func TestBuildCmdShDashC(t *testing.T) { logDone("build - cmd should have sh -c for non-json") } +func TestBuildCmdSpaces(t *testing.T) { + // Test to make sure that when we strcat arrays we take into account + // the arg separator to make sure ["echo","hi"] and ["echo hi"] don't + // look the same + name := "testbuildcmdspaces" + defer deleteImages(name) + var id1 string + var id2 string + var err error + + if id1, err = buildImage(name, "FROM busybox\nCMD [\"echo hi\"]\n", true); err != nil { + t.Fatal(err) + } + + if id2, err = buildImage(name, "FROM busybox\nCMD [\"echo\", \"hi\"]\n", true); err != nil { + t.Fatal(err) + } + + if id1 == id2 { + t.Fatal("Should not have resulted in the same CMD") + } + + // Now do the same with ENTRYPOINT + if id1, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo hi\"]\n", true); err != nil { + t.Fatal(err) + } + + if id2, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n", true); err != nil { + t.Fatal(err) + } + + if id1 == id2 { + t.Fatal("Should not have resulted in the same ENTRYPOINT") + } + + logDone("build - cmd with spaces") +} + func TestBuildCmdJSONNoShDashC(t *testing.T) { name := "testbuildcmdjson" defer deleteImages(name) From 457f2123739df7bc896008b6d796353aae7a0429 Mon Sep 17 00:00:00 2001 From: imre Fitos Date: Thu, 15 Jan 2015 21:32:38 -0500 Subject: [PATCH 003/653] start docker before checking for updated NAT rule Signed-off-by: imre Fitos --- docs/sources/articles/networking.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/sources/articles/networking.md b/docs/sources/articles/networking.md index 78dc599882..78dc25a2bf 100644 --- a/docs/sources/articles/networking.md +++ b/docs/sources/articles/networking.md @@ -709,7 +709,12 @@ illustrate the technique. inet 192.168.5.1/24 scope global bridge0 valid_lft forever preferred_lft forever - # Confirming outgoing NAT masquerade is setup + # Tell Docker about it and restart (on Ubuntu) + + $ echo 'DOCKER_OPTS="-b=bridge0"' >> /etc/default/docker + $ sudo service docker start + + # Confirming new outgoing NAT masquerade is setup $ sudo iptables -t nat -L -n ... @@ -717,10 +722,6 @@ illustrate the technique. target prot opt source destination MASQUERADE all -- 192.168.5.0/24 0.0.0.0/0 - # Tell Docker about it and restart (on Ubuntu) - - $ echo 'DOCKER_OPTS="-b=bridge0"' >> /etc/default/docker - $ sudo service docker start The result should be that the Docker server starts successfully and is now prepared to bind containers to the new bridge. 
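A quick way to confirm that new containers really land on the custom
bridge (a minimal check, assuming the `bridge0`/192.168.5.0/24 example
used here) is to start a throwaway container and look at its address:

    $ sudo docker run --rm busybox ifconfig eth0

The interface should show an `inet` address inside 192.168.5.0/24.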
After pausing to From 6df0fdf91b426a4c3ec5fb4f53381cde4ee249f2 Mon Sep 17 00:00:00 2001 From: gdi2290 Date: Thu, 15 Jan 2015 00:38:24 -0800 Subject: [PATCH 004/653] Update AUTHORS file and .mailmap added `LC_ALL=C.UTF-8` due to osx http://www.inmotionhosting.com/support/website/ssh/speed-up-grep-searche s-with-lc-all Signed-off-by: Patrick Stapleton --- .mailmap | 21 ++++- AUTHORS | 171 ++++++++++++++++++++++++------------ project/generate-authors.sh | 2 +- 3 files changed, 133 insertions(+), 61 deletions(-) diff --git a/.mailmap b/.mailmap index 826fae0ead..00b698bba0 100644 --- a/.mailmap +++ b/.mailmap @@ -6,6 +6,24 @@ # # For explanation on this file format: man git-shortlog +Patrick Stapleton +Shishir Mahajan +Erwin van der Koogh +Ahmed Kamal +Tejesh Mehta +Cristian Staretu +Cristian Staretu +Cristian Staretu +Marcus Linke +Aleksandrs Fadins +Christopher Latham +Hu Keping +Wayne Chang +Chen Chao +Daehyeok Mun + + + @@ -58,7 +76,7 @@ Jean-Baptiste Dalido - + @@ -74,7 +92,6 @@ Sven Dowideit Sven Dowideit <¨SvenDowideit@home.org.au¨> Sven Dowideit Sven Dowideit -unclejack Alexandr Morozov diff --git a/AUTHORS b/AUTHORS index 3d3fe3c7bc..e6ec5d00f7 100644 --- a/AUTHORS +++ b/AUTHORS @@ -12,41 +12,46 @@ Adam Singer Aditya Adrian Mouat Adrien Folie +Ahmed Kamal Ahmet Alp Balkan +Aidan Hobson Sayers AJ Bowen +Al Tobey alambike Alan Thompson Albert Callarisa Albert Zhang Aleksa Sarai +Aleksandrs Fadins +Alex Gaynor +Alex Warhawk +Alexander Boyd Alexander Larsson +Alexander Morozov Alexander Shopov Alexandr Morozov Alexey Kotlyarov Alexey Shamrin -Alex Gaynor Alexis THOMAS -Alex Warhawk almoehi -Al Tobey -Álvaro Lázaro amangoel Amit Bakshi -AnandkumarPatel Anand Patil +AnandkumarPatel +Andre Dublin <81dublin@gmail.com> Andrea Luzzardi +Andrea Turli Andreas Köhler Andreas Savvides Andreas Tiefenthaler -Andrea Turli -Andre Dublin <81dublin@gmail.com> +Andrew C. Bodine Andrew Duckworth Andrew France Andrew Macgregor Andrew Munsell -Andrews Medina Andrew Weiss Andrew Williams +Andrews Medina Andrey Petrov Andrey Stolbovsky Andy Chambers @@ -56,6 +61,8 @@ Andy Kipp Andy Rothfusz Andy Smith Andy Wilson +Ankush Agarwal +Anthony Baire Anthony Bishopric Anton Löfgren Anton Nikitin @@ -72,11 +79,11 @@ Barry Allard Bartłomiej Piotrowski bdevloed Ben Firshman -Benjamin Atkin -Benoit Chesneau Ben Sargent Ben Toews Ben Wiklund +Benjamin Atkin +Benoit Chesneau Bernerd Schaefer Bert Goethals Bhiraj Butala @@ -113,18 +120,22 @@ Charles Hooper Charles Lindsay Charles Merriam Charlie Lewis +Chen Chao Chewey Chia-liang Kao Chris Alfonso Chris Armstrong -chrismckinnel Chris Snow Chris St. Pierre +chrismckinnel Christian Berendt +Christian Stefanescu ChristoperBiscardi -Christopher Currie -Christopher Rigor Christophe Troestler +Christopher Currie +Christopher Latham +Christopher Rigor +Chun Chen Ciro S. 
Costa Clayton Coleman Colin Dunklau @@ -132,15 +143,20 @@ Colin Rice Colin Walters Cory Forsyth cressie176 +Cristian Staretu Cruceru Calin-Cristian Daan van Berkel -Daehyeok.Mun +Daehyeok Mun Dafydd Crosby Dan Buch Dan Cotora Dan Griffin Dan Hirsch -Daniel, Dao Quang Minh +Dan Keder +Dan McPherson +Dan Stine +Dan Walsh +Dan Williams Daniel Exner Daniel Farrell Daniel Garcia @@ -152,29 +168,27 @@ Daniel Nordberg Daniel Robinson Daniel Von Fange Daniel YC Lin -Dan Keder -Dan McPherson +Daniel, Dao Quang Minh Danny Berger Danny Yates -Dan Stine -Dan Walsh -Dan Williams Darren Coxall Darren Shepherd David Anderson David Calavera David Corking -Davide Ceretti David Gageot David Gebler +David Mat David Mcanulty David Pelaez David Röthlisberger David Sissitka +Davide Ceretti Dawn Chen decadent Deni Bertovic Derek +Derek Derek McGowan Deric Crago Deshi Xiao @@ -182,14 +196,16 @@ Dinesh Subhraveti Djibril Koné dkumor Dmitry Demeshchuk +Dmitry V. Krivenok Dolph Mathews Dominik Honnef +Don Kjer Don Spaulding Doug Davis doug tangren +Dr Nic Williams dragon788 Dražen Lučanin -Dr Nic Williams Dustin Sallings Edmund Wagner Eiichi Tsukata @@ -197,19 +213,22 @@ Eike Herzbach Eivind Uggedal Elias Probst Emil Hernvall +Emily Maier Emily Rose Eric Hanchrow Eric Lee Eric Myhre Eric Paris Eric Windisch +Erik Dubbelboer Erik Hollensbe Erik Inge Bolsø Erik Kristensen Erno Hopearuoho +Erwin van der Koogh Eugene Yakubovich eugenkrizo -evanderkoogh +Evan Carmi Evan Hazlett Evan Krall Evan Phoenix @@ -230,9 +249,9 @@ Francisco Carriedo Francisco Souza Frank Macreery Frank Rosquin +Fred Lifton Frederick F. Kautz IV Frederik Loeffert -Fred Lifton Freek Kalter Gabe Rosenhouse Gabor Nagy @@ -266,13 +285,15 @@ Hector Castro Henning Sprang Hobofan Hollie Teal +Hu Keping +Hu Tao Huayi Zhang Hugo Duncan Hunter Blanks -Hu Tao Huu Nguyen hyeongkyu.lee Ian Babrou +Ian Bishop Ian Bull Ian Main Ian Truslove @@ -284,8 +305,10 @@ Isabel Jimenez Isao Jonas Ivan Fraixedes Jack Danger Canty -jakedt +Jacob Atzen +Jacob Edelman Jake Moshenko +jakedt James Allen James Carr James DeFelice @@ -306,49 +329,52 @@ Jason Plum Jean-Baptiste Barth Jean-Baptiste Dalido Jean-Paul Calderone +Jean-Tiare Le Bigot +Jeff Anderson Jeff Lindsay -Jeffrey Bolle Jeff Welch +Jeffrey Bolle Jeremy Grosser -Jérôme Petazzoni Jesse Dubay Jessica Frazelle Jezeniel Zapanta Jilles Oldenbeuving Jim Alateras -Jimmy Cuadra Jim Perrin +Jimmy Cuadra Jiří Župka Joe Beda Joe Ferguson -Joel Handwell Joe Shaw Joe Van Dyk +Joel Friedly +Joel Handwell Joffrey F Johan Euphrosine -Johannes 'fish' Ziemke Johan Rydberg +Johannes 'fish' Ziemke John Costa John Feminella John Gardiner Myers John Gossman John OBrien III John Warwick +Jon Wedaman Jonas Pfenniger +Jonathan A. Sternberg Jonathan Boulle Jonathan Camp Jonathan McCrohan Jonathan Mueller Jonathan Pares Jonathan Rudenberg -Jon Wedaman Joost Cassee Jordan Arentsen Jordan Sissel Joseph Anthony Pasquale Holsten Joseph Hager -Josh Hawn Josh +Josh Hawn Josh Poimboeuf Josiah Kiehl JP @@ -360,6 +386,9 @@ Justin Force Justin Plock Justin Simonelis Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +Kamil Domanski Karan Lyons Karl Grzeszczak Kato Kazuyoshi @@ -367,14 +396,13 @@ Kawsar Saiyeed Keli Hu Ken Cochrane Ken ICHIKAWA +Kevin "qwazerty" Houdebert Kevin Clark Kevin J. 
Lynagh Kevin Menard -Kevin "qwazerty" Houdebert Kevin Wallace Keyvan Fatehi kies -kim0 Kim BKC Carlbacker Kimbro Staken Kiran Gangadharan @@ -382,6 +410,7 @@ knappe Kohei Tsuruta Konrad Kleine Konstantin Pelykh +Krasimir Georgiev krrg Kyle Conroy kyu @@ -397,13 +426,16 @@ Lei Jitang Len Weincier Leszek Kowalski Levi Gross +Lewis Marshall Lewis Peckover Liang-Chi Hsieh limsy Lokesh Mandvekar +Lorenz Leutgeb Louis Opter lukaspustina lukemarsden +Lénaïc Huard Madhu Venugopal Mahesh Tiyyagura Malte Janduda @@ -412,12 +444,13 @@ Manuel Meurer Manuel Woelker Marc Abramowitz Marc Kuo -Marco Hennings Marc Tamsky +Marco Hennings Marcus Farkas -marcuslinke +Marcus Linke Marcus Ramberg Marek Goldmann +Marianna Marius Voila Mark Allen Mark McGranaghan @@ -425,7 +458,9 @@ Marko Mikulicic Marko Tibold Markus Fix Martijn van Oosterhout +Martin Honermeyer Martin Redmond +Mary Anthony Mason Malone Mateusz Sulima Mathias Monnerville @@ -435,14 +470,14 @@ Matt Bachmann Matt Haggard Matthew Heon Matthew Mueller +Matthew Riley Matthias Klumpp Matthias Kühnle mattymo mattyw -Maxime Petazzoni -Maxim Treskin Max Shytikov -Médi-Rémi Hashim +Maxim Treskin +Maxime Petazzoni meejah Mengdi Gao Mert Yazıcıoğlu @@ -451,12 +486,14 @@ Michael Crosby Michael Gorsuch Michael Hudson-Doyle Michael Neale -Michaël Pailloncy Michael Prokop Michael Scharf Michael Stapelberg +Michael Steinert Michael Thies Michal Jemala +Michal Minar +Michaël Pailloncy Michiel@unhosted Miguel Angel Fernández Mike Chelen @@ -471,12 +508,15 @@ Morten Siebuhr Mrunal Patel mschurenko Mustafa Akın +Médi-Rémi Hashim Nan Monnand Deng Naoki Orii +Nate Eagleson Nate Jones Nathan Hsieh Nathan Kleyn Nathan LeClaire +Neal McBurnett Nelson Chen Niall O'Higgins Nicholas E. Rabenau @@ -491,18 +531,21 @@ NikolaMandic noducks Nolan Darilek nzwsch +O.S. Tezer OddBloke odk- Oguz Bilgic Oh Jinkyun Ole Reifschneider Olivier Gambier -O.S. Tezer pandrew +panticz Pascal Borreli Pascal Hartig Patrick Hemmer +Patrick Stapleton pattichen +Paul Paul Annesley Paul Bowsher Paul Hammond @@ -510,7 +553,6 @@ Paul Jimenez Paul Lietar Paul Morie Paul Nasrat -Paul Paul Weaver Pavlos Ratis Peter Bourgon @@ -518,16 +560,18 @@ Peter Braden Peter Ericson Peter Salvatore Peter Waller +Phil Phil Estes +Phil Spitler Philipp Weissensteiner Phillip Alexander -Phil Spitler -Phil Piergiuliano Bossi -Pierre-Alain RIVIERE Pierre +Pierre Wacrenier +Pierre-Alain RIVIERE Piotr Bogdan pixelistik +Porjo Prasanna Gautam Przemek Hejman pysqz @@ -547,6 +591,7 @@ Renato Riccieri Santos Zannon rgstephens Rhys Hiltner Richard Harvey +Richard Metzler Richo Healey Rick Bradley Rick van de Loo @@ -572,25 +617,26 @@ Ryan Fowler Ryan O'Donnell Ryan Seto Ryan Thomas +Rémy Greinhofer Sam Alba Sam Bailey Sam J Sharpe Sam Reis Sam Rijs +Sami Wagiaalla Samuel Andaya Samuel PHAN +Satnam Singh satoru Satoshi Amemiya Scott Bessler Scott Collier Scott Johnston +Scott Stamp Scott Walls Sean Cronin Sean P. 
Kane Sebastiaan van Stijn -Sébastien Luttringer -Sébastien -Sébastien Stormacq Senthil Kumar Selvaraj SeongJae Park Shane Canon @@ -598,12 +644,12 @@ shaunol Shawn Landden Shawn Siefkas Shih-Yuan Lee +Shishir Mahajan shuai-z Silas Sewell Simon Taranto Sindhu S Sjoerd Langkemper -s-ko Solomon Hykes Song Gao Soulou @@ -611,18 +657,23 @@ soulshake Sridatta Thatipamala Sridhar Ratnakumar Srini Brahmaroutu +Srini Brahmaroutu Steeve Morin Stefan Praszalowicz Stephen Crosby Steven Burgess Steven Merrill -sudosurootdev Sven Dowideit Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq tang0th Tatsuki Sugiura +Tatsushi Inagaki Ted M. Young Tehmasp Chaudhri +Tejesh Mehta Thatcher Peskens Thermionix Thijs Terlouw @@ -636,11 +687,10 @@ Tianon Gravi Tibor Vass Tim Bosse Tim Hockin -Timothy Hobbs Tim Ruffles Tim Smith Tim Terhorst -tjmehta +Timothy Hobbs tjwebb123 tobe Tobias Bieniek @@ -648,10 +698,12 @@ Tobias Gesellchen Tobias Schmidt Tobias Schwab Todd Lunter -Tomasz Lipinski Tom Fotherby Tom Hulihan Tom Maaswinkel +Tomas Tomecek +Tomasz Lipinski +Tomasz Nurkiewicz Tommaso Visconti Tonis Tiigi Tony Daws @@ -662,7 +714,8 @@ Trent Ogren Tyler Brock Tzu-Jung Lee Ulysse Carion -unclejack +unknown +Vaidas Jablonskis vgeta Victor Coisne Victor Lyuboslavsky @@ -691,15 +744,15 @@ Walter Leibbrandt Walter Stanish Ward Vandewege WarheadsSE +Wayne Chang Wes Morgan Will Dietz +Will Rouesnel +Will Weaver William Delanoue William Henry William Riancho William Thurston -Will Rouesnel -Will Weaver -wyc Xiuming Chen xuzhaokui Yang Bai @@ -715,4 +768,6 @@ Zilin Du zimbatm Zoltan Tombol zqh +Álex González +Álvaro Lázaro 尹吉峰 diff --git a/project/generate-authors.sh b/project/generate-authors.sh index 0994662767..4bd60364a4 100755 --- a/project/generate-authors.sh +++ b/project/generate-authors.sh @@ -11,5 +11,5 @@ cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." # For how it is generated, see `project/generate-authors.sh`. EOH echo - git log --format='%aN <%aE>' | sort -uf + git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf } > AUTHORS From 73baa673c72a49e9cccfde6e844c7dee2e478d8e Mon Sep 17 00:00:00 2001 From: imre Fitos Date: Sat, 17 Jan 2015 11:21:25 -0500 Subject: [PATCH 005/653] fix typo 'setup/set up' Signed-off-by: imre Fitos --- docs/sources/articles/networking.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/articles/networking.md b/docs/sources/articles/networking.md index 78dc25a2bf..029acab4ea 100644 --- a/docs/sources/articles/networking.md +++ b/docs/sources/articles/networking.md @@ -714,7 +714,7 @@ illustrate the technique. $ echo 'DOCKER_OPTS="-b=bridge0"' >> /etc/default/docker $ sudo service docker start - # Confirming new outgoing NAT masquerade is setup + # Confirming new outgoing NAT masquerade is set up $ sudo iptables -t nat -L -n ... 
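If the listing above ever shows no MASQUERADE entry after the daemon
restart, the rule can be recreated by hand. This is a minimal sketch,
assuming the 192.168.5.0/24 subnet and `bridge0` name from this example;
the `! -o bridge0` part excludes bridge-local traffic, similar to the
rule the daemon sets up for `docker0`:

    $ sudo iptables -t nat -A POSTROUTING -s 192.168.5.0/24 ! -o bridge0 -j MASQUERADE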
From cb81ed34a52420528281ccaa99e16c472f4f533a Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Mon, 19 Jan 2015 15:31:54 -0500 Subject: [PATCH 006/653] devicemapper: API for checking cookie support Signed-off-by: Vincent Batts --- pkg/devicemapper/devmapper.go | 7 +++++++ pkg/devicemapper/devmapper_wrapper.go | 5 +++++ 2 files changed, 12 insertions(+) diff --git a/pkg/devicemapper/devmapper.go b/pkg/devicemapper/devmapper.go index 486ec15db7..764971c1ce 100644 --- a/pkg/devicemapper/devmapper.go +++ b/pkg/devicemapper/devmapper.go @@ -339,6 +339,13 @@ func UdevSetSyncSupport(enable bool) bool { return UdevSyncSupported() } +// CookieSupported returns whether the version of device-mapper supports the +// use of cookie's in the tasks. +// This is largely a lower level call that other functions use. +func CookieSupported() bool { + return DmCookieSupported() != 0 +} + // Useful helper for cleanup func RemoveDevice(name string) error { log.Debugf("[devmapper] RemoveDevice START") diff --git a/pkg/devicemapper/devmapper_wrapper.go b/pkg/devicemapper/devmapper_wrapper.go index aff8446528..6427968d6f 100644 --- a/pkg/devicemapper/devmapper_wrapper.go +++ b/pkg/devicemapper/devmapper_wrapper.go @@ -109,6 +109,7 @@ var ( DmUdevWait = dmUdevWaitFct DmUdevSetSyncSupport = dmUdevSetSyncSupportFct DmUdevGetSyncSupport = dmUdevGetSyncSupportFct + DmCookieSupported = dmCookieSupportedFct LogWithErrnoInit = logWithErrnoInitFct ) @@ -245,6 +246,10 @@ func dmUdevWaitFct(cookie uint) int { return int(C.dm_udev_wait(C.uint32_t(cookie))) } +func dmCookieSupportedFct() int { + return int(C.dm_cookie_supported()) +} + func dmLogInitVerboseFct(level int) { C.dm_log_init_verbose(C.int(level)) } From 8ec6c692dba14b7d95acd2c56e4fd8b020151ce1 Mon Sep 17 00:00:00 2001 From: Hu Tao Date: Wed, 21 Jan 2015 02:13:58 +0800 Subject: [PATCH 007/653] Remove the assumption that the fist IP is the bridge IP The assumption is not true if user specifies an IP address other than the first IP, in that case the first IP address is never allocated to any container. 
Signed-off-by: Hu Tao --- daemon/networkdriver/ipallocator/allocator.go | 5 -- .../ipallocator/allocator_test.go | 55 ++++++++++--------- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/daemon/networkdriver/ipallocator/allocator.go b/daemon/networkdriver/ipallocator/allocator.go index 40c3eb823e..a728d1bac4 100644 --- a/daemon/networkdriver/ipallocator/allocator.go +++ b/daemon/networkdriver/ipallocator/allocator.go @@ -23,11 +23,6 @@ func newAllocatedMap(network *net.IPNet) *allocatedMap { begin := big.NewInt(0).Add(ipToBigInt(firstIP), big.NewInt(1)) end := big.NewInt(0).Sub(ipToBigInt(lastIP), big.NewInt(1)) - // if IPv4 network, then allocation range starts at begin + 1 because begin is bridge IP - if len(firstIP) == 4 { - begin = begin.Add(begin, big.NewInt(1)) - } - return &allocatedMap{ p: make(map[string]struct{}), begin: begin, diff --git a/daemon/networkdriver/ipallocator/allocator_test.go b/daemon/networkdriver/ipallocator/allocator_test.go index 8e0e853fac..8e0d8fdca6 100644 --- a/daemon/networkdriver/ipallocator/allocator_test.go +++ b/daemon/networkdriver/ipallocator/allocator_test.go @@ -61,7 +61,7 @@ func TestRequestNewIps(t *testing.T) { var ip net.IP var err error - for i := 2; i < 10; i++ { + for i := 1; i < 10; i++ { ip, err = RequestIP(network, nil) if err != nil { t.Fatal(err) @@ -167,7 +167,7 @@ func TestGetReleasedIp(t *testing.T) { t.Fatal(err) } - for i := 0; i < 252; i++ { + for i := 0; i < 253; i++ { _, err = RequestIP(network, nil) if err != nil { t.Fatal(err) @@ -278,23 +278,24 @@ func TestRequestSpecificIpV6(t *testing.T) { func TestIPAllocator(t *testing.T) { expectedIPs := []net.IP{ - 0: net.IPv4(127, 0, 0, 2), - 1: net.IPv4(127, 0, 0, 3), - 2: net.IPv4(127, 0, 0, 4), - 3: net.IPv4(127, 0, 0, 5), - 4: net.IPv4(127, 0, 0, 6), + 0: net.IPv4(127, 0, 0, 1), + 1: net.IPv4(127, 0, 0, 2), + 2: net.IPv4(127, 0, 0, 3), + 3: net.IPv4(127, 0, 0, 4), + 4: net.IPv4(127, 0, 0, 5), + 5: net.IPv4(127, 0, 0, 6), } gwIP, n, _ := net.ParseCIDR("127.0.0.1/29") network := &net.IPNet{IP: gwIP, Mask: n.Mask} // Pool after initialisation (f = free, u = used) - // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) + // 1(f) - 2(f) - 3(f) - 4(f) - 5(f) - 6(f) // ↑ - // Check that we get 5 IPs, from 127.0.0.2–127.0.0.6, in that + // Check that we get 6 IPs, from 127.0.0.1–127.0.0.6, in that // order. 
- for i := 0; i < 5; i++ { + for i := 0; i < 6; i++ { ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) @@ -303,27 +304,31 @@ func TestIPAllocator(t *testing.T) { assertIPEquals(t, expectedIPs[i], ip) } // Before loop begin - // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) + // 1(f) - 2(f) - 3(f) - 4(f) - 5(f) - 6(f) // ↑ // After i = 0 - // 2(u) - 3(f) - 4(f) - 5(f) - 6(f) + // 1(u) - 2(f) - 3(f) - 4(f) - 5(f) - 6(f) // ↑ // After i = 1 - // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) + // 1(u) - 2(u) - 3(f) - 4(f) - 5(f) - 6(f) // ↑ // After i = 2 - // 2(u) - 3(u) - 4(u) - 5(f) - 6(f) + // 1(u) - 2(u) - 3(u) - 4(f) - 5(f) - 6(f) // ↑ // After i = 3 - // 2(u) - 3(u) - 4(u) - 5(u) - 6(f) + // 1(u) - 2(u) - 3(u) - 4(u) - 5(f) - 6(f) // ↑ // After i = 4 - // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) + // 1(u) - 2(u) - 3(u) - 4(u) - 5(u) - 6(f) + // ↑ + + // After i = 5 + // 1(u) - 2(u) - 3(u) - 4(u) - 5(u) - 6(u) // ↑ // Check that there are no more IPs @@ -336,20 +341,20 @@ func TestIPAllocator(t *testing.T) { if err := ReleaseIP(network, expectedIPs[3]); err != nil { t.Fatal(err) } - // 2(u) - 3(u) - 4(u) - 5(f) - 6(u) + // 1(u) - 2(u) - 3(u) - 4(f) - 5(u) - 6(u) // ↑ if err := ReleaseIP(network, expectedIPs[2]); err != nil { t.Fatal(err) } - // 2(u) - 3(u) - 4(f) - 5(f) - 6(u) - // ↑ + // 1(u) - 2(u) - 3(f) - 4(f) - 5(u) - 6(u) + // ↑ if err := ReleaseIP(network, expectedIPs[4]); err != nil { t.Fatal(err) } - // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) - // ↑ + // 1(u) - 2(u) - 3(f) - 4(f) - 5(f) - 6(u) + // ↑ // Make sure that IPs are reused in sequential order, starting // with the first released IP @@ -512,10 +517,10 @@ func TestAllocateDifferentSubnets(t *testing.T) { Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask } expectedIPs := []net.IP{ - 0: net.IPv4(192, 168, 0, 2), - 1: net.IPv4(192, 168, 0, 3), - 2: net.IPv4(127, 0, 0, 2), - 3: net.IPv4(127, 0, 0, 3), + 0: net.IPv4(192, 168, 0, 1), + 1: net.IPv4(192, 168, 0, 2), + 2: net.IPv4(127, 0, 0, 1), + 3: net.IPv4(127, 0, 0, 2), 4: net.ParseIP("2a00:1450::1"), 5: net.ParseIP("2a00:1450::2"), 6: net.ParseIP("2a00:1450::3"), From 51b5dc185b12c3447f1b69a705f7828242d10a2e Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Tue, 20 Jan 2015 13:53:24 -0700 Subject: [PATCH 008/653] Update emptyfs support to work properly if scratch is already an image Also, this decouples the emptyfs script from the busybox one -- they're now functionally separate thanks to the scratch no-op change. :+1: Signed-off-by: Andrew "Tianon" Page --- project/make/.ensure-busybox | 2 +- project/make/{.ensure-scratch => .ensure-emptyfs} | 11 ++++++----- project/make/test-integration-cli | 1 + 3 files changed, 8 insertions(+), 6 deletions(-) rename project/make/{.ensure-scratch => .ensure-emptyfs} (84%) diff --git a/project/make/.ensure-busybox b/project/make/.ensure-busybox index 3861faaf11..24ba3052db 100644 --- a/project/make/.ensure-busybox +++ b/project/make/.ensure-busybox @@ -1,8 +1,8 @@ #!/bin/bash +set -e if ! docker inspect busybox &> /dev/null; then if [ -d /docker-busybox ]; then - source "$(dirname "$BASH_SOURCE")/.ensure-scratch" ( set -x; docker build -t busybox /docker-busybox ) else ( set -x; docker pull busybox ) diff --git a/project/make/.ensure-scratch b/project/make/.ensure-emptyfs similarity index 84% rename from project/make/.ensure-scratch rename to project/make/.ensure-emptyfs index 8c421ed29e..8c33557b3d 100644 --- a/project/make/.ensure-scratch +++ b/project/make/.ensure-emptyfs @@ -1,12 +1,13 @@ #!/bin/bash +set -e -if ! 
docker inspect scratch &> /dev/null; then +if ! docker inspect emptyfs &> /dev/null; then # let's build a "docker save" tarball for "emptyfs" # see https://github.com/docker/docker/pull/5262 # and also https://github.com/docker/docker/issues/4242 - mkdir -p /docker-scratch + dir="$(mktemp -d)" ( - cd /docker-scratch + cd "$dir" echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 ( @@ -16,6 +17,6 @@ if ! docker inspect scratch &> /dev/null; then tar -cf layer.tar --files-from /dev/null ) ) - ( set -x; tar -cf /docker-scratch.tar -C /docker-scratch . ) - ( set -x; docker load --input /docker-scratch.tar ) + ( set -x; tar -cC "$dir" . | docker load ) + rm -rf "$dir" fi diff --git a/project/make/test-integration-cli b/project/make/test-integration-cli index 5dc7c42976..7c5e02437f 100644 --- a/project/make/test-integration-cli +++ b/project/make/test-integration-cli @@ -20,6 +20,7 @@ bundle_test_integration_cli() { sleep 2 source "$(dirname "$BASH_SOURCE")/.ensure-busybox" + source "$(dirname "$BASH_SOURCE")/.ensure-emptyfs" bundle_test_integration_cli }; then From 0cd30cf3991de1b491cac3c0cecdc976f0098f29 Mon Sep 17 00:00:00 2001 From: Pierre Wacrenier Date: Mon, 19 Jan 2015 01:27:14 +0100 Subject: [PATCH 009/653] Fix env.WriteTo count return Some calls like json.Encoder.Encode mask the number of bytes written to an io.Writer. The solution provides a wrapper io.Writer around the actual io.Writer that allows multiple calls to Write to be considered as one and allow access to this count. Signed-off-by: Pierre Wacrenier --- engine/env.go | 9 ++++++--- utils/utils.go | 21 +++++++++++++++++++++ utils/utils_test.go | 25 +++++++++++++++++++++++++ 3 files changed, 52 insertions(+), 3 deletions(-) diff --git a/engine/env.go b/engine/env.go index a16dc35cd9..f370e95ed0 100644 --- a/engine/env.go +++ b/engine/env.go @@ -7,6 +7,8 @@ import ( "io" "strconv" "strings" + + "github.com/docker/docker/utils" ) type Env []string @@ -242,9 +244,10 @@ func (env *Env) Encode(dst io.Writer) error { return nil } -func (env *Env) WriteTo(dst io.Writer) (n int64, err error) { - // FIXME: return the number of bytes written to respect io.WriterTo - return 0, env.Encode(dst) +func (env *Env) WriteTo(dst io.Writer) (int64, error) { + wc := utils.NewWriteCounter(dst) + err := env.Encode(wc) + return wc.Count, err } func (env *Env) Import(src interface{}) (err error) { diff --git a/utils/utils.go b/utils/utils.go index a3e17b886d..6392298214 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -516,3 +516,24 @@ func ReadDockerIgnore(path string) ([]string, error) { } return excludes, nil } + +// Wrap a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". 
+// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/utils/utils_test.go b/utils/utils_test.go index ce304482b8..ef1f7af03b 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -1,7 +1,9 @@ package utils import ( + "bytes" "os" + "strings" "testing" ) @@ -97,3 +99,26 @@ func TestReadSymlinkedDirectoryToFile(t *testing.T) { t.Errorf("failed to remove symlink: %s", err) } } + +func TestWriteCounter(t *testing.T) { + dummy1 := "This is a dummy string." + dummy2 := "This is another dummy string." + totalLength := int64(len(dummy1) + len(dummy2)) + + reader1 := strings.NewReader(dummy1) + reader2 := strings.NewReader(dummy2) + + var buffer bytes.Buffer + wc := NewWriteCounter(&buffer) + + reader1.WriteTo(wc) + reader2.WriteTo(wc) + + if wc.Count != totalLength { + t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength) + } + + if buffer.String() != dummy1+dummy2 { + t.Error("Wrong message written") + } +} From 3ca5af6b1ab6d1b75e014265e1ad4f5e45960826 Mon Sep 17 00:00:00 2001 From: Abin Shahab Date: Tue, 20 Jan 2015 00:32:29 +0000 Subject: [PATCH 010/653] Adds ipc namespace capability to lxc, and fixes tests. This fixes various tests by checking for non zero exit code, accounting for lxc-specific base-diffs, and by removing lxc specific environment vars. It also adds the --share-ipc option to lxc-start for shared ipc namespaces. Signed-off-by: Abin Shahab (github: ashahab-altiscale) Docker-DCO-1.1-Signed-off-by: Abin Shahab (github: ashahab-altiscale) --- daemon/execdriver/lxc/driver.go | 13 ++++- daemon/execdriver/lxc/lxc_template.go | 5 +- integration-cli/docker_cli_ps_test.go | 4 +- integration-cli/docker_cli_run_test.go | 70 +++++++++++++++++++++----- 4 files changed, 76 insertions(+), 16 deletions(-) diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index 44942b1fe0..060719a044 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -92,6 +92,17 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba "--share-net", c.Network.ContainerID, ) } + if c.Ipc != nil { + if c.Ipc.ContainerID != "" { + params = append(params, + "--share-ipc", c.Ipc.ContainerID, + ) + } else if c.Ipc.HostIpc { + params = append(params, + "--share-ipc", "1", + ) + } + } params = append(params, "--", @@ -141,7 +152,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba "unshare", "-m", "--", "/bin/sh", "-c", shellString, } } - + log.Debugf("lxc params %s", params) var ( name = params[0] arg = params[1:] diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go index 99bb161985..dafd525125 100644 --- a/daemon/execdriver/lxc/lxc_template.go +++ b/daemon/execdriver/lxc/lxc_template.go @@ -126,7 +126,9 @@ lxc.network.ipv4 = {{.Network.Interface.IPAddress}}/{{.Network.Interface.IPPrefi {{if .Network.Interface.Gateway}} lxc.network.ipv4.gateway = {{.Network.Interface.Gateway}} {{end}} - +{{if .Network.Interface.MacAddress}} +lxc.network.hwaddr = {{.Network.Interface.MacAddress}} +{{end}} {{if .ProcessConfig.Env}} lxc.utsname = {{getHostname .ProcessConfig.Env}} {{end}} @@ -194,6 +196,7 @@ func dropList(drops []string) 
([]string, error) { func isDirectory(source string) string { f, err := os.Stat(source) + log.Debugf("dir: %s\n", source) if err != nil { if os.IsNotExist(err) { return "dir" diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go index fc22bc7d7e..75b39c61a1 100644 --- a/integration-cli/docker_cli_ps_test.go +++ b/integration-cli/docker_cli_ps_test.go @@ -288,7 +288,7 @@ func TestPsListContainersSize(t *testing.T) { func TestPsListContainersFilterStatus(t *testing.T) { // FIXME: this should test paused, but it makes things hang and its wonky // this is because paused containers can't be controlled by signals - + deleteAllContainers() // start exited container runCmd := exec.Command(dockerBinary, "run", "-d", "busybox") out, _, err := runCommandWithOutput(runCmd) @@ -304,7 +304,7 @@ func TestPsListContainersFilterStatus(t *testing.T) { } // start running container - runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 360") + runCmd = exec.Command(dockerBinary, "run", "-itd", "busybox") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 6da5b76565..31ed10ac5f 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -792,7 +792,13 @@ func TestRunEnvironment(t *testing.T) { t.Fatal(err, out) } - actualEnv := strings.Split(strings.TrimSpace(out), "\n") + actualEnvLxc := strings.Split(strings.TrimSpace(out), "\n") + actualEnv := []string{} + for i := range actualEnvLxc { + if actualEnvLxc[i] != "container=lxc" { + actualEnv = append(actualEnv, actualEnvLxc[i]) + } + } sort.Strings(actualEnv) goodEnv := []string{ @@ -831,7 +837,13 @@ func TestRunEnvironmentErase(t *testing.T) { t.Fatal(err, out) } - actualEnv := strings.Split(strings.TrimSpace(out), "\n") + actualEnvLxc := strings.Split(strings.TrimSpace(out), "\n") + actualEnv := []string{} + for i := range actualEnvLxc { + if actualEnvLxc[i] != "container=lxc" { + actualEnv = append(actualEnv, actualEnvLxc[i]) + } + } sort.Strings(actualEnv) goodEnv := []string{ @@ -863,7 +875,13 @@ func TestRunEnvironmentOverride(t *testing.T) { t.Fatal(err, out) } - actualEnv := strings.Split(strings.TrimSpace(out), "\n") + actualEnvLxc := strings.Split(strings.TrimSpace(out), "\n") + actualEnv := []string{} + for i := range actualEnvLxc { + if actualEnvLxc[i] != "container=lxc" { + actualEnv = append(actualEnv, actualEnvLxc[i]) + } + } sort.Strings(actualEnv) goodEnv := []string{ @@ -1969,13 +1987,44 @@ func TestRunWriteHostsFileAndNotCommit(t *testing.T) { if err != nil { t.Fatal(err, out) } - if len(strings.Trim(out, "\r\n")) != 0 { + + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, t) { t.Fatal("diff should be empty") } logDone("run - write to /etc/hosts and not commited") } +func eqToBaseDiff(out string, t *testing.T) bool { + cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello") + out1, _, err := runCommandWithOutput(cmd) + cID := stripTrailingCharacters(out1) + cmd = exec.Command(dockerBinary, "diff", cID) + base_diff, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, base_diff) + } + base_arr := strings.Split(base_diff, "\n") + sort.Strings(base_arr) + out_arr := strings.Split(out, "\n") + sort.Strings(out_arr) + return sliceEq(base_arr, out_arr) +} + +func sliceEq(a, b []string) bool { + if len(a) != len(b) { + return false + } + + for i := range a { + if a[i] != b[i] { + 
return false + } + } + + return true +} + // Test for #2267 func TestRunWriteHostnameFileAndNotCommit(t *testing.T) { defer deleteAllContainers() @@ -1998,7 +2047,7 @@ func TestRunWriteHostnameFileAndNotCommit(t *testing.T) { if err != nil { t.Fatal(err, out) } - if len(strings.Trim(out, "\r\n")) != 0 { + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, t) { t.Fatal("diff should be empty") } @@ -2027,7 +2076,7 @@ func TestRunWriteResolvFileAndNotCommit(t *testing.T) { if err != nil { t.Fatal(err, out) } - if len(strings.Trim(out, "\r\n")) != 0 { + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, t) { t.Fatal("diff should be empty") } @@ -2737,10 +2786,7 @@ func TestRunUnknownCommand(t *testing.T) { cID = strings.TrimSpace(cID) runCmd = exec.Command(dockerBinary, "start", cID) - _, _, _, err = runCommandWithStdoutStderr(runCmd) - if err == nil { - t.Fatalf("Container should not have been able to start!") - } + _, _, _, _ = runCommandWithStdoutStderr(runCmd) runCmd = exec.Command(dockerBinary, "inspect", "--format={{.State.ExitCode}}", cID) rc, _, _, err2 := runCommandWithStdoutStderr(runCmd) @@ -2750,8 +2796,8 @@ func TestRunUnknownCommand(t *testing.T) { t.Fatalf("Error getting status of container: %v", err2) } - if rc != "-1" { - t.Fatalf("ExitCode(%v) was supposed to be -1", rc) + if rc == "0" { + t.Fatalf("ExitCode(%v) cannot be 0", rc) } logDone("run - Unknown Command") From 4bb113f24e1ea12ab7ab9ddfe0508e4f833445e5 Mon Sep 17 00:00:00 2001 From: Harald Albers Date: Mon, 19 Jan 2015 18:35:40 +0100 Subject: [PATCH 011/653] Add bash completions for daemon flags, simplify with extglob Implementing the deamon flags the traditional way introduced even more redundancy than usual because the same list of options with flags had to be added twice. This can be avoided by using variables in the case statements when using the extglob shell option. Signed-off-by: Harald Albers --- contrib/completion/bash/docker | 91 ++++++++++++++++++++++++++++++++-- 1 file changed, 86 insertions(+), 5 deletions(-) diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker index 7dd23b8538..4891194bd8 100755 --- a/contrib/completion/bash/docker +++ b/contrib/completion/bash/docker @@ -104,6 +104,22 @@ __docker_pos_first_nonflag() { echo $counter } +# Transforms a multiline list of strings into a single line string +# with the words separated by "|". +# This is used to prepare arguments to __docker_pos_first_nonflag(). +__docker_to_alternatives() { + local parts=( $1 ) + local IFS='|' + echo "${parts[*]}" +} + +# Transforms a multiline list of options into an extglob pattern +# suitable for use in case statements. 
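+# For example, the list "--host -H --tls" is turned into the pattern
+# "@(--host|-H|--tls)", which matches any one of those words in a case
+# statement once `shopt -s extglob` is enabled.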
+__docker_to_extglob() { + local extglob=$( __docker_to_alternatives "$1" ) + echo "@($extglob)" +} + __docker_resolve_hostname() { command -v host >/dev/null 2>&1 || return COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') ) @@ -154,15 +170,47 @@ __docker_capabilities() { } _docker_docker() { + local boolean_options=" + --api-enable-cors + --daemon -d + --debug -D + --help -h + --icc + --ip-forward + --ip-masq + --iptables + --ipv6 + --selinux-enabled + --tls + --tlsverify + --version -v + " + case "$prev" in - -H) + --graph|-g) + _filedir -d + return + ;; + --log-level|-l) + COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) ) + return + ;; + --pidfile|-p|--tlscacert|--tlscert|--tlskey) + _filedir + return + ;; + --storage-driver|-s) + COMPREPLY=( $( compgen -W "aufs devicemapper btrfs overlay" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) ) + return + ;; + $main_options_with_args_glob ) return ;; esac case "$cur" in -*) - COMPREPLY=( $( compgen -W "-H" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "$boolean_options $main_options_with_args" -- "$cur" ) ) ;; *) COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) ) @@ -561,6 +609,8 @@ _docker_run() { --sig-proxy " + local options_with_args_glob=$(__docker_to_extglob "$options_with_args") + case "$prev" in --add-host) case "$cur" in @@ -677,7 +727,7 @@ _docker_run() { __docker_containers_all return ;; - --cpuset|--cpu-shares|-c|--dns|--dns-search|--entrypoint|--expose|--hostname|-h|--lxc-conf|--mac-address|--memory|-m|--name|-n|--publish|-p|--user|-u|--workdir|-w) + $options_with_args_glob ) return ;; esac @@ -687,7 +737,7 @@ _docker_run() { COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) ;; *) - local counter=$( __docker_pos_first_nonflag $( echo $options_with_args | tr -d "\n" | tr " " "|" ) ) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags_and_ids @@ -801,6 +851,9 @@ _docker_wait() { } _docker() { + local previous_extglob_setting=$(shopt -p extglob) + shopt -s extglob + local commands=( attach build @@ -841,6 +894,33 @@ _docker() { wait ) + local main_options_with_args=" + --bip + --bridge -b + --dns + --dns-search + --exec-driver -e + --fixed-cidr + --fixed-cidr-v6 + --graph -g + --group -G + --host -H + --insecure-registry + --ip + --label + --log-level -l + --mtu + --pidfile -p + --registry-mirror + --storage-driver -s + --storage-opt + --tlscacert + --tlscert + --tlskey + " + + local main_options_with_args_glob=$(__docker_to_extglob "$main_options_with_args") + COMPREPLY=() local cur prev words cword _get_comp_words_by_ref -n : cur prev words cword @@ -849,7 +929,7 @@ _docker() { local counter=1 while [ $counter -lt $cword ]; do case "${words[$counter]}" in - -H) + $main_options_with_args_glob ) (( counter++ )) ;; -*) @@ -867,6 +947,7 @@ _docker() { local completions_func=_docker_${command} declare -F $completions_func >/dev/null && $completions_func + eval "$previous_extglob_setting" return 0 } From 58c142bcfa2b9edce7efe72d393e9f90b9df9927 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Wed, 21 Jan 2015 12:11:53 -0800 Subject: [PATCH 012/653] Split API Version header when checking for v2 Since the Docker-Distribution-API-Version header value may contain multiple space delimited versions as well as many instances of the header key, the header value is now split on whitespace characters to iterate over all versions that may be listed in one instance of the header. 
Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- registry/endpoint.go | 11 +++++++---- registry/endpoint_test.go | 4 +++- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/registry/endpoint.go b/registry/endpoint.go index 72bcce4aae..de9c1f867a 100644 --- a/registry/endpoint.go +++ b/registry/endpoint.go @@ -231,10 +231,13 @@ func (e *Endpoint) pingV2() (RegistryInfo, error) { // Ensure it supports the v2 Registry API. var supportsV2 bool - for _, versionName := range resp.Header[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] { - if versionName == "registry/2.0" { - supportsV2 = true - break +HeaderLoop: + for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] { + for _, versionName := range strings.Fields(supportedVersions) { + if versionName == "registry/2.0" { + supportsV2 = true + break HeaderLoop + } } } diff --git a/registry/endpoint_test.go b/registry/endpoint_test.go index ef2589994a..00c27b448c 100644 --- a/registry/endpoint_test.go +++ b/registry/endpoint_test.go @@ -42,7 +42,9 @@ func TestValidateEndpointAmbiguousAPIVersion(t *testing.T) { }) requireBasicAuthHandlerV2 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") + // This mock server supports v2.0, v2.1, v42.0, and v100.0 + w.Header().Add("Docker-Distribution-API-Version", "registry/100.0 registry/42.0") + w.Header().Add("Docker-Distribution-API-Version", "registry/2.0 registry/2.1") requireBasicAuthHandler.ServeHTTP(w, r) }) From d25a65375c880017ac0c516389b0b7afde810517 Mon Sep 17 00:00:00 2001 From: "Andrew C. Bodine" Date: Tue, 16 Dec 2014 15:06:35 -0800 Subject: [PATCH 013/653] Closes #9311 Handles container id/name collisions against daemon functionalities according to #8069 Signed-off-by: Andrew C. 
Bodine --- api/client/commands.go | 6 ++ api/server/server.go | 2 +- builder/internals.go | 12 ++-- daemon/attach.go | 6 +- daemon/changes.go | 41 +++++++----- daemon/commit.go | 6 +- daemon/container.go | 19 ++++-- daemon/copy.go | 27 ++++---- daemon/create.go | 6 +- daemon/daemon.go | 58 ++++++++++------ daemon/daemon_test.go | 101 ++++++++++++++++++++++++++++ daemon/delete.go | 8 +-- daemon/exec.go | 7 +- daemon/export.go | 31 +++++---- daemon/inspect.go | 100 +++++++++++++-------------- daemon/kill.go | 29 ++++---- daemon/list.go | 12 ++-- daemon/logs.go | 6 +- daemon/pause.go | 12 ++-- daemon/rename.go | 6 +- daemon/resize.go | 14 ++-- daemon/restart.go | 14 ++-- daemon/start.go | 8 +-- daemon/stop.go | 20 +++--- daemon/top.go | 108 +++++++++++++++--------------- daemon/volumes.go | 6 +- daemon/wait.go | 11 +-- integration/api_test.go | 2 +- integration/runtime_test.go | 75 ++++++++++++++------- integration/utils_test.go | 8 +-- pkg/truncindex/truncindex.go | 22 +++--- pkg/truncindex/truncindex_test.go | 5 ++ 32 files changed, 478 insertions(+), 310 deletions(-) create mode 100644 daemon/daemon_test.go diff --git a/api/client/commands.go b/api/client/commands.go index e42c6f26ed..f3775c95f0 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -862,6 +862,12 @@ func (cli *DockerCli) CmdInspect(args ...string) error { for _, name := range cmd.Args() { obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) if err != nil { + if strings.Contains(err.Error(), "Too many") { + fmt.Fprintf(cli.err, "Error: %s", err.Error()) + status = 1 + continue + } + obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false)) if err != nil { if strings.Contains(err.Error(), "No such") { diff --git a/api/server/server.go b/api/server/server.go index d5cdbd00cc..0184f5cf87 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -1132,7 +1132,7 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp w.Header().Set("Content-Type", "application/x-tar") if err := job.Run(); err != nil { log.Errorf("%s", err.Error()) - if strings.Contains(strings.ToLower(err.Error()), "no such container") { + if strings.Contains(strings.ToLower(err.Error()), "no such id") { w.WriteHeader(http.StatusNotFound) } else if strings.Contains(err.Error(), "no such file or directory") { return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"]) diff --git a/builder/internals.go b/builder/internals.go index 830da72725..99484600db 100644 --- a/builder/internals.go +++ b/builder/internals.go @@ -86,9 +86,9 @@ func (b *Builder) commit(id string, autoCmd []string, comment string) error { } defer container.Unmount() } - container := b.Daemon.Get(id) - if container == nil { - return fmt.Errorf("An error occured while creating the container") + container, err := b.Daemon.Get(id) + if err != nil { + return err } // Note: Actually copy the struct @@ -709,7 +709,11 @@ func fixPermissions(source, destination string, uid, gid int, destExisted bool) func (b *Builder) clearTmp() { for c := range b.TmpContainers { - tmp := b.Daemon.Get(c) + tmp, err := b.Daemon.Get(c) + if err != nil { + fmt.Fprint(b.OutStream, err.Error()) + } + if err := b.Daemon.Destroy(tmp); err != nil { fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) return diff --git a/daemon/attach.go b/daemon/attach.go index 881b021e17..89af4360f4 100644 --- a/daemon/attach.go +++ b/daemon/attach.go @@ -28,9 
+28,9 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { stderr = job.GetenvBool("stderr") ) - container := daemon.Get(name) - if container == nil { - return job.Errorf("No such container: %s", name) + container, err := daemon.Get(name) + if err != nil { + return job.Error(err) } //logs diff --git a/daemon/changes.go b/daemon/changes.go index 1e5726eda8..faa4323145 100644 --- a/daemon/changes.go +++ b/daemon/changes.go @@ -9,24 +9,29 @@ func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status { return job.Errorf("Usage: %s CONTAINER", job.Name) } name := job.Args[0] - if container := daemon.Get(name); container != nil { - outs := engine.NewTable("", 0) - changes, err := container.Changes() - if err != nil { - return job.Error(err) - } - for _, change := range changes { - out := &engine.Env{} - if err := out.Import(change); err != nil { - return job.Error(err) - } - outs.Add(out) - } - if _, err := outs.WriteListTo(job.Stdout); err != nil { - return job.Error(err) - } - } else { - return job.Errorf("No such container: %s", name) + + container, error := daemon.Get(name) + if error != nil { + return job.Error(error) } + + outs := engine.NewTable("", 0) + changes, err := container.Changes() + if err != nil { + return job.Error(err) + } + + for _, change := range changes { + out := &engine.Env{} + if err := out.Import(change); err != nil { + return job.Error(err) + } + outs.Add(out) + } + + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK } diff --git a/daemon/commit.go b/daemon/commit.go index 06d0465adc..7c83c60cc4 100644 --- a/daemon/commit.go +++ b/daemon/commit.go @@ -12,9 +12,9 @@ func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status { } name := job.Args[0] - container := daemon.Get(name) - if container == nil { - return job.Errorf("No such container: %s", name) + container, err := daemon.Get(name) + if err != nil { + return job.Error(err) } var ( diff --git a/daemon/container.go b/daemon/container.go index c1c215ffee..45b07abb48 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -1113,7 +1113,12 @@ func (container *Container) updateParentsHosts() error { if ref.ParentID == "0" { continue } - c := container.daemon.Get(ref.ParentID) + + c, err := container.daemon.Get(ref.ParentID) + if err != nil { + log.Error(err) + } + if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() { log.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress) if err := etchosts.Update(c.HostsPath, container.NetworkSettings.IPAddress, ref.Name); err != nil { @@ -1382,9 +1387,9 @@ func (container *Container) GetMountLabel() string { func (container *Container) getIpcContainer() (*Container, error) { containerID := container.hostConfig.IpcMode.Container() - c := container.daemon.Get(containerID) - if c == nil { - return nil, fmt.Errorf("no such container to join IPC: %s", containerID) + c, err := container.daemon.Get(containerID) + if err != nil { + return nil, err } if !c.IsRunning() { return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID) @@ -1399,9 +1404,9 @@ func (container *Container) getNetworkedContainer() (*Container, error) { if len(parts) != 2 { return nil, fmt.Errorf("no container specified to join network") } - nc := container.daemon.Get(parts[1]) - if nc == nil { - return nil, fmt.Errorf("no such container to join network: %s", parts[1]) + nc, err := 
container.daemon.Get(parts[1]) + if err != nil { + return nil, err } if !nc.IsRunning() { return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1]) diff --git a/daemon/copy.go b/daemon/copy.go index 9d18b010c0..d42f450fdb 100644 --- a/daemon/copy.go +++ b/daemon/copy.go @@ -16,18 +16,19 @@ func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status { resource = job.Args[1] ) - if container := daemon.Get(name); container != nil { - - data, err := container.Copy(resource) - if err != nil { - return job.Error(err) - } - defer data.Close() - - if _, err := io.Copy(job.Stdout, data); err != nil { - return job.Error(err) - } - return engine.StatusOK + container, err := daemon.Get(name) + if err != nil { + return job.Error(err) } - return job.Errorf("No such container: %s", name) + + data, err := container.Copy(resource) + if err != nil { + return job.Error(err) + } + defer data.Close() + + if _, err := io.Copy(job.Stdout, data); err != nil { + return job.Error(err) + } + return engine.StatusOK } diff --git a/daemon/create.go b/daemon/create.go index 785b0cc345..ebe4ccf02e 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -129,9 +129,9 @@ func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode, pidMode run return label.DisableSecOpt(), nil } if ipcContainer := ipcMode.Container(); ipcContainer != "" { - c := daemon.Get(ipcContainer) - if c == nil { - return nil, fmt.Errorf("no such container to join IPC: %s", ipcContainer) + c, err := daemon.Get(ipcContainer) + if err != nil { + return nil, err } if !c.IsRunning() { return nil, fmt.Errorf("cannot join IPC of a non running container: %s", ipcContainer) diff --git a/daemon/daemon.go b/daemon/daemon.go index c03e9d7aa8..dd36c2889a 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -155,28 +155,39 @@ func (daemon *Daemon) Install(eng *engine.Engine) error { return nil } -// Get looks for a container by the specified ID or name, and returns it. -// If the container is not found, or if an error occurs, nil is returned. -func (daemon *Daemon) Get(name string) *Container { - id, err := daemon.idIndex.Get(name) - if err == nil { - return daemon.containers.Get(id) +// Get looks for a container with the provided prefix +func (daemon *Daemon) Get(prefix string) (*Container, error) { + if containerByID := daemon.containers.Get(prefix); containerByID != nil { + + // prefix is an exact match to a full container ID + return containerByID, nil } - if c, _ := daemon.GetByName(name); c != nil { - return c + // Either GetByName finds an entity matching prefix exactly, or it doesn't. + // Check value of containerByName and ignore any errors + containerByName, _ := daemon.GetByName(prefix) + containerId, indexError := daemon.idIndex.Get(prefix) + + if containerByName != nil { + + // prefix is an exact match to a full container Name + return containerByName, nil } - if err == truncindex.ErrDuplicateID { - log.Errorf("Short ID %s is ambiguous: please retry with more characters or use the full ID.\n", name) + if containerId != "" { + + // prefix is a fuzzy match to a container ID + return daemon.containers.Get(containerId), nil } - return nil + + return nil, indexError } // Exists returns a true if a container of the specified ID or name exists, // false otherwise. 
func (daemon *Daemon) Exists(id string) bool { - return daemon.Get(id) != nil + c, _ := daemon.Get(id) + return c != nil } func (daemon *Daemon) containerRoot(id string) string { @@ -715,9 +726,9 @@ func (daemon *Daemon) Children(name string) (map[string]*Container, error) { children := make(map[string]*Container) err = daemon.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error { - c := daemon.Get(e.ID()) - if c == nil { - return fmt.Errorf("Could not get container for name %s and id %s", e.ID(), p) + c, err := daemon.Get(e.ID()) + if err != nil { + return err } children[p] = c return nil @@ -754,7 +765,10 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig. if err != nil { return err } - child := daemon.Get(parts["name"]) + child, err := daemon.Get(parts["name"]) + if err != nil { + return err + } if child == nil { return fmt.Errorf("Could not get container for %s", parts["name"]) } @@ -1100,18 +1114,18 @@ func (daemon *Daemon) Stats(c *Container) (*execdriver.ResourceStats, error) { } func (daemon *Daemon) SubscribeToContainerStats(name string) (chan interface{}, error) { - c := daemon.Get(name) - if c == nil { - return nil, fmt.Errorf("no such container") + c, err := daemon.Get(name) + if err != nil { + return nil, err } ch := daemon.statsCollector.collect(c) return ch, nil } func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface{}) error { - c := daemon.Get(name) - if c == nil { - return fmt.Errorf("no such container") + c, err := daemon.Get(name) + if err != nil { + return err } daemon.statsCollector.unsubscribe(c, ch) return nil diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go new file mode 100644 index 0000000000..43030b6f9b --- /dev/null +++ b/daemon/daemon_test.go @@ -0,0 +1,101 @@ +package daemon + +import ( + "github.com/docker/docker/pkg/graphdb" + "github.com/docker/docker/pkg/truncindex" + "os" + "path" + "testing" +) + +// +// https://github.com/docker/docker/issues/8069 +// + +func TestGet(t *testing.T) { + c1 := &Container{ + ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + Name: "tender_bardeen", + } + c2 := &Container{ + ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", + Name: "drunk_hawking", + } + c3 := &Container{ + ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", + Name: "3cdbd1aa", + } + c4 := &Container{ + ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", + Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + } + c5 := &Container{ + ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", + Name: "d22d69a2b896", + } + + store := &contStore{ + s: map[string]*Container{ + c1.ID: c1, + c2.ID: c2, + c3.ID: c3, + c4.ID: c4, + c5.ID: c5, + }, + } + + index := truncindex.NewTruncIndex([]string{}) + index.Add(c1.ID) + index.Add(c2.ID) + index.Add(c3.ID) + index.Add(c4.ID) + index.Add(c5.ID) + + daemonTestDbPath := path.Join(os.TempDir(), "daemon_test.db") + graph, err := graphdb.NewSqliteConn(daemonTestDbPath) + if err != nil { + t.Fatalf("Failed to create daemon test sqlite database at %s", daemonTestDbPath) + } + graph.Set(c1.Name, c1.ID) + graph.Set(c2.Name, c2.ID) + graph.Set(c3.Name, c3.ID) + graph.Set(c4.Name, c4.ID) + graph.Set(c5.Name, c5.ID) + + daemon := &Daemon{ + containers: store, + idIndex: index, + containerGraph: graph, + } + + if container, _ := daemon.Get("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 { + 
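+		// Per the Get implementation above, lookup order is exact full ID first,
+		// then exact name, then unambiguous ID prefix, so c2 must be returned here.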
t.Fatal("Should explicitly match full container IDs") + } + + if container, _ := daemon.Get("75fb0b8009"); container != c4 { + t.Fatal("Should match a partial ID") + } + + if container, _ := daemon.Get("drunk_hawking"); container != c2 { + t.Fatal("Should match a full name") + } + + // c3.Name is a partial match for both c3.ID and c2.ID + if c, _ := daemon.Get("3cdbd1aa"); c != c3 { + t.Fatal("Should match a full name even though it collides with another container's ID") + } + + if container, _ := daemon.Get("d22d69a2b896"); container != c5 { + t.Fatal("Should match a container where the provided prefix is an exact match to the it's name, and is also a prefix for it's ID") + } + + if _, err := daemon.Get("3cdbd1"); err == nil { + t.Fatal("Should return an error when provided a prefix that partially matches multiple container ID's") + } + + if _, err := daemon.Get("nothing"); err == nil { + t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID") + } + + os.Remove(daemonTestDbPath) +} diff --git a/daemon/delete.go b/daemon/delete.go index 59c7651785..78daa2aab0 100644 --- a/daemon/delete.go +++ b/daemon/delete.go @@ -17,10 +17,10 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status { removeVolume := job.GetenvBool("removeVolume") removeLink := job.GetenvBool("removeLink") forceRemove := job.GetenvBool("forceRemove") - container := daemon.Get(name) - if container == nil { - return job.Errorf("No such container: %s", name) + container, err := daemon.Get(name) + if err != nil { + return job.Error(err) } if removeLink { @@ -36,7 +36,7 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status { if pe == nil { return job.Errorf("Cannot get parent %s for name %s", parent, name) } - parentContainer := daemon.Get(pe.ID()) + parentContainer, _ := daemon.Get(pe.ID()) if parentContainer != nil { parentContainer.DisableLink(n) diff --git a/daemon/exec.go b/daemon/exec.go index 8bb4e72d13..9881ed0896 100644 --- a/daemon/exec.go +++ b/daemon/exec.go @@ -97,10 +97,9 @@ func (d *Daemon) unregisterExecCommand(execConfig *execConfig) { } func (d *Daemon) getActiveContainer(name string) (*Container, error) { - container := d.Get(name) - - if container == nil { - return nil, fmt.Errorf("No such container: %s", name) + container, err := d.Get(name) + if err != nil { + return nil, err } if !container.IsRunning() { diff --git a/daemon/export.go b/daemon/export.go index bc0f14a3bb..859c80f9bb 100644 --- a/daemon/export.go +++ b/daemon/export.go @@ -11,20 +11,23 @@ func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status { return job.Errorf("Usage: %s container_id", job.Name) } name := job.Args[0] - if container := daemon.Get(name); container != nil { - data, err := container.Export() - if err != nil { - return job.Errorf("%s: %s", name, err) - } - defer data.Close() - // Stream the entire contents of the container (basically a volatile snapshot) - if _, err := io.Copy(job.Stdout, data); err != nil { - return job.Errorf("%s: %s", name, err) - } - // FIXME: factor job-specific LogEvent to engine.Job.Run() - container.LogEvent("export") - return engine.StatusOK + container, err := daemon.Get(name) + if err != nil { + return job.Error(err) } - return job.Errorf("No such container: %s", name) + + data, err := container.Export() + if err != nil { + return job.Errorf("%s: %s", name, err) + } + defer data.Close() + + // Stream the entire contents of the container (basically a volatile snapshot) + if _, err := io.Copy(job.Stdout, data); 
err != nil { + return job.Errorf("%s: %s", name, err) + } + // FIXME: factor job-specific LogEvent to engine.Job.Run() + container.LogEvent("export") + return engine.StatusOK } diff --git a/daemon/inspect.go b/daemon/inspect.go index 37d00573bc..96095c9fa0 100644 --- a/daemon/inspect.go +++ b/daemon/inspect.go @@ -13,60 +13,62 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status { return job.Errorf("usage: %s NAME", job.Name) } name := job.Args[0] - if container := daemon.Get(name); container != nil { - container.Lock() - defer container.Unlock() - if job.GetenvBool("raw") { - b, err := json.Marshal(&struct { - *Container - HostConfig *runconfig.HostConfig - }{container, container.hostConfig}) - if err != nil { - return job.Error(err) - } - job.Stdout.Write(b) - return engine.StatusOK - } + container, err := daemon.Get(name) + if err != nil { + return job.Error(err) + } - out := &engine.Env{} - out.SetJson("Id", container.ID) - out.SetAuto("Created", container.Created) - out.SetJson("Path", container.Path) - out.SetList("Args", container.Args) - out.SetJson("Config", container.Config) - out.SetJson("State", container.State) - out.Set("Image", container.ImageID) - out.SetJson("NetworkSettings", container.NetworkSettings) - out.Set("ResolvConfPath", container.ResolvConfPath) - out.Set("HostnamePath", container.HostnamePath) - out.Set("HostsPath", container.HostsPath) - out.SetJson("Name", container.Name) - out.SetInt("RestartCount", container.RestartCount) - out.Set("Driver", container.Driver) - out.Set("ExecDriver", container.ExecDriver) - out.Set("MountLabel", container.MountLabel) - out.Set("ProcessLabel", container.ProcessLabel) - out.SetJson("Volumes", container.Volumes) - out.SetJson("VolumesRW", container.VolumesRW) - out.SetJson("AppArmorProfile", container.AppArmorProfile) - - out.SetList("ExecIDs", container.GetExecIDs()) - - if children, err := daemon.Children(container.Name); err == nil { - for linkAlias, child := range children { - container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) - } - } - - out.SetJson("HostConfig", container.hostConfig) - - container.hostConfig.Links = nil - if _, err := out.WriteTo(job.Stdout); err != nil { + container.Lock() + defer container.Unlock() + if job.GetenvBool("raw") { + b, err := json.Marshal(&struct { + *Container + HostConfig *runconfig.HostConfig + }{container, container.hostConfig}) + if err != nil { return job.Error(err) } + job.Stdout.Write(b) return engine.StatusOK } - return job.Errorf("No such container: %s", name) + + out := &engine.Env{} + out.SetJson("Id", container.ID) + out.SetAuto("Created", container.Created) + out.SetJson("Path", container.Path) + out.SetList("Args", container.Args) + out.SetJson("Config", container.Config) + out.SetJson("State", container.State) + out.Set("Image", container.ImageID) + out.SetJson("NetworkSettings", container.NetworkSettings) + out.Set("ResolvConfPath", container.ResolvConfPath) + out.Set("HostnamePath", container.HostnamePath) + out.Set("HostsPath", container.HostsPath) + out.SetJson("Name", container.Name) + out.SetInt("RestartCount", container.RestartCount) + out.Set("Driver", container.Driver) + out.Set("ExecDriver", container.ExecDriver) + out.Set("MountLabel", container.MountLabel) + out.Set("ProcessLabel", container.ProcessLabel) + out.SetJson("Volumes", container.Volumes) + out.SetJson("VolumesRW", container.VolumesRW) + out.SetJson("AppArmorProfile", container.AppArmorProfile) + + out.SetList("ExecIDs", 
container.GetExecIDs()) + + if children, err := daemon.Children(container.Name); err == nil { + for linkAlias, child := range children { + container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) + } + } + + out.SetJson("HostConfig", container.hostConfig) + + container.hostConfig.Links = nil + if _, err := out.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK } func (daemon *Daemon) ContainerExecInspect(job *engine.Job) engine.Status { diff --git a/daemon/kill.go b/daemon/kill.go index f5f5897c88..84094f8fbf 100644 --- a/daemon/kill.go +++ b/daemon/kill.go @@ -38,22 +38,23 @@ func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status { } } - if container := daemon.Get(name); container != nil { - // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) - if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { - if err := container.Kill(); err != nil { - return job.Errorf("Cannot kill container %s: %s", name, err) - } - container.LogEvent("kill") - } else { - // Otherwise, just send the requested signal - if err := container.KillSig(int(sig)); err != nil { - return job.Errorf("Cannot kill container %s: %s", name, err) - } - // FIXME: Add event for signals + container, err := daemon.Get(name) + if err != nil { + return job.Error(err) + } + + // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) + if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { + if err := container.Kill(); err != nil { + return job.Errorf("Cannot kill container %s: %s", name, err) } + container.LogEvent("kill") } else { - return job.Errorf("No such container: %s", name) + // Otherwise, just send the requested signal + if err := container.KillSig(int(sig)); err != nil { + return job.Errorf("Cannot kill container %s: %s", name, err) + } + // FIXME: Add event for signals } return engine.StatusOK } diff --git a/daemon/list.go b/daemon/list.go index 5197d9986c..676ff53f27 100644 --- a/daemon/list.go +++ b/daemon/list.go @@ -61,16 +61,16 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status { var beforeCont, sinceCont *Container if before != "" { - beforeCont = daemon.Get(before) - if beforeCont == nil { - return job.Error(fmt.Errorf("Could not find container with name or id %s", before)) + beforeCont, err = daemon.Get(before) + if err != nil { + return job.Error(err) } } if since != "" { - sinceCont = daemon.Get(since) - if sinceCont == nil { - return job.Error(fmt.Errorf("Could not find container with name or id %s", since)) + sinceCont, err = daemon.Get(since) + if err != nil { + return job.Error(err) } } diff --git a/daemon/logs.go b/daemon/logs.go index 6c9373f737..28ff9be4ee 100644 --- a/daemon/logs.go +++ b/daemon/logs.go @@ -40,9 +40,9 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status { if tail == "" { tail = "all" } - container := daemon.Get(name) - if container == nil { - return job.Errorf("No such container: %s", name) + container, err := daemon.Get(name) + if err != nil { + return job.Error(err) } cLog, err := container.ReadLog("json") if err != nil && os.IsNotExist(err) { diff --git a/daemon/pause.go b/daemon/pause.go index 0e4323d9a8..af943de103 100644 --- a/daemon/pause.go +++ b/daemon/pause.go @@ -9,9 +9,9 @@ func (daemon *Daemon) ContainerPause(job *engine.Job) engine.Status { return job.Errorf("Usage: %s CONTAINER", job.Name) } name := job.Args[0] - container := daemon.Get(name) - if container == nil { - return job.Errorf("No such 
container: %s", name) + container, err := daemon.Get(name) + if err != nil { + return job.Error(err) } if err := container.Pause(); err != nil { return job.Errorf("Cannot pause container %s: %s", name, err) @@ -25,9 +25,9 @@ func (daemon *Daemon) ContainerUnpause(job *engine.Job) engine.Status { return job.Errorf("Usage: %s CONTAINER", job.Name) } name := job.Args[0] - container := daemon.Get(name) - if container == nil { - return job.Errorf("No such container: %s", name) + container, err := daemon.Get(name) + if err != nil { + return job.Error(err) } if err := container.Unpause(); err != nil { return job.Errorf("Cannot unpause container %s: %s", name, err) diff --git a/daemon/rename.go b/daemon/rename.go index 1dedc7d3a7..3d315252ae 100644 --- a/daemon/rename.go +++ b/daemon/rename.go @@ -11,9 +11,9 @@ func (daemon *Daemon) ContainerRename(job *engine.Job) engine.Status { oldName := job.Args[0] newName := job.Args[1] - container := daemon.Get(oldName) - if container == nil { - return job.Errorf("No such container: %s", oldName) + container, err := daemon.Get(oldName) + if err != nil { + return job.Error(err) } oldName = container.Name diff --git a/daemon/resize.go b/daemon/resize.go index 68c070370a..860f79eba4 100644 --- a/daemon/resize.go +++ b/daemon/resize.go @@ -19,14 +19,14 @@ func (daemon *Daemon) ContainerResize(job *engine.Job) engine.Status { if err != nil { return job.Error(err) } - - if container := daemon.Get(name); container != nil { - if err := container.Resize(height, width); err != nil { - return job.Error(err) - } - return engine.StatusOK + container, err := daemon.Get(name) + if err != nil { + return job.Error(err) } - return job.Errorf("No such container: %s", name) + if err := container.Resize(height, width); err != nil { + return job.Error(err) + } + return engine.StatusOK } func (daemon *Daemon) ContainerExecResize(job *engine.Job) engine.Status { diff --git a/daemon/restart.go b/daemon/restart.go index bcc057156d..bcde628d38 100644 --- a/daemon/restart.go +++ b/daemon/restart.go @@ -15,13 +15,13 @@ func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status { if job.EnvExists("t") { t = job.GetenvInt("t") } - if container := daemon.Get(name); container != nil { - if err := container.Restart(int(t)); err != nil { - return job.Errorf("Cannot restart container %s: %s\n", name, err) - } - container.LogEvent("restart") - } else { - return job.Errorf("No such container: %s\n", name) + container, err := daemon.Get(name) + if err != nil { + return job.Error(err) } + if err := container.Restart(int(t)); err != nil { + return job.Errorf("Cannot restart container %s: %s\n", name, err) + } + container.LogEvent("restart") return engine.StatusOK } diff --git a/daemon/start.go b/daemon/start.go index d6655189d7..4a35555dca 100644 --- a/daemon/start.go +++ b/daemon/start.go @@ -14,12 +14,12 @@ func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status { return job.Errorf("Usage: %s container_id", job.Name) } var ( - name = job.Args[0] - container = daemon.Get(name) + name = job.Args[0] ) - if container == nil { - return job.Errorf("No such container: %s", name) + container, err := daemon.Get(name) + if err != nil { + return job.Error(err) } if container.IsPaused() { diff --git a/daemon/stop.go b/daemon/stop.go index 1a098a1ad3..e2f1d284a8 100644 --- a/daemon/stop.go +++ b/daemon/stop.go @@ -15,16 +15,16 @@ func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status { if job.EnvExists("t") { t = job.GetenvInt("t") } - if container := daemon.Get(name); 
container != nil { - if !container.IsRunning() { - return job.Errorf("Container already stopped") - } - if err := container.Stop(int(t)); err != nil { - return job.Errorf("Cannot stop container %s: %s\n", name, err) - } - container.LogEvent("stop") - } else { - return job.Errorf("No such container: %s\n", name) + container, err := daemon.Get(name) + if err != nil { + return job.Error(err) } + if !container.IsRunning() { + return job.Errorf("Container already stopped") + } + if err := container.Stop(int(t)); err != nil { + return job.Errorf("Cannot stop container %s: %s\n", name, err) + } + container.LogEvent("stop") return engine.StatusOK } diff --git a/daemon/top.go b/daemon/top.go index 4d916ee5dc..782cc83dcf 100644 --- a/daemon/top.go +++ b/daemon/top.go @@ -21,59 +21,59 @@ func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status { psArgs = job.Args[1] } - if container := daemon.Get(name); container != nil { - if !container.IsRunning() { - return job.Errorf("Container %s is not running", name) - } - pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID) - if err != nil { - return job.Error(err) - } - output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() - if err != nil { - return job.Errorf("Error running ps: %s", err) - } - - lines := strings.Split(string(output), "\n") - header := strings.Fields(lines[0]) - out := &engine.Env{} - out.SetList("Titles", header) - - pidIndex := -1 - for i, name := range header { - if name == "PID" { - pidIndex = i - } - } - if pidIndex == -1 { - return job.Errorf("Couldn't find PID field in ps output") - } - - processes := [][]string{} - for _, line := range lines[1:] { - if len(line) == 0 { - continue - } - fields := strings.Fields(line) - p, err := strconv.Atoi(fields[pidIndex]) - if err != nil { - return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) - } - - for _, pid := range pids { - if pid == p { - // Make sure number of fields equals number of header titles - // merging "overhanging" fields - process := fields[:len(header)-1] - process = append(process, strings.Join(fields[len(header)-1:], " ")) - processes = append(processes, process) - } - } - } - out.SetJson("Processes", processes) - out.WriteTo(job.Stdout) - return engine.StatusOK - + container, err := daemon.Get(name) + if err != nil { + return job.Error(err) } - return job.Errorf("No such container: %s", name) + if !container.IsRunning() { + return job.Errorf("Container %s is not running", name) + } + pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID) + if err != nil { + return job.Error(err) + } + output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() + if err != nil { + return job.Errorf("Error running ps: %s", err) + } + + lines := strings.Split(string(output), "\n") + header := strings.Fields(lines[0]) + out := &engine.Env{} + out.SetList("Titles", header) + + pidIndex := -1 + for i, name := range header { + if name == "PID" { + pidIndex = i + } + } + if pidIndex == -1 { + return job.Errorf("Couldn't find PID field in ps output") + } + + processes := [][]string{} + for _, line := range lines[1:] { + if len(line) == 0 { + continue + } + fields := strings.Fields(line) + p, err := strconv.Atoi(fields[pidIndex]) + if err != nil { + return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + } + + for _, pid := range pids { + if pid == p { + // Make sure number of fields equals number of header titles + // merging "overhanging" fields + process := fields[:len(header)-1] + process = 
append(process, strings.Join(fields[len(header)-1:], " ")) + processes = append(processes, process) + } + } + } + out.SetJson("Processes", processes) + out.WriteTo(job.Stdout) + return engine.StatusOK } diff --git a/daemon/volumes.go b/daemon/volumes.go index 7b49733839..c6f1b9930a 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -266,9 +266,9 @@ func (container *Container) applyVolumesFrom() error { continue } - c := container.daemon.Get(id) - if c == nil { - return fmt.Errorf("container %s not found, impossible to mount its volumes", id) + c, err := container.daemon.Get(id) + if err != nil { + return err } var ( diff --git a/daemon/wait.go b/daemon/wait.go index a1f657c353..e2747a3e42 100644 --- a/daemon/wait.go +++ b/daemon/wait.go @@ -11,10 +11,11 @@ func (daemon *Daemon) ContainerWait(job *engine.Job) engine.Status { return job.Errorf("Usage: %s", job.Name) } name := job.Args[0] - if container := daemon.Get(name); container != nil { - status, _ := container.WaitStop(-1 * time.Second) - job.Printf("%d\n", status) - return engine.StatusOK + container, err := daemon.Get(name) + if err != nil { + return job.Errorf("%s: %s", job.Name, err.Error()) } - return job.Errorf("%s: No such container: %s", job.Name, name) + status, _ := container.WaitStop(-1 * time.Second) + job.Printf("%d\n", status) + return engine.StatusOK } diff --git a/integration/api_test.go b/integration/api_test.go index ab2c3070b4..8daa4d46f3 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -325,7 +325,7 @@ func TestPostCreateNull(t *testing.T) { containerAssertExists(eng, containerID, t) - c := daemon.Get(containerID) + c, _ := daemon.Get(containerID) if c.Config.Cpuset != "" { t.Fatalf("Cpuset should have been empty - instead its:" + c.Config.Cpuset) } diff --git a/integration/runtime_test.go b/integration/runtime_test.go index a436995fd3..73ed793043 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -282,12 +282,12 @@ func TestDaemonCreate(t *testing.T) { } // Make sure we can get the container with Get() - if daemon.Get(container.ID) == nil { + if _, err := daemon.Get(container.ID); err != nil { t.Errorf("Unable to get newly created container") } // Make sure it is the right container - if daemon.Get(container.ID) != container { + if c, _ := daemon.Get(container.ID); c != container { t.Errorf("Get() returned the wrong container") } @@ -383,8 +383,8 @@ func TestDestroy(t *testing.T) { } // Make sure daemon.Get() refuses to return the unexisting container - if daemon.Get(container.ID) != nil { - t.Errorf("Unable to get newly created container") + if c, _ := daemon.Get(container.ID); c != nil { + t.Errorf("Got a container that should not exist") } // Test double destroy @@ -407,16 +407,16 @@ func TestGet(t *testing.T) { container3, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) defer daemon.Destroy(container3) - if daemon.Get(container1.ID) != container1 { - t.Errorf("Get(test1) returned %v while expecting %v", daemon.Get(container1.ID), container1) + if c, _ := daemon.Get(container1.ID); c != container1 { + t.Errorf("Get(test1) returned %v while expecting %v", c, container1) } - if daemon.Get(container2.ID) != container2 { - t.Errorf("Get(test2) returned %v while expecting %v", daemon.Get(container2.ID), container2) + if c, _ := daemon.Get(container2.ID); c != container2 { + t.Errorf("Get(test2) returned %v while expecting %v", c, container2) } - if daemon.Get(container3.ID) != container3 { - t.Errorf("Get(test3) returned %v while expecting %v", 
daemon.Get(container3.ID), container3) + if c, _ := daemon.Get(container3.ID); c != container3 { + t.Errorf("Get(test3) returned %v while expecting %v", c, container3) } } @@ -485,9 +485,9 @@ func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daem t.Fatal(err) } - container := daemon.Get(id) - if container == nil { - t.Fatalf("Couldn't fetch test container %s", id) + container, err := daemon.Get(id) + if err != nil { + t.Fatal(err) } setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() { @@ -646,8 +646,8 @@ func TestRestore(t *testing.T) { if runningCount != 0 { t.Fatalf("Expected 0 container alive, %d found", runningCount) } - container3 := daemon2.Get(container1.ID) - if container3 == nil { + container3, err := daemon2.Get(container1.ID) + if err != nil { t.Fatal("Unable to Get container") } if err := container3.Run(); err != nil { @@ -666,16 +666,21 @@ func TestDefaultContainerName(t *testing.T) { t.Fatal(err) } - container := daemon.Get(createNamedTestContainer(eng, config, t, "some_name")) + container, err := daemon.Get(createNamedTestContainer(eng, config, t, "some_name")) + if err != nil { + t.Fatal(err) + } containerID := container.ID if container.Name != "/some_name" { t.Fatalf("Expect /some_name got %s", container.Name) } - if c := daemon.Get("/some_name"); c == nil { + c, err := daemon.Get("/some_name") + if err != nil { t.Fatalf("Couldn't retrieve test container as /some_name") - } else if c.ID != containerID { + } + if c.ID != containerID { t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID) } } @@ -690,14 +695,17 @@ func TestRandomContainerName(t *testing.T) { t.Fatal(err) } - container := daemon.Get(createTestContainer(eng, config, t)) + container, err := daemon.Get(createTestContainer(eng, config, t)) + if err != nil { + t.Fatal(err) + } containerID := container.ID if container.Name == "" { t.Fatalf("Expected not empty container name") } - if c := daemon.Get(container.Name); c == nil { + if c, err := daemon.Get(container.Name); err != nil { log.Fatalf("Could not lookup container %s by its name", container.Name) } else if c.ID != containerID { log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID) @@ -737,13 +745,16 @@ func TestContainerNameValidation(t *testing.T) { t.Fatal(err) } - container := daemon.Get(engine.Tail(outputBuffer, 1)) + container, err := daemon.Get(engine.Tail(outputBuffer, 1)) + if err != nil { + t.Fatal(err) + } if container.Name != "/"+test.Name { t.Fatalf("Expect /%s got %s", test.Name, container.Name) } - if c := daemon.Get("/" + test.Name); c == nil { + if c, err := daemon.Get("/" + test.Name); err != nil { t.Fatalf("Couldn't retrieve test container as /%s", test.Name) } else if c.ID != container.ID { t.Fatalf("Container /%s has ID %s instead of %s", test.Name, c.ID, container.ID) @@ -762,7 +773,10 @@ func TestLinkChildContainer(t *testing.T) { t.Fatal(err) } - container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) + container, err := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) + if err != nil { + t.Fatal(err) + } webapp, err := daemon.GetByName("/webapp") if err != nil { @@ -778,7 +792,10 @@ func TestLinkChildContainer(t *testing.T) { t.Fatal(err) } - childContainer := daemon.Get(createTestContainer(eng, config, t)) + childContainer, err := daemon.Get(createTestContainer(eng, config, t)) + if err != nil { + t.Fatal(err) + } if err := daemon.RegisterLink(webapp, 
childContainer, "db"); err != nil { t.Fatal(err) @@ -804,7 +821,10 @@ func TestGetAllChildren(t *testing.T) { t.Fatal(err) } - container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) + container, err := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) + if err != nil { + t.Fatal(err) + } webapp, err := daemon.GetByName("/webapp") if err != nil { @@ -820,7 +840,10 @@ func TestGetAllChildren(t *testing.T) { t.Fatal(err) } - childContainer := daemon.Get(createTestContainer(eng, config, t)) + childContainer, err := daemon.Get(createTestContainer(eng, config, t)) + if err != nil { + t.Fatal(err) + } if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil { t.Fatal(err) diff --git a/integration/utils_test.go b/integration/utils_test.go index 32ca8e0d62..61a156e476 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -117,7 +117,7 @@ func containerAssertExists(eng *engine.Engine, id string, t Fataler) { func containerAssertNotExists(eng *engine.Engine, id string, t Fataler) { daemon := mkDaemonFromEngine(eng, t) - if c := daemon.Get(id); c != nil { + if c, _ := daemon.Get(id); c != nil { t.Fatal(fmt.Errorf("Container %s should not exist", id)) } } @@ -142,9 +142,9 @@ func assertHttpError(r *httptest.ResponseRecorder, t Fataler) { func getContainer(eng *engine.Engine, id string, t Fataler) *daemon.Container { daemon := mkDaemonFromEngine(eng, t) - c := daemon.Get(id) - if c == nil { - t.Fatal(fmt.Errorf("No such container: %s", id)) + c, err := daemon.Get(id) + if err != nil { + t.Fatal(err) } return c } diff --git a/pkg/truncindex/truncindex.go b/pkg/truncindex/truncindex.go index eec5597306..73c7e24fb4 100644 --- a/pkg/truncindex/truncindex.go +++ b/pkg/truncindex/truncindex.go @@ -10,10 +10,8 @@ import ( ) var ( - // ErrNoID is thrown when attempting to use empty prefixes - ErrNoID = errors.New("prefix can't be empty") - // ErrDuplicateID is thrown when a duplicated id was found - ErrDuplicateID = errors.New("multiple IDs were found") + ErrEmptyPrefix = errors.New("Prefix can't be empty") + ErrAmbiguousPrefix = errors.New("Multiple IDs found with provided prefix") ) func init() { @@ -47,7 +45,7 @@ func (idx *TruncIndex) addID(id string) error { return fmt.Errorf("illegal character: ' '") } if id == "" { - return ErrNoID + return ErrEmptyPrefix } if _, exists := idx.ids[id]; exists { return fmt.Errorf("id already exists: '%s'", id) @@ -87,26 +85,26 @@ func (idx *TruncIndex) Delete(id string) error { // Get retrieves an ID from the TruncIndex. If there are multiple IDs // with the given prefix, an error is thrown. 
func (idx *TruncIndex) Get(s string) (string, error) { - idx.RLock() - defer idx.RUnlock() + if s == "" { + return "", ErrEmptyPrefix + } var ( id string ) - if s == "" { - return "", ErrNoID - } subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { if id != "" { // we haven't found the ID if there are two or more IDs id = "" - return ErrDuplicateID + return ErrAmbiguousPrefix } id = string(prefix) return nil } + idx.RLock() + defer idx.RUnlock() if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { - return "", fmt.Errorf("no such id: %s", s) + return "", err } if id != "" { return id, nil diff --git a/pkg/truncindex/truncindex_test.go b/pkg/truncindex/truncindex_test.go index 32c41c7d76..8ad1634fd4 100644 --- a/pkg/truncindex/truncindex_test.go +++ b/pkg/truncindex/truncindex_test.go @@ -59,6 +59,11 @@ func TestTruncIndex(t *testing.T) { assertIndexGet(t, index, id[:4], "", true) assertIndexGet(t, index, id[:1], "", true) + // An ambiguous id prefix should return an error + if _, err := index.Get(id[:4]); err == nil || err == nil { + t.Fatal("An ambiguous id prefix should return an error") + } + // 7 characters should NOT conflict assertIndexGet(t, index, id[:7], id, false) assertIndexGet(t, index, id2[:7], id2, false) From 9e37a04665395cb98687cd09b05ba33736984547 Mon Sep 17 00:00:00 2001 From: "Andrew C. Bodine" Date: Fri, 16 Jan 2015 18:52:27 -0800 Subject: [PATCH 014/653] Adds test for api attach via websocket Signed-off-by: Andrew C. Bodine --- integration-cli/docker_api_attach_test.go | 66 +++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 integration-cli/docker_api_attach_test.go diff --git a/integration-cli/docker_api_attach_test.go b/integration-cli/docker_api_attach_test.go new file mode 100644 index 0000000000..191060dded --- /dev/null +++ b/integration-cli/docker_api_attach_test.go @@ -0,0 +1,66 @@ +package main + +import ( + "bytes" + "net" + "os/exec" + "testing" + + "code.google.com/p/go.net/websocket" +) + +func TestGetContainersAttachWebsocket(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-dit", "busybox", "cat") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf(out, err) + } + defer deleteAllContainers() + + rwc, err := net.Dial("unix", "/var/run/docker.sock") + if err != nil { + t.Fatal(err) + } + + cleanedContainerID := stripTrailingCharacters(out) + config, err := websocket.NewConfig( + "/containers/"+cleanedContainerID+"/attach/ws?stream=1&stdin=1&stdout=1&stderr=1", + "http://localhost", + ) + if err != nil { + t.Fatal(err) + } + + ws, err := websocket.NewClient(config, rwc) + if err != nil { + t.Fatal(err) + } + defer ws.Close() + + expected := []byte("hello") + actual := make([]byte, len(expected)) + outChan := make(chan string) + go func() { + if _, err := ws.Read(actual); err != nil { + t.Fatal(err) + } + outChan <- "done" + }() + + inChan := make(chan string) + go func() { + if _, err := ws.Write(expected); err != nil { + t.Fatal(err) + } + inChan <- "done" + }() + + <-inChan + <-outChan + + if !bytes.Equal(expected, actual) { + t.Fatal("Expected output on websocket to match input") + } + + logDone("container attach websocket - can echo input via cat") +} From 51060ee07aaf3ff10ac7187558bd665f4ddd3507 Mon Sep 17 00:00:00 2001 From: "Andrew C. Bodine" Date: Wed, 21 Jan 2015 11:52:35 -0800 Subject: [PATCH 015/653] Adds docs for /containers/(id)/attach/ws api endpoint Signed-off-by: Andrew C. 
Bodine --- .../reference/api/docker_remote_api_v1.0.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.1.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.10.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.11.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.12.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.13.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.14.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.15.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.16.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.17.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.2.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.3.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.4.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.5.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.6.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.7.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.8.md | 35 +++++++++++++++++++ .../reference/api/docker_remote_api_v1.9.md | 35 +++++++++++++++++++ 18 files changed, 630 insertions(+) diff --git a/docs/sources/reference/api/docker_remote_api_v1.0.md b/docs/sources/reference/api/docker_remote_api_v1.0.md index 49ff939d6e..399bf7f141 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.0.md +++ b/docs/sources/reference/api/docker_remote_api_v1.0.md @@ -385,6 +385,41 @@ Status Codes: - **404** – no such container - **500** – server error +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. 
Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.1.md b/docs/sources/reference/api/docker_remote_api_v1.1.md index 6cf7ed74bf..7ddb4ee0e6 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.1.md +++ b/docs/sources/reference/api/docker_remote_api_v1.1.md @@ -385,6 +385,41 @@ Status Codes: - **404** – no such container - **500** – server error +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md index 2358da101f..b9f421d38f 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.md +++ b/docs/sources/reference/api/docker_remote_api_v1.10.md @@ -539,6 +539,41 @@ Status Codes: 4. Read the extracted size and output it on the correct output 5. Goto 1) +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index 6303f708e4..97f6c56700 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -574,6 +574,41 @@ Status Codes: 4. Read the extracted size and output it on the correct output 5. 
Goto 1) +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.12.md b/docs/sources/reference/api/docker_remote_api_v1.12.md index 685d43ee53..a0e4b209d4 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.12.md +++ b/docs/sources/reference/api/docker_remote_api_v1.12.md @@ -622,6 +622,41 @@ Status Codes: 4. Read the extracted size and output it on the correct output 5. Goto 1 +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.13.md b/docs/sources/reference/api/docker_remote_api_v1.13.md index 2c38c9aa1e..2ff844ce55 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.13.md +++ b/docs/sources/reference/api/docker_remote_api_v1.13.md @@ -615,6 +615,41 @@ Status Codes: 4. Read the extracted size and output it on the correct output 5. Goto 1 +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. 
+ Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.14.md b/docs/sources/reference/api/docker_remote_api_v1.14.md index 7ce0df6775..237872df22 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.14.md +++ b/docs/sources/reference/api/docker_remote_api_v1.14.md @@ -625,6 +625,41 @@ Status Codes: 4. Read the extracted size and output it on the correct output 5. Goto 1 +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md index 4d27a6150a..5fa4b22758 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.15.md +++ b/docs/sources/reference/api/docker_remote_api_v1.15.md @@ -767,6 +767,41 @@ Status Codes: 4. Read the extracted size and output it on the correct output 5. Goto 1 +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. 
Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index 500f1bea3c..7ac638d3f4 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -713,6 +713,41 @@ Status Codes: 4. Read the extracted size and output it on the correct output 5. Goto 1 +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.17.md b/docs/sources/reference/api/docker_remote_api_v1.17.md index f8bca77ed1..5f6520d9df 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.17.md +++ b/docs/sources/reference/api/docker_remote_api_v1.17.md @@ -870,6 +870,41 @@ Status Codes: 4. Read the extracted size and output it on the correct output 5. Goto 1 +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. 
Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.2.md b/docs/sources/reference/api/docker_remote_api_v1.2.md index 46f428bc9a..3438eab2db 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.2.md +++ b/docs/sources/reference/api/docker_remote_api_v1.2.md @@ -397,6 +397,41 @@ Status Codes: - **404** – no such container - **500** – server error +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.3.md b/docs/sources/reference/api/docker_remote_api_v1.3.md index 3a0ea7ba1b..004993b850 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.3.md +++ b/docs/sources/reference/api/docker_remote_api_v1.3.md @@ -445,6 +445,41 @@ Status Codes: - **404** – no such container - **500** – server error +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. 
Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.4.md b/docs/sources/reference/api/docker_remote_api_v1.4.md index ac18cd4818..644cd98449 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.4.md +++ b/docs/sources/reference/api/docker_remote_api_v1.4.md @@ -460,6 +460,41 @@ Status Codes: - **404** – no such container - **500** – server error +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.5.md b/docs/sources/reference/api/docker_remote_api_v1.5.md index 8e0ad9f49a..c9d1de07f1 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.5.md +++ b/docs/sources/reference/api/docker_remote_api_v1.5.md @@ -458,6 +458,41 @@ Status Codes: - **404** – no such container - **500** – server error +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.6.md b/docs/sources/reference/api/docker_remote_api_v1.6.md index f55c114b0c..cfbc0dbe06 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.6.md +++ b/docs/sources/reference/api/docker_remote_api_v1.6.md @@ -564,6 +564,41 @@ Status Codes: 4. Read the extracted size and output it on the correct output 5. 
Goto 1) +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.7.md b/docs/sources/reference/api/docker_remote_api_v1.7.md index 69562dbbe8..a7593afac5 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.7.md +++ b/docs/sources/reference/api/docker_remote_api_v1.7.md @@ -509,6 +509,41 @@ Status Codes: 4. Read the extracted size and output it on the correct output 5. Goto 1) +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.md b/docs/sources/reference/api/docker_remote_api_v1.8.md index 2176a334a6..cee00c6b83 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.8.md +++ b/docs/sources/reference/api/docker_remote_api_v1.8.md @@ -557,6 +557,41 @@ Status Codes: 4. Read the extracted size and output it on the correct output 5. Goto 1) +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. 
+ Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md index 61102083d1..f8748e96a1 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.md +++ b/docs/sources/reference/api/docker_remote_api_v1.9.md @@ -561,6 +561,41 @@ Status Codes: 4. Read the extracted size and output it on the correct output 5. Goto 1) +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + ### Wait a container `POST /containers/(id)/wait` From 9c744cb454f7d912ae1dd6215caf550a25929944 Mon Sep 17 00:00:00 2001 From: Abin Shahab Date: Thu, 22 Jan 2015 03:58:43 +0000 Subject: [PATCH 016/653] Fixes apparmor regression Signed-off-by: Abin Shahab (github: ashahab-altiscale) Docker-DCO-1.1-Signed-off-by: Abin Shahab (github: ashahab-altiscale) --- daemon/execdriver/lxc/lxc_template.go | 4 ++-- daemon/execdriver/lxc/lxc_template_unit_test.go | 12 +++++++----- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go index dafd525125..9de799dd52 100644 --- a/daemon/execdriver/lxc/lxc_template.go +++ b/daemon/execdriver/lxc/lxc_template.go @@ -75,8 +75,8 @@ lxc.aa_profile = unconfined # In non-privileged mode, lxc will automatically mount /proc and /sys in readonly mode # for security. 
See: http://man7.org/linux/man-pages/man5/lxc.container.conf.5.html lxc.mount.auto = proc sys - {{if .AppArmor}} -lxc.aa_profile = .AppArmorProfile + {{if .AppArmorProfile}} +lxc.aa_profile = {{.AppArmorProfile}} {{end}} {{end}} diff --git a/daemon/execdriver/lxc/lxc_template_unit_test.go b/daemon/execdriver/lxc/lxc_template_unit_test.go index e072f8dbb4..bb622d4bc5 100644 --- a/daemon/execdriver/lxc/lxc_template_unit_test.go +++ b/daemon/execdriver/lxc/lxc_template_unit_test.go @@ -248,7 +248,8 @@ func TestCustomLxcConfigMisc(t *testing.T) { } defer os.RemoveAll(root) os.MkdirAll(path.Join(root, "containers", "1"), 0777) - driver, err := NewDriver(root, "", false) + driver, err := NewDriver(root, "", true) + if err != nil { t.Fatal(err) } @@ -271,9 +272,10 @@ func TestCustomLxcConfigMisc(t *testing.T) { Bridge: "docker0", }, }, - ProcessConfig: processConfig, - CapAdd: []string{"net_admin", "syslog"}, - CapDrop: []string{"kill", "mknod"}, + ProcessConfig: processConfig, + CapAdd: []string{"net_admin", "syslog"}, + CapDrop: []string{"kill", "mknod"}, + AppArmorProfile: "lxc-container-default-with-nesting", } p, err := driver.generateLXCConfig(command) @@ -287,7 +289,7 @@ func TestCustomLxcConfigMisc(t *testing.T) { grepFile(t, p, "lxc.network.ipv4 = 10.10.10.10/24") grepFile(t, p, "lxc.network.ipv4.gateway = 10.10.10.1") grepFile(t, p, "lxc.network.flags = up") - + grepFile(t, p, "lxc.aa_profile = lxc-container-default-with-nesting") // hostname grepFile(t, p, "lxc.utsname = testhost") grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") From 02a793c6a133f46129d0fc83ce218d3a92f1e644 Mon Sep 17 00:00:00 2001 From: Lorenz Leutgeb Date: Wed, 7 Jan 2015 14:08:34 +0100 Subject: [PATCH 017/653] doc: Improve article on HTTPS * Adjust header to match _page_title * Add instructions on deletion of CSRs and setting permissions * Simplify some path expressions and commands * Consqeuently use ~ instead of ${HOME} * Precise formulation ('key' vs. 'public key') * Fix wrong indentation of output of `openssl req` * Use dash ('--') instead of minus ('-') Remark on permissions: It's not a problem to `chmod 0400` the private keys, because the Docker daemon runs as root (can read the file anyway) and the Docker client runs as user. Signed-off-by: Lorenz Leutgeb --- docs/sources/articles/https.md | 65 +++++++++++++++++++++------------- 1 file changed, 40 insertions(+), 25 deletions(-) diff --git a/docs/sources/articles/https.md b/docs/sources/articles/https.md index 41ba2cce5e..a79e28a5d4 100644 --- a/docs/sources/articles/https.md +++ b/docs/sources/articles/https.md @@ -40,20 +40,20 @@ First generate CA private and public keys: Verifying - Enter pass phrase for ca-key.pem: $ openssl req -new -x509 -days 365 -key ca-key.pem -sha256 -out ca.pem Enter pass phrase for ca-key.pem: - You are about to be asked to enter information that will be incorporated - into your certificate request. - What you are about to enter is what is called a Distinguished Name or a DN. - There are quite a few fields but you can leave some blank - For some fields there will be a default value, - If you enter '.', the field will be left blank. - ----- - Country Name (2 letter code) [AU]: - State or Province Name (full name) [Some-State]:Queensland - Locality Name (eg, city) []:Brisbane - Organization Name (eg, company) [Internet Widgits Pty Ltd]:Docker Inc - Organizational Unit Name (eg, section) []:Boot2Docker - Common Name (e.g. 
server FQDN or YOUR name) []:$HOST - Email Address []:Sven@home.org.au + You are about to be asked to enter information that will be incorporated + into your certificate request. + What you are about to enter is what is called a Distinguished Name or a DN. + There are quite a few fields but you can leave some blank + For some fields there will be a default value, + If you enter '.', the field will be left blank. + ----- + Country Name (2 letter code) [AU]: + State or Province Name (full name) [Some-State]:Queensland + Locality Name (eg, city) []:Brisbane + Organization Name (eg, company) [Internet Widgits Pty Ltd]:Docker Inc + Organizational Unit Name (eg, section) []:Boot2Docker + Common Name (e.g. server FQDN or YOUR name) []:$HOST + Email Address []:Sven@home.org.au Now that we have a CA, you can create a server key and certificate signing request (CSR). Make sure that "Common Name" (i.e., server FQDN or YOUR @@ -69,7 +69,7 @@ name) matches the hostname you will use to connect to Docker: e is 65537 (0x10001) $ openssl req -subj "/CN=$HOST" -new -key server-key.pem -out server.csr -Next, we're going to sign the key with our CA: +Next, we're going to sign the public key with our CA: $ openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem \ -CAcreateserial -out server-cert.pem @@ -93,7 +93,7 @@ config file: $ echo extendedKeyUsage = clientAuth > extfile.cnf -Now sign the key: +Now sign the public key: $ openssl x509 -req -days 365 -in client.csr -CA ca.pem -CAkey ca-key.pem \ -CAcreateserial -out cert.pem -extfile extfile.cnf @@ -102,6 +102,24 @@ Now sign the key: Getting CA Private Key Enter pass phrase for ca-key.pem: +After generating `cert.pem` and `server-cert.pem` you can safely remove the +two certificate signing requests: + + $ rm -v client.csr server.csr + +With a default `umask` of 022 your secret keys will be *world-readable* and +writable for you and your group. + +To remove write permissions for your keys in order to protect them from accidental +damage and make them only readable to you issue the following file mode changes: + + $ chmod -v 0400 ca-key.pem key.pem server-key.pem + +Certificates can be world-readable, but you might want to remove write access to +prevent accidental damage: + + $ chmod -v 0444 ca.pem server-cert.pem cert.pem + Now you can make the Docker daemon only accept connections from clients providing a certificate trusted by our CA: @@ -130,16 +148,13 @@ need to provide your client keys, certificates and trusted CA: ## Secure by default If you want to secure your Docker client connections by default, you can move -the files to the `.docker` directory in your home directory - and set the +the files to the `.docker` directory in your home directory -- and set the `DOCKER_HOST` and `DOCKER_TLS_VERIFY` variables as well (instead of passing `-H=tcp://:2376` and `--tlsverify` on every call). - $ mkdir -p ~/.docker - $ cp ca.pem ~/.docker/ca.pem - $ cp cert.pem ~/.docker/cert.pem - $ cp key.pem ~/.docker/key.pem - $ export DOCKER_HOST=tcp://:2376 - $ export DOCKER_TLS_VERIFY=1 + $ mkdir -pv ~/.docker + $ cp -v {ca,cert,key}.pem ~/.docker + $ export DOCKER_HOST=tcp://:2376 DOCKER_TLS_VERIFY=1 Docker will now connect securely by default: @@ -165,11 +180,11 @@ Docker in various other modes by mixing the flags. certificate and authenticate server based on given CA If found, the client will send its client certificate, so you just need -to drop your keys into `~/.docker/.pem`. Alternatively, +to drop your keys into `~/.docker/{ca,cert,key}.pem`. 
Alternatively, if you want to store your keys in another location, you can specify that location using the environment variable `DOCKER_CERT_PATH`. - $ export DOCKER_CERT_PATH=${HOME}/.docker/zone1/ + $ export DOCKER_CERT_PATH=~/.docker/zone1/ $ docker --tlsverify ps ### Connecting to the Secure Docker port using `curl` From 6ca2875e580e16a309eb9c9486bb358bde3622ff Mon Sep 17 00:00:00 2001 From: Lorenz Leutgeb Date: Thu, 8 Jan 2015 23:19:23 +0100 Subject: [PATCH 018/653] doc: Editorial changes as suggested by @fredlf Refer to: * https://github.com/docker/docker/pull/9952#discussion_r22686652 * https://github.com/docker/docker/pull/9952#discussion_r22686804 Signed-off-by: Lorenz Leutgeb --- docs/sources/articles/https.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/sources/articles/https.md b/docs/sources/articles/https.md index a79e28a5d4..9e3835534e 100644 --- a/docs/sources/articles/https.md +++ b/docs/sources/articles/https.md @@ -107,11 +107,11 @@ two certificate signing requests: $ rm -v client.csr server.csr -With a default `umask` of 022 your secret keys will be *world-readable* and +With a default `umask` of 022, your secret keys will be *world-readable* and writable for you and your group. -To remove write permissions for your keys in order to protect them from accidental -damage and make them only readable to you issue the following file mode changes: +In order to protect your keys from accidental damage, you will want to remove their +write permissions. To make them read-only, change file modes as follows: $ chmod -v 0400 ca-key.pem key.pem server-key.pem From 048b20e58a1b9d516dd6fb34676d765ebd5a8959 Mon Sep 17 00:00:00 2001 From: Lorenz Leutgeb Date: Fri, 9 Jan 2015 00:24:59 +0100 Subject: [PATCH 019/653] doc: Minor semantical/editorial fixes in HTTPS article "read-only" vs. "only readable by you" Refer to: https://github.com/docker/docker/pull/9952#discussion_r22690266 Signed-off-by: Lorenz Leutgeb --- docs/sources/articles/https.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/articles/https.md b/docs/sources/articles/https.md index 9e3835534e..775573ec33 100644 --- a/docs/sources/articles/https.md +++ b/docs/sources/articles/https.md @@ -111,7 +111,7 @@ With a default `umask` of 022, your secret keys will be *world-readable* and writable for you and your group. In order to protect your keys from accidental damage, you will want to remove their -write permissions. To make them read-only, change file modes as follows: +write permissions. To make them only readable by you, change file modes as follows: $ chmod -v 0400 ca-key.pem key.pem server-key.pem From a51554988e615b317e95125f5612a28c3bff8e8a Mon Sep 17 00:00:00 2001 From: Lorenz Leutgeb Date: Thu, 22 Jan 2015 21:46:01 +0100 Subject: [PATCH 020/653] Fix inconsistent formatting Colon was bold, but regular at other occurences. Blame cf27b310c4fc8d2c13ba181398a628d03e1e3c58 Signed-off-by: Lorenz Leutgeb --- docs/sources/articles/https.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/sources/articles/https.md b/docs/sources/articles/https.md index 775573ec33..8fb0bb869c 100644 --- a/docs/sources/articles/https.md +++ b/docs/sources/articles/https.md @@ -26,7 +26,7 @@ it will only connect to servers with a certificate signed by that CA. 
## Create a CA, server and client keys with OpenSSL -> **Note:** replace all instances of `$HOST` in the following example with the +> **Note**: replace all instances of `$HOST` in the following example with the > DNS name of your Docker daemon's host. First generate CA private and public keys: @@ -59,7 +59,7 @@ Now that we have a CA, you can create a server key and certificate signing request (CSR). Make sure that "Common Name" (i.e., server FQDN or YOUR name) matches the hostname you will use to connect to Docker: -> **Note:** replace all instances of `$HOST` in the following example with the +> **Note**: replace all instances of `$HOST` in the following example with the > DNS name of your Docker daemon's host. $ openssl genrsa -out server-key.pem 2048 @@ -129,7 +129,7 @@ providing a certificate trusted by our CA: To be able to connect to Docker and validate its certificate, you now need to provide your client keys, certificates and trusted CA: -> **Note:** replace all instances of `$HOST` in the following example with the +> **Note**: replace all instances of `$HOST` in the following example with the > DNS name of your Docker daemon's host. $ docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem \ From a124bfaef37a9d209de881325dbe653d9b07ef4d Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Thu, 22 Jan 2015 15:58:59 -0500 Subject: [PATCH 021/653] integration-cli: wait for container before sending ^D Signed-off-by: Tibor Vass --- integration-cli/docker_cli_attach_unix_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/integration-cli/docker_cli_attach_unix_test.go b/integration-cli/docker_cli_attach_unix_test.go index 829b14deb9..a3bfa5b1c8 100644 --- a/integration-cli/docker_cli_attach_unix_test.go +++ b/integration-cli/docker_cli_attach_unix_test.go @@ -81,6 +81,9 @@ func TestAttachAfterDetach(t *testing.T) { }() time.Sleep(500 * time.Millisecond) + if err := waitRun(name); err != nil { + t.Fatal(err) + } cpty.Write([]byte{16}) time.Sleep(100 * time.Millisecond) cpty.Write([]byte{17}) From 47769994e5cba81a96529b418052afce7a6d51a3 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 22 Jan 2015 15:42:17 -0700 Subject: [PATCH 022/653] Remove windows from the list of supported platforms Since it can still be tested natively without this, this won't cause any harm while we fix the tests to actually work on Windows. 
Signed-off-by: Andrew "Tianon" Page --- Dockerfile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index c452a6ad9d..50920945e2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -84,8 +84,10 @@ RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 ENV DOCKER_CROSSPLATFORMS \ linux/386 linux/arm \ darwin/amd64 darwin/386 \ - freebsd/amd64 freebsd/386 freebsd/arm \ - windows/amd64 windows/386 + freebsd/amd64 freebsd/386 freebsd/arm + +# TODO when https://jenkins.dockerproject.com/job/Windows/ is green, add windows back to the list above +# windows/amd64 windows/386 # (set an explicit GOARM of 5 for maximum compatibility) ENV GOARM 5 From 99dc224d850a8838f42b9f41229b484197f75fa5 Mon Sep 17 00:00:00 2001 From: GennadySpb Date: Thu, 22 Jan 2015 12:07:20 +0300 Subject: [PATCH 023/653] Update using_supervisord.md Fix factual error change made by: GennadySpb Signed-off-by: Sven Dowideit --- docs/sources/articles/using_supervisord.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/articles/using_supervisord.md b/docs/sources/articles/using_supervisord.md index 01e60b6596..5806707ee6 100644 --- a/docs/sources/articles/using_supervisord.md +++ b/docs/sources/articles/using_supervisord.md @@ -39,7 +39,7 @@ our container. Here we're installing the `openssh-server`, `apache2` and `supervisor` -(which provides the Supervisor daemon) packages. We're also creating two +(which provides the Supervisor daemon) packages. We're also creating four new directories that are needed to run our SSH daemon and Supervisor. ## Adding Supervisor's configuration file From eaf1b88212bb10019cf1d7b1555a305c12001f06 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Fri, 23 Jan 2015 12:17:55 +1000 Subject: [PATCH 024/653] comment out the docker and curl lines we'll run later Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/articles/https/Makefile | 7 ++++--- docs/sources/articles/https/parsedocs.sh | 8 +++++++- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/docs/sources/articles/https/Makefile b/docs/sources/articles/https/Makefile index 48fe49f2b6..b751c1e43f 100644 --- a/docs/sources/articles/https/Makefile +++ b/docs/sources/articles/https/Makefile @@ -13,11 +13,12 @@ cert: build certs: cert run: - docker -d -D --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem -H=0.0.0.0:6666 --pidfile=$(pwd)/docker.pid --graph=$(pwd)/graph + sudo docker -d -D --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem -H=0.0.0.0:6666 --pidfile=$(pwd)/docker.pid --graph=$(pwd)/graph client: - docker --tls --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=$(HOST):6666 version - docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=$(HOST):6666 info + sudo docker --tls --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=$(HOST):6666 version + sudo docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=$(HOST):6666 info + sudo curl https://$(HOST):6666/images/json --cert cert.pem --key key.pem --cacert ca.pem clean: rm ca-key.pem ca.pem ca.srl cert.pem client.csr extfile.cnf key.pem server-cert.pem server-key.pem server.csr diff --git a/docs/sources/articles/https/parsedocs.sh b/docs/sources/articles/https/parsedocs.sh index 56be4103a4..f9df33c337 100755 --- a/docs/sources/articles/https/parsedocs.sh +++ b/docs/sources/articles/https/parsedocs.sh @@ -1,4 +1,10 @@ #!/bin/sh echo "#!/bin/sh" -cat ../https.md | 
awk '{if (sub(/\\$/,"")) printf "%s", $0; else print $0}' | grep ' $ ' | sed 's/ $ //g' | sed 's/2375/7777/g' | sed 's/2376/7778/g' +cat ../https.md | awk '{if (sub(/\\$/,"")) printf "%s", $0; else print $0}' \ + | grep ' $ ' \ + | sed 's/ $ //g' \ + | sed 's/2375/7777/g' \ + | sed 's/2376/7778/g' \ + | sed 's/^docker/# docker/g' \ + | sed 's/^curl/# curl/g' From aac6090f2df3cc9192d1813e3c1f48b882f063d4 Mon Sep 17 00:00:00 2001 From: Qiang Huang Date: Fri, 23 Jan 2015 16:36:55 +0800 Subject: [PATCH 025/653] add args support for check-config.sh Signed-off-by: Qiang Huang --- contrib/check-config.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/contrib/check-config.sh b/contrib/check-config.sh index 4f17540731..dc8e7ffcfb 100755 --- a/contrib/check-config.sh +++ b/contrib/check-config.sh @@ -10,7 +10,12 @@ possibleConfigs=( "/usr/src/linux-$(uname -r)/.config" '/usr/src/linux/.config' ) -: ${CONFIG:="${possibleConfigs[0]}"} + +if [ $# -gt 0 ]; then + CONFIG="$1" +else + CONFIG="${possibleConfigs[0]}" +fi if ! command -v zgrep &> /dev/null; then zgrep() { From 310337dc89a78cbe245977236f02dcda73728df8 Mon Sep 17 00:00:00 2001 From: Michal Minar Date: Mon, 19 Jan 2015 13:44:04 +0100 Subject: [PATCH 026/653] Stop and Kill commands made robust If stop/kill command hits a short window between process' exit and container's cleanup, it will no longer fail with 'no such process' error. Resolves #10182 Signed-off-by: Michal Minar --- daemon/container.go | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index c1c215ffee..90cd3d8327 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -652,6 +652,16 @@ func (container *Container) KillSig(sig int) error { return container.daemon.Kill(container, sig) } +// Wrapper aroung KillSig() suppressing "no such process" error. +func (container *Container) killPossiblyDeadProcess(sig int) error { + err := container.KillSig(sig) + if err == syscall.ESRCH { + log.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPid(), sig) + return nil + } + return err +} + func (container *Container) Pause() error { if container.IsPaused() { return fmt.Errorf("Container %s is already paused", container.ID) @@ -678,7 +688,7 @@ func (container *Container) Kill() error { } // 1. Send SIGKILL - if err := container.KillSig(9); err != nil { + if err := container.killPossiblyDeadProcess(9); err != nil { return err } @@ -688,7 +698,10 @@ func (container *Container) Kill() error { if pid := container.GetPid(); pid != 0 { log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID)) if err := syscall.Kill(pid, 9); err != nil { - return err + if err != syscall.ESRCH { + return err + } + log.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid) } } } @@ -703,9 +716,9 @@ func (container *Container) Stop(seconds int) error { } // 1. 
Send a SIGTERM - if err := container.KillSig(15); err != nil { + if err := container.killPossiblyDeadProcess(15); err != nil { log.Infof("Failed to send SIGTERM to the process, force killing") - if err := container.KillSig(9); err != nil { + if err := container.killPossiblyDeadProcess(9); err != nil { return err } } From 54c10fe81d1894f9683293601a53a0d87a0d7fbd Mon Sep 17 00:00:00 2001 From: Tony Miller Date: Thu, 22 Jan 2015 23:06:21 +0900 Subject: [PATCH 027/653] document the ExtraHosts parameter for /containers/create for the remote API I think this was added from version 1.15. Signed-off-by: Tony Miller --- docs/sources/reference/api/docker_remote_api_v1.15.md | 3 +++ docs/sources/reference/api/docker_remote_api_v1.16.md | 3 +++ docs/sources/reference/api/docker_remote_api_v1.17.md | 3 +++ 3 files changed, 9 insertions(+) diff --git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md index 4d27a6150a..51b13ecc58 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.15.md +++ b/docs/sources/reference/api/docker_remote_api_v1.15.md @@ -148,6 +148,7 @@ Create a container "Privileged": false, "Dns": ["8.8.8.8"], "DnsSearch": [""], + "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], @@ -220,6 +221,8 @@ Json Parameters: a boolean value. - **Dns** - A list of dns servers for the container to use. - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to be added to the + container's `/etc/host` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - **CapAdd** - A list of kernel capabilties to add to the container. diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index 500f1bea3c..8ee445be18 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -148,6 +148,7 @@ Create a container "Privileged": false, "Dns": ["8.8.8.8"], "DnsSearch": [""], + "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], @@ -220,6 +221,8 @@ Json Parameters: a boolean value. - **Dns** - A list of dns servers for the container to use. - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to be added to the + container's `/etc/host` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - **CapAdd** - A list of kernel capabilties to add to the container. diff --git a/docs/sources/reference/api/docker_remote_api_v1.17.md b/docs/sources/reference/api/docker_remote_api_v1.17.md index f8bca77ed1..3cd7d6a0aa 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.17.md +++ b/docs/sources/reference/api/docker_remote_api_v1.17.md @@ -149,6 +149,7 @@ Create a container "ReadonlyRootfs": false, "Dns": ["8.8.8.8"], "DnsSearch": [""], + "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], @@ -223,6 +224,8 @@ Json Parameters: Specified as a boolean value. - **Dns** - A list of dns servers for the container to use. - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to be added to the + container's `/etc/host` file. 
Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - **CapAdd** - A list of kernel capabilties to add to the container. From ba3bad66f92da057dba2a43ff9a512cf7ad72375 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Fri, 23 Jan 2015 09:54:17 -0800 Subject: [PATCH 028/653] Always store images with tarsum.v1 checksum added Updates `image.StoreImage()` to always ensure that images that are installed in Docker have a tarsum.v1 checksum. Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- image/image.go | 13 ++++++++----- pkg/tarsum/tarsum.go | 1 + pkg/tarsum/versioning.go | 12 ++++++++++++ 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/image/image.go b/image/image.go index 7664602cd8..3cf26f37c8 100644 --- a/image/image.go +++ b/image/image.go @@ -81,8 +81,8 @@ func LoadImage(root string) (*Image, error) { // StoreImage stores file system layer data for the given image to the // image's registered storage driver. Image metadata is stored in a file -// at the specified root directory. This function also computes the TarSum -// of `layerData` (currently using tarsum.dev). +// at the specified root directory. This function also computes a checksum +// of `layerData` if the image does not have one already. func StoreImage(img *Image, layerData archive.ArchiveReader, root string) error { // Store the layer var ( @@ -96,15 +96,18 @@ func StoreImage(img *Image, layerData archive.ArchiveReader, root string) error if layerData != nil { // If the image doesn't have a checksum, we should add it. The layer // checksums are verified when they are pulled from a remote, but when - // a container is committed it should be added here. - if img.Checksum == "" { + // a container is committed it should be added here. Also ensure that + // the stored checksum has the latest version of tarsum (assuming we + // are using tarsum). + if tarsum.VersionLabelForChecksum(img.Checksum) != tarsum.Version1.String() { + // Either there was no checksum or it's not a tarsum.v1 layerDataDecompressed, err := archive.DecompressStream(layerData) if err != nil { return err } defer layerDataDecompressed.Close() - if layerTarSum, err = tarsum.NewTarSum(layerDataDecompressed, true, tarsum.VersionDev); err != nil { + if layerTarSum, err = tarsum.NewTarSum(layerDataDecompressed, true, tarsum.Version1); err != nil { return err } diff --git a/pkg/tarsum/tarsum.go b/pkg/tarsum/tarsum.go index c6a7294e74..88fcbe4a94 100644 --- a/pkg/tarsum/tarsum.go +++ b/pkg/tarsum/tarsum.go @@ -122,6 +122,7 @@ type tHashConfig struct { } var ( + // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. standardHashConfigs = map[string]tHashConfig{ "sha256": {name: "sha256", hash: crypto.SHA256}, "sha512": {name: "sha512", hash: crypto.SHA512}, diff --git a/pkg/tarsum/versioning.go b/pkg/tarsum/versioning.go index be1d07040f..0ceb5298a3 100644 --- a/pkg/tarsum/versioning.go +++ b/pkg/tarsum/versioning.go @@ -22,6 +22,18 @@ const ( VersionDev ) +// VersionLabelForChecksum returns the label for the given tarsum +// checksum, i.e., everything before the first `+` character in +// the string or an empty string if no label separator is found. 
+func VersionLabelForChecksum(checksum string) string { + // Checksums are in the form: {versionLabel}+{hashID}:{hex} + sepIndex := strings.Index(checksum, "+") + if sepIndex < 0 { + return "" + } + return checksum[:sepIndex] +} + // Get a list of all known tarsum Version func GetVersions() []Version { v := []Version{} From c49cd3d2a51ad6d2304c24f7b609cde804a0d6c5 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Fri, 23 Jan 2015 13:17:54 -0800 Subject: [PATCH 029/653] Make debugs logs suck less. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- graph/pull.go | 1 - image/image.go | 2 -- 2 files changed, 3 deletions(-) diff --git a/graph/pull.go b/graph/pull.go index f76a156056..f9c5c7b421 100644 --- a/graph/pull.go +++ b/graph/pull.go @@ -153,7 +153,6 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo * for _, image := range repoData.ImgList { downloadImage := func(img *registry.ImgData) { if askedTag != "" && img.Tag != askedTag { - log.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID) if parallel { errors <- nil } diff --git a/image/image.go b/image/image.go index 7664602cd8..0feb2b238c 100644 --- a/image/image.go +++ b/image/image.go @@ -9,7 +9,6 @@ import ( "strconv" "time" - log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/runconfig" @@ -274,7 +273,6 @@ func (img *Image) CheckDepth() error { func NewImgJSON(src []byte) (*Image, error) { ret := &Image{} - log.Debugf("Json string: {%s}", src) // FIXME: Is there a cleaner way to "purify" the input json? if err := json.Unmarshal(src, ret); err != nil { return nil, err From ec953b0e7ba89ff3b13b3187d9cee0b8c232b4ff Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Fri, 23 Jan 2015 14:00:15 -0800 Subject: [PATCH 030/653] Blacklist zfs with overlay Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- daemon/graphdriver/driver.go | 2 ++ daemon/graphdriver/overlay/overlay.go | 3 +++ 2 files changed, 5 insertions(+) diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go index c63e1b45db..d89fd6c861 100644 --- a/daemon/graphdriver/driver.go +++ b/daemon/graphdriver/driver.go @@ -25,6 +25,7 @@ const ( FsMagicReiserFs = FsMagic(0x52654973) FsMagicSmbFs = FsMagic(0x0000517B) FsMagicJffs2Fs = FsMagic(0x000072b6) + FsMagicZfs = FsMagic(0x2fc12fc1) FsMagicUnsupported = FsMagic(0x00000000) ) @@ -58,6 +59,7 @@ var ( FsMagicReiserFs: "reiserfs", FsMagicSmbFs: "smb", FsMagicJffs2Fs: "jffs2", + FsMagicZfs: "zfs", FsMagicUnsupported: "unsupported", } ) diff --git a/daemon/graphdriver/overlay/overlay.go b/daemon/graphdriver/overlay/overlay.go index 27784c14a3..37162b5caf 100644 --- a/daemon/graphdriver/overlay/overlay.go +++ b/daemon/graphdriver/overlay/overlay.go @@ -118,6 +118,9 @@ func Init(home string, options []string) (graphdriver.Driver, error) { case graphdriver.FsMagicAufs: log.Error("'overlay' is not supported over aufs.") return nil, graphdriver.ErrIncompatibleFS + case graphdriver.FsMagicZfs: + log.Error("'overlay' is not supported over zfs.") + return nil, graphdriver.ErrIncompatibleFS } // Create the driver home dir From 2cd5b7dae80c5451b729a0be2ade03cbdf2c5c26 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 23 Jan 2015 14:44:30 -0800 Subject: [PATCH 031/653] Use filepath instead of path Currently loading the trust key uses path instead of filepath. This creates problems on some operating systems such as Windows. 
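As an illustration of the difference (a minimal, standalone sketch; the Windows-style path below is invented for the example and does not come from this patch): the `path` package always splits on `/`, whereas `path/filepath` uses the separator of the operating system the binary is built for, so only the latter can recover the parent directory of a native Windows path.

    package main

    import (
        "fmt"
        "path"
        "path/filepath"
    )

    func main() {
        // Invented Windows-style location of a trust key, for illustration only.
        keyPath := `C:\Users\moby\.docker\key.json`

        // path only knows about "/" separators, so it finds no parent here.
        fmt.Println(path.Dir(keyPath)) // prints "."

        // filepath uses the separator of the target OS; built for Windows this
        // prints C:\Users\moby\.docker, the directory MkdirAll actually needs.
        fmt.Println(filepath.Dir(keyPath))
    }

With `path.Dir`, the `os.MkdirAll` call in `LoadOrCreateTrustKey` can therefore be asked to create `.` instead of the key's directory when handed a native Windows path.
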
Fixes #10319 Signed-off-by: Derek McGowan (github: dmcgowan) --- api/common.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/common.go b/api/common.go index fb3eefaca0..a96a4066ac 100644 --- a/api/common.go +++ b/api/common.go @@ -4,7 +4,7 @@ import ( "fmt" "mime" "os" - "path" + "path/filepath" "strings" log "github.com/Sirupsen/logrus" @@ -55,7 +55,7 @@ func MatchesContentType(contentType, expectedType string) bool { // LoadOrCreateTrustKey attempts to load the libtrust key at the given path, // otherwise generates a new one func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { - err := os.MkdirAll(path.Dir(trustKeyPath), 0700) + err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700) if err != nil { return nil, err } From d5c78a4c07929eda2d448f9b2a09fb3d65a75a2d Mon Sep 17 00:00:00 2001 From: Jonathan Rudenberg Date: Fri, 23 Jan 2015 14:32:36 -0800 Subject: [PATCH 032/653] Fix missing err assignment in bridge creation Signed-off-by: Jonathan Rudenberg --- daemon/networkdriver/bridge/driver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index 8e28a710f5..0d3f27517d 100644 --- a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -396,7 +396,7 @@ func configureBridge(bridgeIP string, bridgeIPv6 string, enableIPv6 bool) error return err } - if netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil { + if err := netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil { return fmt.Errorf("Unable to add private network: %s", err) } @@ -413,7 +413,7 @@ func configureBridge(bridgeIP string, bridgeIPv6 string, enableIPv6 bool) error return err } - if netlink.NetworkLinkAddIp(iface, ipAddr6, ipNet6); err != nil { + if err := netlink.NetworkLinkAddIp(iface, ipAddr6, ipNet6); err != nil { return fmt.Errorf("Unable to add private IPv6 network: %s", err) } } From 957cbdbf302750f3fb3467237bebf29d87234208 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 30 Dec 2014 14:05:00 -0800 Subject: [PATCH 033/653] Move InspectExecID test to exec. 
Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_exec_test.go | 35 ++++++++++++++++++++++ integration-cli/docker_cli_inspect_test.go | 35 ---------------------- 2 files changed, 35 insertions(+), 35 deletions(-) diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go index 5dc0e8d71a..0740c7b3e2 100644 --- a/integration-cli/docker_cli_exec_test.go +++ b/integration-cli/docker_cli_exec_test.go @@ -453,3 +453,38 @@ func TestExecCgroup(t *testing.T) { logDone("exec - exec has the container cgroups") } + +func TestInspectExecID(t *testing.T) { + defer deleteAllContainers() + + out, exitCode, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "busybox", "top")) + if exitCode != 0 || err != nil { + t.Fatalf("failed to run container: %s, %v", out, err) + } + id := strings.TrimSuffix(out, "\n") + + out, err = inspectField(id, "ExecIDs") + if err != nil { + t.Fatalf("failed to inspect container: %s, %v", out, err) + } + if out != "" { + t.Fatalf("ExecIDs should be empty, got: %s", out) + } + + exitCode, err = runCommand(exec.Command(dockerBinary, "exec", "-d", id, "ls", "/")) + if exitCode != 0 || err != nil { + t.Fatalf("failed to exec in container: %s, %v", out, err) + } + + out, err = inspectField(id, "ExecIDs") + if err != nil { + t.Fatalf("failed to inspect container: %s, %v", out, err) + } + + out = strings.TrimSuffix(out, "\n") + if out == "[]" || out == "" { + t.Fatalf("ExecIDs should not be empty, got: %s", out) + } + + logDone("inspect - inspect a container with ExecIDs") +} diff --git a/integration-cli/docker_cli_inspect_test.go b/integration-cli/docker_cli_inspect_test.go index ee69a89a43..cf42217ac8 100644 --- a/integration-cli/docker_cli_inspect_test.go +++ b/integration-cli/docker_cli_inspect_test.go @@ -21,38 +21,3 @@ func TestInspectImage(t *testing.T) { logDone("inspect - inspect an image") } - -func TestInspectExecID(t *testing.T) { - defer deleteAllContainers() - - out, exitCode, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "busybox", "top")) - if exitCode != 0 || err != nil { - t.Fatalf("failed to run container: %s, %v", out, err) - } - id := strings.TrimSuffix(out, "\n") - - out, err = inspectField(id, "ExecIDs") - if err != nil { - t.Fatalf("failed to inspect container: %s, %v", out, err) - } - if out != "" { - t.Fatalf("ExecIDs should be empty, got: %s", out) - } - - exitCode, err = runCommand(exec.Command(dockerBinary, "exec", "-d", id, "ls", "/")) - if exitCode != 0 || err != nil { - t.Fatalf("failed to exec in container: %s, %v", out, err) - } - - out, err = inspectField(id, "ExecIDs") - if err != nil { - t.Fatalf("failed to inspect container: %s, %v", out, err) - } - - out = strings.TrimSuffix(out, "\n") - if out == "[]" || out == "" { - t.Fatalf("ExecIDs should not be empty, got: %s", out) - } - - logDone("inspect - inspect a container with ExecIDs") -} From ecdbc1a0aff40fa28389d1971528641e2824e5a9 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 30 Dec 2014 14:22:31 -0800 Subject: [PATCH 034/653] Add build flag to exec test. 
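For context, the tag is enforced through Go build constraints: a `// +build` line at the top of a file decides whether that file is compiled for a given set of tags. A minimal sketch of the mechanism (the file contents are illustrative only, not part of this patch):

    // +build !test_no_exec

    package main

    import "fmt"

    // This file is compiled by a plain `go build` or `go test`, but excluded
    // entirely when the test_no_exec tag is set, e.g. `go test -tags test_no_exec`.
    func main() {
        fmt.Println("exec tests enabled")
    }

A plain `go test` still compiles files carrying this constraint, while `go test -tags test_no_exec` (or a build with `test_no_exec` in DOCKER_BUILDTAGS) leaves them out.
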
Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_api_exec_test.go | 2 ++ integration-cli/docker_cli_exec_test.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/integration-cli/docker_api_exec_test.go b/integration-cli/docker_api_exec_test.go index df7122dd75..1ed99a2561 100644 --- a/integration-cli/docker_api_exec_test.go +++ b/integration-cli/docker_api_exec_test.go @@ -1,3 +1,5 @@ +// +build !test_no_exec + package main import ( diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go index 0740c7b3e2..0b98f804c3 100644 --- a/integration-cli/docker_cli_exec_test.go +++ b/integration-cli/docker_cli_exec_test.go @@ -1,3 +1,5 @@ +// +build !test_no_exec + package main import ( From 547c95957679672c3ed2e97c7a2950d3c0a03321 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 30 Dec 2014 15:24:53 -0800 Subject: [PATCH 035/653] Update project/make.sh to use execdriver buildtag if passed. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- project/make.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/project/make.sh b/project/make.sh index bb2a3419fa..97751cb67c 100755 --- a/project/make.sh +++ b/project/make.sh @@ -93,6 +93,10 @@ if [ -z "$DOCKER_CLIENTONLY" ]; then DOCKER_BUILDTAGS+=" daemon" fi +if [ "$DOCKER_EXECDRIVER" = 'lxc' ]; then + DOCKER_BUILDTAGS+=' test_no_exec' +fi + # Use these flags when compiling the tests and final binary LDFLAGS=' -X '$DOCKER_PKG'/dockerversion.GITCOMMIT "'$GITCOMMIT'" From 43d1c2010125e3c1ef1013ee2e8b4d9371d70d77 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Mon, 19 Jan 2015 12:11:19 -0800 Subject: [PATCH 036/653] Move links exec test & exec dir test. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_exec_test.go | 133 +++++++++++++++++++++++ integration-cli/docker_cli_links_test.go | 32 ------ integration-cli/docker_cli_run_test.go | 100 ----------------- 3 files changed, 133 insertions(+), 132 deletions(-) diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go index 0b98f804c3..85906a8ae0 100644 --- a/integration-cli/docker_cli_exec_test.go +++ b/integration-cli/docker_cli_exec_test.go @@ -7,6 +7,7 @@ import ( "fmt" "os" "os/exec" + "path/filepath" "reflect" "sort" "strings" @@ -490,3 +491,135 @@ func TestInspectExecID(t *testing.T) { logDone("inspect - inspect a container with ExecIDs") } + +func TestLinksPingLinkedContainersOnRename(t *testing.T) { + var out string + out, _, _ = dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") + idA := stripTrailingCharacters(out) + if idA == "" { + t.Fatal(out, "id should not be nil") + } + out, _, _ = dockerCmd(t, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "sleep", "10") + idB := stripTrailingCharacters(out) + if idB == "" { + t.Fatal(out, "id should not be nil") + } + + execCmd := exec.Command(dockerBinary, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") + out, _, err := runCommandWithOutput(execCmd) + if err != nil { + t.Fatal(out, err) + } + + dockerCmd(t, "rename", "container1", "container_new") + + execCmd = exec.Command(dockerBinary, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") + out, _, err = runCommandWithOutput(execCmd) + if err != nil { + t.Fatal(out, err) + } + + deleteAllContainers() + + logDone("links - ping linked container upon rename") +} + +func TestRunExecDir(t *testing.T) { + cmd := 
exec.Command(dockerBinary, "run", "-d", "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + id := strings.TrimSpace(out) + execDir := filepath.Join(execDriverPath, id) + stateFile := filepath.Join(execDir, "state.json") + contFile := filepath.Join(execDir, "container.json") + + { + fi, err := os.Stat(execDir) + if err != nil { + t.Fatal(err) + } + if !fi.IsDir() { + t.Fatalf("%q must be a directory", execDir) + } + fi, err = os.Stat(stateFile) + if err != nil { + t.Fatal(err) + } + fi, err = os.Stat(contFile) + if err != nil { + t.Fatal(err) + } + } + + stopCmd := exec.Command(dockerBinary, "stop", id) + out, _, err = runCommandWithOutput(stopCmd) + if err != nil { + t.Fatal(err, out) + } + { + fi, err := os.Stat(execDir) + if err != nil { + t.Fatal(err) + } + if !fi.IsDir() { + t.Fatalf("%q must be a directory", execDir) + } + fi, err = os.Stat(stateFile) + if err == nil { + t.Fatalf("Statefile %q is exists for stopped container!", stateFile) + } + if !os.IsNotExist(err) { + t.Fatalf("Error should be about non-existing, got %s", err) + } + fi, err = os.Stat(contFile) + if err == nil { + t.Fatalf("Container file %q is exists for stopped container!", contFile) + } + if !os.IsNotExist(err) { + t.Fatalf("Error should be about non-existing, got %s", err) + } + } + startCmd := exec.Command(dockerBinary, "start", id) + out, _, err = runCommandWithOutput(startCmd) + if err != nil { + t.Fatal(err, out) + } + { + fi, err := os.Stat(execDir) + if err != nil { + t.Fatal(err) + } + if !fi.IsDir() { + t.Fatalf("%q must be a directory", execDir) + } + fi, err = os.Stat(stateFile) + if err != nil { + t.Fatal(err) + } + fi, err = os.Stat(contFile) + if err != nil { + t.Fatal(err) + } + } + rmCmd := exec.Command(dockerBinary, "rm", "-f", id) + out, _, err = runCommandWithOutput(rmCmd) + if err != nil { + t.Fatal(err, out) + } + { + _, err := os.Stat(execDir) + if err == nil { + t.Fatal(err) + } + if err == nil { + t.Fatalf("Exec directory %q is exists for removed container!", execDir) + } + if !os.IsNotExist(err) { + t.Fatalf("Error should be about non-existing, got %s", err) + } + } + + logDone("run - check execdriver dir behavior") +} diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go index fc99ec57fb..015db0f89f 100644 --- a/integration-cli/docker_cli_links_test.go +++ b/integration-cli/docker_cli_links_test.go @@ -92,38 +92,6 @@ func TestLinksPingLinkedContainersAfterRename(t *testing.T) { logDone("links - ping linked container after rename") } -func TestLinksPingLinkedContainersOnRename(t *testing.T) { - var out string - out, _, _ = dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") - idA := stripTrailingCharacters(out) - if idA == "" { - t.Fatal(out, "id should not be nil") - } - out, _, _ = dockerCmd(t, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "sleep", "10") - idB := stripTrailingCharacters(out) - if idB == "" { - t.Fatal(out, "id should not be nil") - } - - execCmd := exec.Command(dockerBinary, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") - out, _, err := runCommandWithOutput(execCmd) - if err != nil { - t.Fatal(out, err) - } - - dockerCmd(t, "rename", "container1", "container_new") - - execCmd = exec.Command(dockerBinary, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") - out, _, err = runCommandWithOutput(execCmd) - if err != nil { - t.Fatal(out, err) - } - - deleteAllContainers() - - logDone("links - ping 
linked container upon rename") -} - func TestLinksIpTablesRulesWhenLinkAndUnlink(t *testing.T) { dockerCmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "sleep", "10") dockerCmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "sleep", "10") diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 6da5b76565..a010c4e22a 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -2404,106 +2404,6 @@ func TestRunMountOrdering(t *testing.T) { logDone("run - volumes are mounted in the correct order") } -func TestRunExecDir(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") - out, _, err := runCommandWithOutput(cmd) - if err != nil { - t.Fatal(err, out) - } - id := strings.TrimSpace(out) - execDir := filepath.Join(execDriverPath, id) - stateFile := filepath.Join(execDir, "state.json") - contFile := filepath.Join(execDir, "container.json") - - { - fi, err := os.Stat(execDir) - if err != nil { - t.Fatal(err) - } - if !fi.IsDir() { - t.Fatalf("%q must be a directory", execDir) - } - fi, err = os.Stat(stateFile) - if err != nil { - t.Fatal(err) - } - fi, err = os.Stat(contFile) - if err != nil { - t.Fatal(err) - } - } - - stopCmd := exec.Command(dockerBinary, "stop", id) - out, _, err = runCommandWithOutput(stopCmd) - if err != nil { - t.Fatal(err, out) - } - { - fi, err := os.Stat(execDir) - if err != nil { - t.Fatal(err) - } - if !fi.IsDir() { - t.Fatalf("%q must be a directory", execDir) - } - fi, err = os.Stat(stateFile) - if err == nil { - t.Fatalf("Statefile %q is exists for stopped container!", stateFile) - } - if !os.IsNotExist(err) { - t.Fatalf("Error should be about non-existing, got %s", err) - } - fi, err = os.Stat(contFile) - if err == nil { - t.Fatalf("Container file %q is exists for stopped container!", contFile) - } - if !os.IsNotExist(err) { - t.Fatalf("Error should be about non-existing, got %s", err) - } - } - startCmd := exec.Command(dockerBinary, "start", id) - out, _, err = runCommandWithOutput(startCmd) - if err != nil { - t.Fatal(err, out) - } - { - fi, err := os.Stat(execDir) - if err != nil { - t.Fatal(err) - } - if !fi.IsDir() { - t.Fatalf("%q must be a directory", execDir) - } - fi, err = os.Stat(stateFile) - if err != nil { - t.Fatal(err) - } - fi, err = os.Stat(contFile) - if err != nil { - t.Fatal(err) - } - } - rmCmd := exec.Command(dockerBinary, "rm", "-f", id) - out, _, err = runCommandWithOutput(rmCmd) - if err != nil { - t.Fatal(err, out) - } - { - _, err := os.Stat(execDir) - if err == nil { - t.Fatal(err) - } - if err == nil { - t.Fatalf("Exec directory %q is exists for removed container!", execDir) - } - if !os.IsNotExist(err) { - t.Fatalf("Error should be about non-existing, got %s", err) - } - } - - logDone("run - check execdriver dir behavior") -} - // Regression test for https://github.com/docker/docker/issues/8259 func TestRunReuseBindVolumeThatIsSymlink(t *testing.T) { tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink") From 0091c490dd31b9b9a5248cacaaf1f515ff26a5e7 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 23 Jan 2015 17:24:05 -0800 Subject: [PATCH 037/653] Revert progressreader to not defer close When progress reader closes it overwrites the progress line with the full progress bar, replaces the completed message. 
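The ordering here is simply how `defer` behaves: a deferred `Close` does not run until the surrounding function returns, which is after any completion message has already been written, so its final repaint of the progress line lands on top of that message. A rough sketch of that ordering (the function names and messages are stand-ins, not Docker's actual output):

    package main

    import "fmt"

    // closeProgress stands in for the progress reader's Close, which repaints
    // the progress line one final time.
    func closeProgress() {
        fmt.Println("[==================>] repaint from Close")
    }

    func pushLayer() {
        defer closeProgress() // deferred: does not run until pushLayer returns
        fmt.Println("Pushing layer ...")
        fmt.Println("Layer successfully pushed") // printed before the deferred repaint
    }

    func main() {
        pushLayer()
    }
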
Signed-off-by: Derek McGowan (github: dmcgowan) --- graph/push.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/graph/push.go b/graph/push.go index b8fb09882e..3a9f1ace06 100644 --- a/graph/push.go +++ b/graph/push.go @@ -236,10 +236,7 @@ func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep strin // Send the layer log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size) - prgRd := utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing") - defer prgRd.Close() - - checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, prgRd, ep, token, jsonRaw) + checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw) if err != nil { return "", err } @@ -343,10 +340,7 @@ func (s *TagStore) pushV2Repository(r *registry.Session, eng *engine.Engine, out } if !exists { - prgRd := utils.ProgressReader(arch, int(img.Size), out, sf, false, utils.TruncateID(img.ID), "Pushing") - defer prgRd.Close() - - err = r.PutV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, prgRd, auth) + err = r.PutV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, utils.ProgressReader(arch, int(img.Size), out, sf, false, utils.TruncateID(img.ID), "Pushing"), auth) if err != nil { out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image push failed", nil)) return err From a5cc1c556dcd6dfe5c1aed4bec95aaff6ddfad61 Mon Sep 17 00:00:00 2001 From: unclejack Date: Sat, 24 Jan 2015 08:43:03 +0200 Subject: [PATCH 038/653] pkg/archive: remove tar autodetection log line Signed-off-by: Cristian Staretu --- pkg/archive/archive.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 35566520b1..68e5c1d300 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -101,7 +101,6 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { if err != nil { return nil, err } - log.Debugf("[tar autodetect] n: %v", bs) compression := DetectCompression(bs) switch compression { From 471006c02f03d9451721dfbd7e3d62f0db0fc7c7 Mon Sep 17 00:00:00 2001 From: DiuDiugirl Date: Sat, 24 Jan 2015 15:05:44 +0800 Subject: [PATCH 039/653] Fix a minor typo Docker inspect can also be used on images, this patch fixed the minor typo in file docker/flags.go and docs/man/docker.1.md Signed-off-by: DiuDiugirl --- docker/flags.go | 2 +- docs/man/docker.1.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/flags.go b/docker/flags.go index 3b54612e89..d91a9a1de7 100644 --- a/docker/flags.go +++ b/docker/flags.go @@ -97,7 +97,7 @@ func init() { {"images", "List images"}, {"import", "Create a new filesystem image from the contents of a tarball"}, {"info", "Display system-wide information"}, - {"inspect", "Return low-level information on a container"}, + {"inspect", "Return low-level information on a container or image"}, {"kill", "Kill a running container"}, {"load", "Load an image from a tar archive"}, {"login", "Register or log in to a Docker registry server"}, diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md index 3b4367b07a..456680b52f 100644 --- a/docs/man/docker.1.md +++ b/docs/man/docker.1.md @@ -144,7 +144,7 @@ unix://[/path/to/socket] to use. 
Display system-wide information **docker-inspect(1)** - Return low-level information on a container + Return low-level information on a container or image **docker-kill(1)** Kill a running container (which includes the wrapper process and everything From 71763636f232477b0c890e0506b7b858505f1b33 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Sat, 24 Jan 2015 10:21:11 -0800 Subject: [PATCH 040/653] Updated image spec docs to clarify image JSON The title `Image JSON Schema` was used as a header in the section which describes the layout and fields of the image metadata JSON file. It was pointed out that `JSON Schema` is its own term for describing JSON in a machine-and-human-readable format, while the word "Schema" in this context was used more generically to say that the section is meant to be an example and outline of the Image JSON. http://spacetelescope.github.io/understanding-json-schema/ This section now has the title `Image JSON Description` in order to not cause this confusion. Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- image/spec/v1.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/image/spec/v1.md b/image/spec/v1.md index e4450283a7..abed75833b 100644 --- a/image/spec/v1.md +++ b/image/spec/v1.md @@ -114,7 +114,7 @@ This specification uses the following terms: -## Image JSON Schema +## Image JSON Description Here is an example image JSON file: From d477d42dd3e6a98f82c6ed2abb1dfeeb2782539f Mon Sep 17 00:00:00 2001 From: Euan Date: Sat, 24 Jan 2015 13:08:47 -0800 Subject: [PATCH 041/653] Allow empty layer configs in manifests Before the V2 registry changes, images with no config could be pushed. This change fixes a regression that made those images not able to be pushed to a registry. Signed-off-by: Euan Kemp --- graph/manifest.go | 8 +++--- integration-cli/docker_cli_push_test.go | 36 +++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 5 deletions(-) diff --git a/graph/manifest.go b/graph/manifest.go index 3d4ab1c5de..18784bb1e0 100644 --- a/graph/manifest.go +++ b/graph/manifest.go @@ -3,7 +3,6 @@ package graph import ( "bytes" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -71,14 +70,13 @@ func (s *TagStore) newManifest(localName, remoteName, tag string) ([]byte, error if err != nil { return nil, err } - if layer.Config == nil { - return nil, errors.New("Missing layer configuration") - } manifest.Architecture = layer.Architecture manifest.FSLayers = make([]*registry.FSLayer, 0, 4) manifest.History = make([]*registry.ManifestHistory, 0, 4) var metadata runconfig.Config - metadata = *layer.Config + if layer.Config != nil { + metadata = *layer.Config + } for ; layer != nil; layer, err = layer.GetParent() { if err != nil { diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go index 484e5db70b..0b2decde70 100644 --- a/integration-cli/docker_cli_push_test.go +++ b/integration-cli/docker_cli_push_test.go @@ -2,10 +2,14 @@ package main import ( "fmt" + "io/ioutil" + "os" "os/exec" "strings" "testing" "time" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) // pulling an image from the central registry should work @@ -80,3 +84,35 @@ func TestPushInterrupt(t *testing.T) { logDone("push - interrupted") } + +func TestPushEmptyLayer(t *testing.T) { + defer setupRegistry(t)() + repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL) + emptyTarball, err := ioutil.TempFile("", "empty_tarball") + if err != nil { + t.Fatalf("Unable to create test file: %v", 
err) + } + tw := tar.NewWriter(emptyTarball) + err = tw.Close() + if err != nil { + t.Fatalf("Error creating empty tarball: %v", err) + } + freader, err := os.Open(emptyTarball.Name()) + if err != nil { + t.Fatalf("Could not open test tarball: %v", err) + } + + importCmd := exec.Command(dockerBinary, "import", "-", repoName) + importCmd.Stdin = freader + out, _, err := runCommandWithOutput(importCmd) + if err != nil { + t.Errorf("import failed with errors: %v, output: %q", err, out) + } + + // Now verify we can push it + pushCmd := exec.Command(dockerBinary, "push", repoName) + if out, _, err := runCommandWithOutput(pushCmd); err != nil { + t.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err) + } + logDone("push - empty layer config to private registry") +} From 77f840fb8bc679b8d0c3eb4ce7f443f904b7f9ac Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Sun, 9 Nov 2014 19:59:57 +0000 Subject: [PATCH 042/653] Proposal for an improved project structure. Note: this deprecates the fine-grained, high-overlap cascading MAINTAINERS files, and replaces them with a single top-level file, using a new structure: * More coarse grained subsystems with dedicated teams of maintainers * Core maintainers with a better-defined role and a wider scope (if it's not in a subsystem, it's up to the core maintainers to figure it out) * Architects * Operators This is work in progress, the goal is to start a conversation Signed-off-by: Solomon Hykes Signed-off-by: Erik Hollensbe Signed-off-by: Arnaud Porterie Signed-off-by: Tibor Vass Signed-off-by: Victor Vieux Signed-off-by: Vincent Batts --- MAINTAINERS | 494 ++++++++++++++++++++++++++++++++++++++++- README.md | 8 + project/MAINTAINERS.md | 147 ------------ 3 files changed, 493 insertions(+), 156 deletions(-) delete mode 100644 project/MAINTAINERS.md diff --git a/MAINTAINERS b/MAINTAINERS index 2947eb355e..da7a2c851f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1,9 +1,485 @@ -Solomon Hykes (@shykes) -Victor Vieux (@vieux) -Michael Crosby (@crosbymichael) -.mailmap: Tianon Gravi (@tianon) -.travis.yml: Tianon Gravi (@tianon) -AUTHORS: Tianon Gravi (@tianon) -Dockerfile: Tianon Gravi (@tianon) -Makefile: Tianon Gravi (@tianon) -.dockerignore: Tianon Gravi (@tianon) +# Docker maintainers file +# +# This file describes who runs the Docker project and how. +# This is a living document - if you see something out of date or missing, +# speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant +# parser. + +[Rules] + + [Rules.maintainers] + + title = "What is a maintainer?" + + text = """ +There are different types of maintainers, with different responsibilities, but +all maintainers have 3 things in common: + +1) They share responsibility in the project's success. +2) They have made a long-term, recurring time investment to improve the project. +3) They spend that time doing whatever needs to be done, not necessarily what +is the most interesting or fun. + +Maintainers are often under-appreciated, because their work is harder to appreciate. +It's easy to appreciate a really cool and technically advanced feature. It's harder +to appreciate the absence of bugs, the slow but steady improvement in stability, +or the reliability of a release process. But those things distinguish a good +project from a great one. 
+""" + + [Rules.bdfl] + + title = "The Benevolent dictator for life (BDFL)" + + text = """ +Docker follows the timeless, highly efficient and totally unfair system +known as [Benevolent dictator for +life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with +yours truly, Solomon Hykes, in the role of BDFL. This means that all +decisions are made, by default, by Solomon. Since making every decision +myself would be highly un-scalable, in practice decisions are spread +across multiple maintainers. + +Ideally, the BDFL role is like the Queen of England: awesome crown, but not +an actual operational role day-to-day. The real job of a BDFL is to NEVER GO AWAY. +Every other rule can change, perhaps drastically so, but the BDFL will always +be there, preserving the philosophy and principles of the project, and keeping +ultimate authority over its fate. This gives us great flexibility in experimenting +with various governance models, knowing that we can always press the "reset" button +without fear of fragmentation or deadlock. See the US congress for a counter-example. + +BDFL daily routine: + +* Is the project governance stuck in a deadlock or irreversibly fragmented? + * If yes: refactor the project governance +* Are there issues or conflicts escalated by core? + * If yes: resolve them +* Go back to polishing that crown. +""" + + [Rules.decisions] + + title = "How are decisions made?" + + text = """ +Short answer: EVERYTHING IS A PULL REQUEST. + +Docker is an open-source project with an open design philosophy. This +means that the repository is the source of truth for EVERY aspect of the +project, including its philosophy, design, road map, and APIs. *If it's +part of the project, it's in the repo. If it's in the repo, it's part of +the project.* + +As a result, all decisions can be expressed as changes to the +repository. An implementation change is a change to the source code. An +API change is a change to the API specification. A philosophy change is +a change to the philosophy manifesto, and so on. + +All decisions affecting Docker, big and small, follow the same 3 steps: + +* Step 1: Open a pull request. Anyone can do this. + +* Step 2: Discuss the pull request. Anyone can do this. + +* Step 3: Merge or refuse the pull request. Who does this depends on the nature +of the pull request and which areas of the project it affects. See *review flow* +for details. + +Because Docker is such a large and active project, it's important for everyone to know +who is responsible for deciding what. That is determined by a precise set of rules. + +* For every *decision* in the project, the rules should designate, in a deterministic way, +who should *decide*. + +* For every *problem* in the project, the rules should designate, in a deterministic way, +who should be responsible for *fixing* it. + +* For every *question* in the project, the rules should designate, in a deterministic way, +who should be expected to have the *answer*. +""" + + [Rules.review] + + title = "Review flow" + + text = """ +Pull requests should be processed according to the following flow: + +* For each subsystem affected by the change, the maintainers of the subsystem must approve or refuse it. +It is the responsibility of the subsystem maintainers to process patches affecting them in a timely +manner. + +* If the change affects areas of the code which are not part of a subsystem, +or if subsystem maintainers are unable to reach a timely decision, it must be approved by +the core maintainers. 
+ +* If the change affects the UI or public APIs, or if it represents a major change in architecture, +the architects must approve or refuse it. + +* If the change affects the operations of the project, it must be approved or rejected by +the relevant operators. + +* If the change affects the governance, philosophy, goals or principles of the project, +it must be approved by BDFL. +""" + + [Rules.DCO] + + title = "Helping contributors with the DCO" + + text = """ +The [DCO or `Sign your work`]( +https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work) +requirement is not intended as a roadblock or speed bump. + +Some Docker contributors are not as familiar with `git`, or have used a web based +editor, and thus asking them to `git commit --amend -s` is not the best way forward. + +In this case, maintainers can update the commits based on clause (c) of the DCO. The +most trivial way for a contributor to allow the maintainer to do this, is to add +a DCO signature in a Pull Requests's comment, or a maintainer can simply note that +the change is sufficiently trivial that it does not substantivly change the existing +contribution - i.e., a spelling change. + +When you add someone's DCO, please also add your own to keep a log. +""" + + [Rules.holiday] + + title = "I'm a maintainer, and I'm going on holiday" + + text = """ +Please let your co-maintainers and other contributors know by raising a pull +request that comments out your `MAINTAINERS` file entry using a `#`. +""" + + [Rules."no direct push"] + + title = "I'm a maintainer. Should I make pull requests too?" + + text = """ +Yes. Nobody should ever push to master directly. All changes should be +made through a pull request. +""" + + [Rules.meta] + + title = "How is this process changed?" + + text = "Just like everything else: by making a pull request :)" + +# Current project organization +[Org] + + bdfl = "shykes" + + # The chief architect is responsible for the overall integrity of the technical architecture + # across all subsystems, and the consistency of APIs and UI. + # + # Changes to UI, public APIs and overall architecture (for example a plugin system) must + # be approved by the chief architect. + "Chief Architect" = "shykes" + + # The Chief Operator is responsible for the day-to-day operations of the project including: + # - facilitating communications amongst all the contributors; + # - tracking release schedules; + # - managing the relationship with downstream distributions and upstream dependencies; + # - helping new contributors to get involved and become successful contributors and maintainers + # + # The role is also responsible for managing and measuring the success of the overall project + # and ensuring it is governed properly working in concert with the Docker Governance Advisory Board (DGAB). + # + # We are currently looking for a chief operator. Get in touch if you're interested! + # + "Chief Operator" = "spf13" + + [Org.Operators] + + # The operators make sure the trains run on time. They are responsible for overall operations + # of the project. This includes facilitating communication between all the participants; helping + # newcomers get involved and become successful contributors and maintainers; tracking the schedule + # of releases; managing the relationship with downstream distributions and upstream dependencies; + # define measures of success for the project and measure progress; Devise and implement tools and + # processes which make contributors and maintainers happier and more efficient. 
+ + + [Org.Operators.security] + + people = [ + "erw" + ] + + [Org.Operators."monthly meetings"] + + people = [ + "sven", + "tianon" + ] + + [Org.Operators.infrastructure] + + people = [ + "jess", + "crosbymichael" + ] + + # The chief maintainer is responsible for all aspects of quality for the project including + # code reviews, usability, stability, security, performance, etc. + # The most important function of the chief maintainer is to lead by example. On the first + # day of a new maintainer, the best advice should be "follow the C.M.'s example and you'll + # be fine". + "Chief Maintainer" = "crosbymichael" + + [Org."Core maintainers"] + + # The Core maintainers are the ghostbusters of the project: when there's a problem others + # can't solve, they show up and fix it with bizarre devices and weaponry. + # They have final say on technical implementation and coding style. + # They are ultimately responsible for quality in all its forms: usability polish, + # bugfixes, performance, stability, etc. When ownership can cleanly be passed to + # a subsystem, they are responsible for doing so and holding the + # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners. + + # For each release (including minor releases), a "release captain" is assigned from the + # pool of core maintainers. Rotation is encouraged across all maintainers, to ensure + # the release process is clear and up-to-date. + # + # It is common for core maintainers to "branch out" to join or start a subsystem. + + + + people = [ + "unclejack", + "crosbymichael", + "erikh", + "icecrime", + "jfrazelle", + "lk4d4", + "tibor", + "vbatts", + "vieux", + "vish" + ] + + + [Org.Subsystems] + + # As the project grows, it gets separated into well-defined subsystems. Each subsystem + # has a dedicated group of maintainers, which are dedicated to that subsytem and responsible + # for its quality. + # This "cellular division" is the primary mechanism for scaling maintenance of the project as it grows. + # + # The maintainers of each subsytem are responsible for: + # + # 1. Exposing a clear road map for improving their subsystem. + # 2. Deliver prompt feedback and decisions on pull requests affecting their subsystem. + # 3. Be available to anyone with questions, bug reports, criticism etc. + # on their component. This includes IRC, GitHub requests and the mailing + # list. + # 4. Make sure their subsystem respects the philosophy, design and + # road map of the project. + # + # #### How to review patches to your subsystem + # + # Accepting pull requests: + # + # - If the pull request appears to be ready to merge, give it a `LGTM`, which + # stands for "Looks Good To Me". + # - If the pull request has some small problems that need to be changed, make + # a comment adressing the issues. + # - If the changes needed to a PR are small, you can add a "LGTM once the + # following comments are adressed..." this will reduce needless back and + # forth. + # - If the PR only needs a few changes before being merged, any MAINTAINER can + # make a replacement PR that incorporates the existing commits and fixes the + # problems before a fast track merge. + # + # Closing pull requests: + # + # - If a PR appears to be abandoned, after having attempted to contact the + # original contributor, then a replacement PR may be made. Once the + # replacement PR is made, any contributor may close the original one. 
+ # - If you are not sure if the pull request implements a good feature or you + # do not understand the purpose of the PR, ask the contributor to provide + # more documentation. If the contributor is not able to adequately explain + # the purpose of the PR, the PR may be closed by any MAINTAINER. + # - If a MAINTAINER feels that the pull request is sufficiently architecturally + # flawed, or if the pull request needs significantly more design discussion + # before being considered, the MAINTAINER should close the pull request with + # a short explanation of what discussion still needs to be had. It is + # important not to leave such pull requests open, as this will waste both the + # MAINTAINER's time and the contributor's time. It is not good to string a + # contributor on for weeks or months, having them make many changes to a PR + # that will eventually be rejected. + + [Org.Subsystems.Documentation] + + people = [ + "fredlf", + "james", + "sven", + ] + + [Org.Subsystems.libcontainer] + + people = [ + "crosbymichael", + "vmarmol", + "mpatel", + "jnagal", + "lk4d4" + ] + + [Org.Subsystems.registry] + + people = [ + "dmp42", + "vbatts", + "joffrey", + "samalba" + ] + + [Org.Subsystems."build tools"] + + people = [ + "shykes", + "tianon" + ] + + [Org.Subsystem."remote api"] + + people = [ + "vieux" + ] + + [Org.Subsystem.swarm] + + people = [ + "aluzzardi", + "vieux" + ] + + [Org.Subsystem.machine] + + people = [ + "bfirsh", + "ehazlett" + ] + + [Org.Subsystem.compose] + + people = [ + "aanand" + ] + + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aanand] + Name = "Aanand Prasad" + Email = "aanand@docker.com" + Github = "aanand" + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "aluzzardi@docker.com" + Github = "aluzzardi" + + [people.bfirsh] + Name = "Ben Firshman" + Email = "ben@firshman.co.uk" + Github = "bfirsh" + + [people.ehazlett] + Name = "Evan Hazlett" + Email = "ejhazlett@gmail.com" + Github = "ehazlett" + + [people.erikh] + Name = "Erik Hollensbe" + Email = "erik@docker.com" + Github = "erikh" + + [people.erw] + Name = "Eric Windisch" + Email = "eric@windisch.us" + Github = "ewindisch" + + [people.icecrime] + Name = "Arnaud Porterie" + Email = "arnaud@docker.com" + Github = "icecrime" + + [people.jfrazelle] + Name = "Jessie Frazelle" + Email = "jess@docker.com" + Github = "jfrazelle" + + [people.lk4d4] + Name = "Alexander Morozov" + Email = "lk4d4@docker.com" + Github = "lk4d4" + + [people.shykes] + Name = "Solomon Hykes" + Email = "solomon@docker.com" + Github = "shykes" + + [people.sven] + Name = "Sven Dowideit" + Email = "SvenDowideit@home.org.au" + GitHub = "SvenDowideit" + + [people.tibor] + Name = "Tibor Vass" + Email = "tibor@docker.com" + Github = "tiborvass" + + [people.vbatts] + Name = "Vincent Batts" + Email = "vbatts@redhat.com" + Github = "vbatts" + + [people.vieux] + Name = "Victor Vieux" + Email = "vieux@docker.com" + Github = "vieux" + + [people.vmarmol] + Name = "Victor Marmol" + Email = "vmarmol@google.com" + Github = "vmarmol" + + [people.jnagal] + Name = "Rohit Jnagal" + Email = "jnagal@google.com" + Github = "rjnagal" + + [people.mpatel] + Name = "Mrunal Patel" + Email = "mpatel@redhat.com" + Github = "mrunalp" + + [people.unclejack] + Name = "Cristian Staretu" + Email = "cristian.staretu@gmail.com" + Github = "unclejack" + + [people.vish] + Name = "Vishnu Kannan" + 
Email = "vishnuk@google.com" + Github = "vishh" + + [people.spf13] + Name = "Steve Francia" + Email = "steve.francia@gmail.com" + Github = "spf13" diff --git a/README.md b/README.md index b372c25d47..b4c0276f5a 100644 --- a/README.md +++ b/README.md @@ -195,6 +195,14 @@ Don't know how to use that super cool new feature in the master build? Check out the master docs at [docs.master.dockerproject.com](http://docs.master.dockerproject.com). +How the project is run +====================== + +Docker is a very, very active project. If you want to learn more about how it is run, +or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/project). + +We are always open to suggestions on process improvements, and are always looking for more maintainers. + ### Legal *Brought to you courtesy of our legal counsel. For more context, diff --git a/project/MAINTAINERS.md b/project/MAINTAINERS.md deleted file mode 100644 index 1a27c9224c..0000000000 --- a/project/MAINTAINERS.md +++ /dev/null @@ -1,147 +0,0 @@ -# The Docker Maintainer manual - -## Introduction - -Dear maintainer. Thank you for investing the time and energy to help -make Docker as useful as possible. Maintaining a project is difficult, -sometimes unrewarding work. Sure, you will get to contribute cool -features to the project. But most of your time will be spent reviewing, -cleaning up, documenting, answering questions, and justifying design -decisions - while everyone has all the fun! But remember - the quality -of the maintainers' work is what distinguishes the good projects from -the great. So please be proud of your work, even the unglamourous parts, -and encourage a culture of appreciation and respect for *every* aspect -of improving the project - not just the hot new features. - -This document is a manual for maintainers old and new. It explains what -is expected of maintainers, how they should work, and what tools are -available to them. - -This is a living document - if you see something out of date or missing, -speak up! - -## What is a maintainer's responsibility? - -It is every maintainer's responsibility to: - -1. Expose a clear road map for improving their component. -2. Deliver prompt feedback and decisions on pull requests. -3. Be available to anyone with questions, bug reports, criticism etc. - on their component. This includes IRC, GitHub requests and the mailing - list. -4. Make sure their component respects the philosophy, design and - road map of the project. - -## How are decisions made? - -Short answer: with pull requests to the Docker repository. - -Docker is an open-source project with an open design philosophy. This -means that the repository is the source of truth for EVERY aspect of the -project, including its philosophy, design, road map, and APIs. *If it's -part of the project, it's in the repo. If it's in the repo, it's part of -the project.* - -As a result, all decisions can be expressed as changes to the -repository. An implementation change is a change to the source code. An -API change is a change to the API specification. A philosophy change is -a change to the philosophy manifesto, and so on. - -All decisions affecting Docker, big and small, follow the same 3 steps: - -* Step 1: Open a pull request. Anyone can do this. - -* Step 2: Discuss the pull request. Anyone can do this. - -* Step 3: Accept (`LGTM`) or refuse a pull request. 
The relevant maintainers do -this (see below "Who decides what?") - + Accepting pull requests - - If the pull request appears to be ready to merge, give it a `LGTM`, which - stands for "Looks Good To Me". - - If the pull request has some small problems that need to be changed, make - a comment adressing the issues. - - If the changes needed to a PR are small, you can add a "LGTM once the - following comments are adressed..." this will reduce needless back and - forth. - - If the PR only needs a few changes before being merged, any MAINTAINER can - make a replacement PR that incorporates the existing commits and fixes the - problems before a fast track merge. - + Closing pull requests - - If a PR appears to be abandoned, after having attempted to contact the - original contributor, then a replacement PR may be made. Once the - replacement PR is made, any contributor may close the original one. - - If you are not sure if the pull request implements a good feature or you - do not understand the purpose of the PR, ask the contributor to provide - more documentation. If the contributor is not able to adequately explain - the purpose of the PR, the PR may be closed by any MAINTAINER. - - If a MAINTAINER feels that the pull request is sufficiently architecturally - flawed, or if the pull request needs significantly more design discussion - before being considered, the MAINTAINER should close the pull request with - a short explanation of what discussion still needs to be had. It is - important not to leave such pull requests open, as this will waste both the - MAINTAINER's time and the contributor's time. It is not good to string a - contributor on for weeks or months, having them make many changes to a PR - that will eventually be rejected. - -## Who decides what? - -All decisions are pull requests, and the relevant maintainers make -decisions by accepting or refusing pull requests. Review and acceptance -by anyone is denoted by adding a comment in the pull request: `LGTM`. -However, only currently listed `MAINTAINERS` are counted towards the -required majority. - -Docker follows the timeless, highly efficient and totally unfair system -known as [Benevolent dictator for -life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with -yours truly, Solomon Hykes, in the role of BDFL. This means that all -decisions are made, by default, by Solomon. Since making every decision -myself would be highly un-scalable, in practice decisions are spread -across multiple maintainers. - -The relevant maintainers for a pull request can be worked out in 2 steps: - -* Step 1: Determine the subdirectories affected by the pull request. This - might be `src/registry`, `docs/source/api`, or any other part of the repo. - -* Step 2: Find the `MAINTAINERS` file which affects this directory. If the - directory itself does not have a `MAINTAINERS` file, work your way up - the repo hierarchy until you find one. - -There is also a `hacks/getmaintainers.sh` script that will print out the -maintainers for a specified directory. - -### I'm a maintainer, and I'm going on holiday - -Please let your co-maintainers and other contributors know by raising a pull -request that comments out your `MAINTAINERS` file entry using a `#`. - -### I'm a maintainer. Should I make pull requests too? - -Yes. Nobody should ever push to master directly. All changes should be -made through a pull request. 
- -### Helping contributors with the DCO - -The [DCO or `Sign your work`]( -https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work) -requirement is not intended as a roadblock or speed bump. - -Some Docker contributors are not as familiar with `git`, or have used a web based -editor, and thus asking them to `git commit --amend -s` is not the best way forward. - -In this case, maintainers can update the commits based on clause (c) of the DCO. The -most trivial way for a contributor to allow the maintainer to do this, is to add -a DCO signature in a Pull Requests's comment, or a maintainer can simply note that -the change is sufficiently trivial that it does not substantivly change the existing -contribution - i.e., a spelling change. - -When you add someone's DCO, please also add your own to keep a log. - -### Who assigns maintainers? - -Solomon has final `LGTM` approval for all pull requests to `MAINTAINERS` files. - -### How is this process changed? - -Just like everything else: by making a pull request :) From 6bbf19dfbe44a06dadb755e08069b5ee031f5312 Mon Sep 17 00:00:00 2001 From: unclejack Date: Mon, 26 Jan 2015 08:11:20 +0200 Subject: [PATCH 043/653] docs: shrink sprites-small_360.png Signed-off-by: Cristian Staretu --- .../mkdocs/img/footer/sprites-small_360.png | Bin 20957 -> 2763 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/docs/theme/mkdocs/img/footer/sprites-small_360.png b/docs/theme/mkdocs/img/footer/sprites-small_360.png index c28863e3f5c7ceb64b5a69345c757da563de85ed..92af5c7092f635532ab5d7ce695f5b33779a212c 100644 GIT binary patch literal 2763 zcmeH|`9Bkm1IC9O(cDL)FjttS9CI@v<`~MEBXx+({y5!)7u` z6n%wUxxN@`!q@-t{XDPN^E|)5U$5u&q*x)1f$YNU0001pFfqJy-ZSUE%*J${O@f0y z0085zmAQ?Pxp&~%zxtQJ{|y0>-o>2r4bVgGAnyXW7+JsS^-+MaW8;#X7bd9;Xl{G@ zf0>G>*uE8fDrD#VeSFEQR%V`-p~xci_r&7Q#Vro8kJ{UDpvO|M-t_}v=~s0>gRDx| z67hG9??K#VjdJy`A-i5is+wvI^1t?c5O1SU4kgOUAyTYM7MlD9RCtn>yqO8Zt1ENj zp@;HSMH1?#{ZEE&Rs`#&x%fnE-srq_zfmCndcv+*p0Cnn%%iI_vg-|x8h+?_ zxOY^4a4@(0Cmrh)*81)({F!Y4Dp?~Wy*f)90ANW+80y=EGZ1HOO3v-AAZAZrxi8}1)I3YEA@bvwp z73|PA{Lc2{X~~&O`?L`2UD_4QS$yzPrnsfhxODbB1%6h*?*=2-P*yVT(7r#%13BU? 
zRxkofw)lF3)5l*zN` zCKMVa);ZoIP3a5I_kL1IOIfip88U23lJ@=$egnNWxukhI#t){L=L%_>?TblxzgcX& zJ^4|DJ-wZRNU#u%V3NnbM4U-|&ayY||M<|QLL#ql?fbLVQ~&%%BXq5<%})y%g&Dpqa39yNmmPDkIp-MTp=jIN~uPg7h z21`66e=*<&nX|SsinB?HOw`}t1c)x#&&#N7w7X`5)xQY9_VOM5J(Yj9HB!djM=E** z*C;Wk29U;@HL@u@R<)HDJ{*4Ea+24-72HODt$=YCvr-%ksFDYiIi9{7ISLIZ>`{odwC) zD-<5U(t;+4e{6&NN9wwdjh}-K`wNW%fD!qHs@&!CI>y*UTT@*2jZJb(L35Qyw63(6 z3+4k!aqm|y(sL5k!K~I)J5XFu(3CXXpt@N5h*2Sjd2oY)tiUj%Stj`h2iosWKE3&MPKgpJm{@0y?vjb7E5YS{_6E zt2&N{>jp3edE$wkT9=f6`0UW7-cq*{w!>c@jR0gZnH+6piv=B-sjTi)V)NEkzm19o zN*Hw5fMaMXs;p=phoU^9?y-~Df`HozZA@^Go?0KF-X{{MTw2&Fgbq`2;z@IXcG6Zo zU(3;nCu`f`R}m$njjt@y!sarIZNml(`9HTeZAm%$e;b4m_m4>b*|R@JUQ(FjU*zO4sDFW85aQlI2Ee@KTQ!KqGye|-=< zt7Wn6R1B~+@x-u=W(V0^aVpDeQG`OFwjy~^CXNHQ>HTm)m7QL5;PaYT5Tzv&b+6EWMA!JT ztyW|_9Z7F>R|J(HqGXHA0}{UDp9%0Y)3?yhRnk<1By}&?WDz@*#anSx)P&om%cHpB ztO|J1hZp9HN^xuS` z!LdmRS0TRHeH&j~s&300j`|Z~esI3(edAt8ZVaWxH#K2LGc4P49cxaTI*@VS49inQ z%9QuY>GfL7_{ByWVn-8RykBc;-Vq|I0rACYr-ypS^Z7gAWRZ&Vg9adskcN%79>)C- D{E4;m literal 20957 zcmeI4c|4Tu+y5_HStF^W#ZK8~h8fFXWUFLNlC2nHFqWBN#-5O95m^#hQzB)}o^4tX zSt7Seh>+}L&-RQ~_uSp{`u<+e^Lw6u%EVjKW0h*0G1u=ZyF#jfg1prpJ3G04GipYL>$2$ zhv(B$SLegK;%qTaXaMl+$hzra5cQ6y=EKBND~lvQt)Uw3!i-qO7K89@vTC$id>1)5 z>7DLNh?VRz(l~skW#@K=H?%LzI8Hc)SWU1zx_@xQ`blv8N}zD3;(WKtc!RH9Qv)tZ zVNhYhr%8n!7~6hYZ0B7Ez?8F80{%QXG4@7-2!cl19@vg2ef;8j@BE1q+b5G*Xcq8W zIRO87S&IxK);05t>DR7az%ec$^$Wqv6nNnRqy~yc*U{vj1Hfk`V(Mvfc>t?=&ZtP5 zTtR@urGpm*z$Zl)4gg?uUJDKI77Sde8K|cR+VlZZQy)`50F?xCqb4;u0keaE=k!Gj z1mG+QG`y&<|3vpHjLu(kSY?rMwOZ!XP~8CY4J8N*0EE+OmKs8efz=Pu!p1t?NJapf zA_3(A8i4S)hEcq*w&9?L-lNOx;5s!;`%a;0SGq{iTg_~&$u^p{2_J+A=#zGuu6maqstSopkYfI%n?wVk+!Q(8fhI4{fPL4;^uS6pZUD}MAKXTklir;-#Nj#=nY?c*k z(so1t#ur__T^DZyOq2XKjL^Z<;ArgQ z0loQ2u1DoeY+C{Ut<3jncND1dUVB38M53`E?KsEJ_>_;+nxAPGE&IiA25A#kZKUvx z!+V@AI2$mOAwm26pW(U8dbkn-L-0Gjdt{X^BDgP}2GjCdv8r+is2%0LENkr_E6{$K z?qjg0pz1Mt&QKx#LzjUFK4zOkk9ZEL*GGuiup3@qzL22N#2*uJwja@Yos}OK0Xi7m zY9R99Qe2A63Ar|Qwxg&PhWkLHDocwskt#u|Ep=GS4uR_F*+oo@^#d*_B!3v$uBpXlhA$F0%6a76+xa)jk{Ox!Kp= z_W-{Y-&ms8scWHvrJvugUpw+KYwCdKHpF%i2~-o)BR;t_B|5o#vUnTc=Tq;edTKjT5?^U^^IhY-qTzf8))K<4)z3Nbob%(hkNh7^KeBz~o7!>Z zxHO+a_yw&xAtubuj0?nYVlXjqCINcHSTH_P`9*{cqSU`UqWpU2kxIP-Lf22U9s0xt zIxZm`_$bm)3zKSbTj@^uUBiqcMjpc!4=$T&WThTRIeTaBq{?fky>S81aW3vcQGoGL z$MmlBio36eY>AfM(r5HO9L`BkPFG3sGeq8h`v7mWASQ0~;8ffx?EP|M;fHY#zGg@n zNg8DtD5rU!aWe_cv`yP}yXKU6wsDrb;aGOZeYkqpS!F|IZQqKos(_Wis@d{qoy7vmT5z})A2WDox2KZFmh?WZE@KH52MpgJ@_lt&3q(8)?D$J z;z^%;_mDZ%{)DjTCX2*sco|XZGpoLx-h4t+{8{+Q=b5dK?`KD23&uISDjvTf9-W<; zRhvomR-9=XVH?Sx@tu{KDIQL4;gz=H_S}1!wTLGH=2_Qnvy@jodfZ*q=efZfaGL?L zq&D|GzTrjZ!6y6Y6HzTF)YZIrm&a4aecSrB@1To%C!(#(qjMlOQnyPdDDAy&nT|@_ zt$5+sEQlk-2bqm-f`Fhnu%C@;@nqhMy!(YEXsujgenUYg<{c`?Ou?Kf>p@nw`Q3J@ zc7c(^_6nIvnNGW7wrx!%j_UbS`E~i2+@WIhyCZM2A35jwD!;a|{nF4VQ)YXwyM6eaWyM2nyxveqxYs@7v!x=5b9yyUQx$%X!;p4kUdTLiErdR$E>AsC21w?CEaeQ$C{;C9%cPu9`hcSz31y!@-Ky4>g>B;YnddQbSr7AvSovG za-AT@ zx=A6iZJooV&3DG`#vJje23zl{-gj-0V}!R1GKWmiOc2NxJPSL2{+#b}?M2eXFBdDH z-@5XWGe=}d=t`tn)Rnrax}~V2gA9%DWpVh*>E*~pk6p2#7nXRhlqR*Qr>{edcDwA} zul*!lD0QFCVtr_TyX(=D3K_5r!E&J6y<5BzYHuECDfAAy&se z3uba`3!&9Gbw_Sb%APVAgzXXAc63D<-2M5H%NvoWB6Vqs@xHRW2Ty}Q61;DveYB^I=h|DdCW<9H+)JNkSkJrH zyI0Ru)?cn~-!GylJGNjZocZ|7m}QsIUGx0xPknl;O?%7EdBDco-=8U3dZ~XWd3=oN z-D?X+WAnV2TshoCzVFo1Q z&hP#pzNfeSNPLa5Zf4peLA-`b{{ZMvRi^^a`GUUK_jxsaX|;}eV7(i%f{hG~f|5^f zR<@_McSj*b6+4vP>n%GhId&a-cJi-um_ANPUE$<8+hwQTcP;LirAdV+r2%1@``MdC zldAqwD9&F}TL{wL9SBOA_%Z_bsT*m{czGa3|Ex<{tLt9_4%4tfN73_y)iI4?ZI5ZU zHzQffwmpp@#q!13-ARP9dA-g=ACJKgV`a5f^B*f0R^zvNvDX~f zrTkPy%r|qcB30A9|SA8>#ZwTBeut)Yk>eO20FEwf5~fC+H)y>b{~l=HWJx*zIxc 
zNGM`4d=@#18WJ7T--;;n9se3RIk5QlW6txO=noQAg}&@-tM*Ec=Pc@@U1$J+4uC4} z?{VG_C^PxD&>cQ;>qVX4ppT8%(3)J*i&g;Wp*0v};W$mt(XuV~;eJv9qksE04hA5v z(jN{Nd-EYWe*81IS2K#HMj+dqN@W(;hd$AHaKgvl&3SEeDnnXB^6IR z66J^{@>!$pFj!@QiLxpIK8%gBz-gE+SQoF3w#OXvc154`)-ytRJE9b91XK{rN}h0X z0B1B2$>-_pgeAZ|l?A@Vg_FOpH-iNDzI7owDhsHtC*(8HHQ-anxuW@CQZPvrSQ^47 zryvD^!Q|wmCHSPl5GV)?0YPLWAqsFP6b=US{dfr=n91LiTy1ROh8jnHq$69(0`^2A z9u5L|cz8&8$VlN_?LZI(1qBdT8YC?(N$w#@@WK+2o|0IC;P)Uu<7l7>C|3-gh{0j` z*5e|rac)Fq0fF^Izg|Da<&6K82ut|Ej!Y5ciNu4*cVy6?L^h~j8oZmU)3-~rL4nXt zXlFE*NFeJVf7IcBK5qPy|1IS&_rE44&xx+?pT$3~*V*}xsR=|)cQS$>NdK1hr;=dg zg-3%7(FB~ED+;aYP9BQjpGHr_*#4fJzeQU2{Oh%&Ju&|xTlajIeOn6OXGn=WR=Bz= z8cD>t8sTtGi0`ZH_Y)iWfo*+N@g3AfqA=L?$GAgEAWG5i8UGy*t$`$>5$jq>u!5vC z)CdBBLuBC)uq0THye>Bc`QDQvhAz$qW9#*Y7}7FuC}d-d4LvDikav&`l8F3|qHOHD zA&w0SZi{nuMiLPiXQUk(gvZ(`fqpk`==p11!PRk2I9Kvzp%F4lpg%SLQAK_>Hu(N% zjL^Uk@J>jtjp7a74eDgM|HA)f&mIX^f6%^zp zts!U|Nf{_i0VZdw0Fj5FzfFOz?ne1PLTlhqZtJ^^9Qu1j&}WjnGEujBguDSv?~E&>xy&cL*nsH7!-1SVnOa$n;*BbU;BZN zh~wK(D83JOLyZ5$u@aE(|Gh=`pVg56-lF?kHUGa{bia2k${vZeL)#!gKR4{phW~8t zzOUZDSCXIA>7SKHY5isnCs!@<4*IsJ5D+EMKiYn0{@&kYz4|!0eXA~V(qQrn1X=(4 zL%PvTDK(`o-RP#2{#KsCVu%RoAK56HH>lQ69N%w-C&`CnG(r{zlYuFLD4Qu21{hDY zlZgg~eBU9g*H38(`3~|;MCtjX(sY9ox>2~n@k981t~Q)wzm=Ttt>jZO`G5}kbwd9~ zHTqSy|0lov9?}0~REk)eNhp9&H#c!n=R@J$#6A&CqRxlHyNQbe2z7H47j-@q-c4K-K&YFWxTy1?@NVLw07Bi|#6_JCg?AGd z1rX}yCNAoHD7>4vD1cBmH*rztL*d=TMFE7mxrvK99}4d#E(##j%}rd?`A~Q_aZv!F zZf@eD&WFOgiHiaVb#oIJbv_i{OEcu779^_xNTJPI6N&Yn~AL^K)E&w>Y0RZ7P04#R_K%fl(;6DSvs67CH5dgpu z-amS4nEbC|oV7GmjXXO>6TAveo!Vcka(%vs{~5|am6PiP={ZyNA>p{T%)?sMOj!j% zNQWC&)g8iaq}{uGHO>Bxuv(qj{g)(qdYC2`7so3OHMAl9EiY2B%J_WET&7Qx@*v^> z=cM^aw$Hf2(r86a9KdW9@Fs_K zf!ToR8bcDTabA==_XEZ-c6z<7@HYmeIr>k)#Mt4Hh5fg;;`bTwF0h(Rz%stfU*v2j zimoJ|yp<}=@`n5Nz^-AB?5y6=v2!b80sN$QBxsLF&%vHYw;N}7WnUo$CrT7|8Ak8? zeDywq;sJR_7UtcD&3nXpE`Uxr^0E}jLE~dyrO|{O&|i4=e>n8hO7!D58Jtl^qpumIPSsc*R=+DzK8H=?YNS-se=Rn9V-QnUL(T zWmWoI7@Q=3TRfN1lm6{ad+U<9yp&N4)(;LA=;KZ|jg72Lt5$Oo;^!aQ8O1O&WL544UpZ< zu);2dT=mh8+cN_>W2kNE!zW**(VDmCYnD#Sr-ND{ookF*Wn88q?+XWpYA!)v^EZ+PP*a>1Y7%9(Q^%og8L}wx@@6Om55hGl76Z zuu-!CEcx+^6hhI3{ zyicr%b=a8xTXKF{@KF1fe)|#HVk#shmCUV;i;+0f()@Kiev%u13!kAFxj7{0` z#4%ac2%=79b0)3v{=;R`TKef0x1$Vad|fAR58ku)vv{h;*1=30oMM@jBu6(UJYH+MVuANI8~3}ISO6U-qpR@y^yR8 zw`|2m>fInlGvwr?TEY!xt?_G0=A@D-sT(s`w&lBvk$QD}R{6}C&gK(dhiK;BB=wa)d5mSp=<8#egLo$O+wD(0ZXrwt6L*PR4&X9nDB@by94v*Ad!g zc}B>6(qmz!H}*Y|H~x}|CMnuZ-aJHSNTgp*8qt=`6y zt|xMAS@6C(=q=CakU8(oKJN`H_*nmX|F+cxx_ubak{7BZ7>PHZ(M0$7m3FV!?1x%q zdD+ai=+IQ2NnJ4y5b@#XoPEsY$+1uzReluk(HOTJ)%C2=YCYF_4oOlnwZDF>Kc|pk zJe`Ev^YtB2?r%8twi`uKfh8@5~H-}bbk-{*~} zQ|I)P38WXBfqy&QZjr+|g`~w`kB9!XY>Dmf#QHOI+2p<)k2yG-S6%UfXR>TpNqS%r z9h+jSq1Ux9@ks`cxVBCYD<;1cta((P;Hr0*bGneL@v zg749-gJu!pd&N^<&3UYf>)Y|v>G0E?5Bq3dEnimsSucK~+Z{g54(0UBKD^|ZczX;T zVx*g$`^AMtLQ}#*u7`aU>s<1-E#$0d4Rh+fRni2jo&G`Kkjj?x_6g^2-5S`}6L({! 
zxv0izmqL~JRCNKtx+!PwG25KQ**vSl$%F@n4R&{;jT6+C%6Cb$hzgs%jC)1+RJXmz zV!E#4>Wn{f$y0o+A}iPI?TU$$%BY@TbYo%4o@uL>7ZTKhQe(7P^NrCuw6YIX&!E(` z{5-^v=x{^N%;&3ST4v*R<&v69dDtcU8W+8Y0{Ek}sz_)$$xL7Wpm@Ds9K88vQ2X(( zzE{6iPdlakpCDQBW2s=m?)aKTq z5qab&;rh<@PlekD#Pz{brm${1SZpni`2}h7oX26(MMN%Vt(w)jBF*DV`%o{Un{9k) z+)`J%N1VOV73iyCJ4YamUDynMZ-{eM1BdPH-mekcqH7lgc67NPzgoI&YF~5Vn#r*E zadZ(!;HZdD)=(sH3OS-?QyF62B-)~8p0iXJJ=+L2^SWRW?N{h!zuR@-H*2*pmmL>J)<&v++ zPcg6@d Date: Fri, 23 Jan 2015 17:15:35 -0500 Subject: [PATCH 044/653] cleanup of docker tag command code Signed-off-by: Shishir Mahajan --- graph/service.go | 3 +-- graph/tag.go | 25 ------------------------- 2 files changed, 1 insertion(+), 27 deletions(-) diff --git a/graph/service.go b/graph/service.go index 675e12a1a9..fda99902ca 100644 --- a/graph/service.go +++ b/graph/service.go @@ -12,8 +12,7 @@ import ( func (s *TagStore) Install(eng *engine.Engine) error { for name, handler := range map[string]engine.Handler{ "image_set": s.CmdSet, - "image_tag": s.CmdTag, - "tag": s.CmdTagLegacy, // FIXME merge with "image_tag" + "tag": s.CmdTag, "image_get": s.CmdGet, "image_inspect": s.CmdLookup, "image_tarlayer": s.CmdTarLayer, diff --git a/graph/tag.go b/graph/tag.go index 3d89422f9d..b33e49d593 100644 --- a/graph/tag.go +++ b/graph/tag.go @@ -2,34 +2,9 @@ package graph import ( "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/parsers" ) -// CmdTag assigns a new name and tag to an existing image. If the tag already exists, -// it is changed and the image previously referenced by the tag loses that reference. -// This may cause the old image to be garbage-collected if its reference count reaches zero. -// -// Syntax: image_tag NEWNAME OLDNAME -// Example: image_tag shykes/myapp:latest shykes/myapp:1.42.0 func (s *TagStore) CmdTag(job *engine.Job) engine.Status { - if len(job.Args) != 2 { - return job.Errorf("usage: %s NEWNAME OLDNAME", job.Name) - } - var ( - newName = job.Args[0] - oldName = job.Args[1] - ) - newRepo, newTag := parsers.ParseRepositoryTag(newName) - // FIXME: Set should either parse both old and new name, or neither. - // the current prototype is inconsistent. - if err := s.Set(newRepo, newTag, oldName, true); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -// FIXME: merge into CmdTag above, and merge "image_tag" and "tag" into a single job. -func (s *TagStore) CmdTagLegacy(job *engine.Job) engine.Status { if len(job.Args) != 2 && len(job.Args) != 3 { return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name) } From b4283209d55289abb2c5b63df949a27c2704f5af Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Fri, 23 Jan 2015 14:14:52 -0500 Subject: [PATCH 045/653] Fix bind-mounts only partially removed When calling delete on a bind-mount volume, the config file was bing removed, but it was not actually being removed from the volume index. 
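As an illustration of the early-return pitfall this message describes, here is a minimal, self-contained Go sketch. It uses hypothetical `Repository` and `Volume` stand-ins rather than Docker's actual volumes package: guarding only the driver removal, instead of returning early for bind mounts, lets the volume still be dropped from the in-memory index.

    package main

    import "fmt"

    // Volume and Repository are simplified stand-ins, not Docker's real types.
    type Volume struct {
        ID          string
        IsBindMount bool
    }

    type Repository struct {
        volumes map[string]*Volume
    }

    // removeFromDriver stands in for the storage-driver cleanup step.
    func (r *Repository) removeFromDriver(id string) {
        fmt.Println("driver remove:", id)
    }

    // Delete guards only the driver removal, so a bind mount still reaches
    // the index cleanup below instead of returning early and leaking an entry.
    func (r *Repository) Delete(path string) error {
        v, ok := r.volumes[path]
        if !ok {
            return fmt.Errorf("no volume registered at %s", path)
        }
        if !v.IsBindMount {
            r.removeFromDriver(v.ID)
        }
        delete(r.volumes, path) // always drop the index entry
        return nil
    }

    func main() {
        r := &Repository{volumes: map[string]*Volume{
            "/host/dir": {ID: "vol1", IsBindMount: true},
        }}
        _ = r.Delete("/host/dir")
        fmt.Println("volumes left in index:", len(r.volumes))
    }

The design point is simply that any cleanup placed after a guard clause is skipped entirely once the function returns, which matches the partial-removal symptom reported here.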
Signed-off-by: Brian Goff --- volumes/repository.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/volumes/repository.go b/volumes/repository.go index 8219952243..91f98fc060 100644 --- a/volumes/repository.go +++ b/volumes/repository.go @@ -169,13 +169,11 @@ func (r *Repository) Delete(path string) error { return err } - if volume.IsBindMount { - return nil - } - - if err := r.driver.Remove(volume.ID); err != nil { - if !os.IsNotExist(err) { - return err + if !volume.IsBindMount { + if err := r.driver.Remove(volume.ID); err != nil { + if !os.IsNotExist(err) { + return err + } } } From 44f4c95c0ece887023d3ad7ab9c4f147c81d9d3f Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 26 Jan 2015 11:16:29 -0800 Subject: [PATCH 046/653] Remove omitempty json tags from stucts When unmarshaling the json response from the API in languages to a dynamic object having the omitempty field tag on types such as float64 case the key to be omitted on 0.0 values. Various langages will interpret this as a null when 0.0 is the actual value. This patch removes the omitempty tags on fields that are not structs where they can be safely omited. Signed-off-by: Michael Crosby --- api/stats/stats.go | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/api/stats/stats.go b/api/stats/stats.go index d58fdd4f5d..8edf18fe0e 100644 --- a/api/stats/stats.go +++ b/api/stats/stats.go @@ -6,21 +6,21 @@ import "time" type ThrottlingData struct { // Number of periods with throttling active - Periods uint64 `json:"periods,omitempty"` + Periods uint64 `json:"periods"` // Number of periods when the container hit its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` + ThrottledPeriods uint64 `json:"throttled_periods"` // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time,omitempty"` + ThrottledTime uint64 `json:"throttled_time"` } // All CPU stats are aggregated since container inception. type CpuUsage struct { // Total CPU time consumed. // Units: nanoseconds. - TotalUsage uint64 `json:"total_usage,omitempty"` + TotalUsage uint64 `json:"total_usage"` // Total CPU time consumed per core. // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + PercpuUsage []uint64 `json:"percpu_usage"` // Time spent by tasks of the cgroup in kernel mode. // Units: nanoseconds. UsageInKernelmode uint64 `json:"usage_in_kernelmode"` @@ -30,41 +30,41 @@ type CpuUsage struct { } type CpuStats struct { - CpuUsage CpuUsage `json:"cpu_usage,omitempty"` + CpuUsage CpuUsage `json:"cpu_usage"` SystemUsage uint64 `json:"system_cpu_usage"` ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` } type MemoryStats struct { // current res_counter usage for memory - Usage uint64 `json:"usage,omitempty"` + Usage uint64 `json:"usage"` // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage,omitempty"` + MaxUsage uint64 `json:"max_usage"` // TODO(vishh): Export these as stronger types. // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats,omitempty"` + Stats map[string]uint64 `json:"stats"` // number of times memory usage hits limits. 
Failcnt uint64 `json:"failcnt"` Limit uint64 `json:"limit"` } type BlkioStatEntry struct { - Major uint64 `json:"major,omitempty"` - Minor uint64 `json:"minor,omitempty"` - Op string `json:"op,omitempty"` - Value uint64 `json:"value,omitempty"` + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` } type BlkioStats struct { // number of bytes tranferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive,omitempty"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive,omitempty"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive,omitempty"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"` + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` } type Network struct { From b3dfe1a63a51639400e691939b0879e9b0807703 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Mon, 26 Jan 2015 11:01:40 -0800 Subject: [PATCH 047/653] Add completion for stats. 
Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- contrib/completion/bash/docker | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker index 4891194bd8..1d553941bc 100755 --- a/contrib/completion/bash/docker +++ b/contrib/completion/bash/docker @@ -789,6 +789,10 @@ _docker_start() { esac } +_docker_stats() { + __docker_containers_running +} + _docker_stop() { case "$prev" in --time|-t) @@ -886,6 +890,7 @@ _docker() { save search start + stats stop tag top From b54305ae2330bcadd2ef657a17f4a87896031d1f Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Wed, 21 Jan 2015 21:40:19 -0500 Subject: [PATCH 048/653] Do not return err on symlink eval Signed-off-by: Brian Goff --- integration-cli/docker_cli_daemon_test.go | 74 +++++++++++++++++++++++ volumes/repository.go | 7 ++- 2 files changed, 78 insertions(+), 3 deletions(-) diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go index 95188296d8..d17e8093ad 100644 --- a/integration-cli/docker_cli_daemon_test.go +++ b/integration-cli/docker_cli_daemon_test.go @@ -403,3 +403,77 @@ func TestDaemonKeyMigration(t *testing.T) { logDone("daemon - key migration") } + +// Simulate an older daemon (pre 1.3) coming up with volumes specified in containers +// without corrosponding volume json +func TestDaemonUpgradeWithVolumes(t *testing.T) { + d := NewDaemon(t) + + graphDir := filepath.Join(os.TempDir(), "docker-test") + defer os.RemoveAll(graphDir) + if err := d.StartWithBusybox("-g", graphDir); err != nil { + t.Fatal(err) + } + + tmpDir := filepath.Join(os.TempDir(), "test") + defer os.RemoveAll(tmpDir) + + if out, err := d.Cmd("create", "-v", tmpDir+":/foo", "--name=test", "busybox"); err != nil { + t.Fatal(err, out) + } + + if err := d.Stop(); err != nil { + t.Fatal(err) + } + + // Remove this since we're expecting the daemon to re-create it too + if err := os.RemoveAll(tmpDir); err != nil { + t.Fatal(err) + } + + configDir := filepath.Join(graphDir, "volumes") + + if err := os.RemoveAll(configDir); err != nil { + t.Fatal(err) + } + + if err := d.Start("-g", graphDir); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(tmpDir); os.IsNotExist(err) { + t.Fatalf("expected volume path %s to exist but it does not", tmpDir) + } + + dir, err := ioutil.ReadDir(configDir) + if err != nil { + t.Fatal(err) + } + if len(dir) == 0 { + t.Fatalf("expected volumes config dir to contain data for new volume") + } + + // Now with just removing the volume config and not the volume data + if err := d.Stop(); err != nil { + t.Fatal(err) + } + + if err := os.RemoveAll(configDir); err != nil { + t.Fatal(err) + } + + if err := d.Start("-g", graphDir); err != nil { + t.Fatal(err) + } + + dir, err = ioutil.ReadDir(configDir) + if err != nil { + t.Fatal(err) + } + + if len(dir) == 0 { + t.Fatalf("expected volumes config dir to contain data for new volume") + } + + logDone("daemon - volumes from old(pre 1.3) daemon work") +} diff --git a/volumes/repository.go b/volumes/repository.go index 91f98fc060..e125677680 100644 --- a/volumes/repository.go +++ b/volumes/repository.go @@ -57,9 +57,10 @@ func (r *Repository) newVolume(path string, writable bool) (*Volume, error) { } path = filepath.Clean(path) - path, err = filepath.EvalSymlinks(path) - if err != nil { - return nil, err + // Ignore the error here since the path may not exist + // Really just want to make sure the path we are using is real(or non-existant) + if cleanPath, err := 
filepath.EvalSymlinks(path); err == nil { + path = cleanPath } v := &Volume{ From a90e91b500bb1a39a7726025973b5748148768c1 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 26 Jan 2015 12:58:45 -0800 Subject: [PATCH 049/653] Add file path to errors loading the key file Signed-off-by: Derek McGowan (github: dmcgowan) --- api/common.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/common.go b/api/common.go index fb3eefaca0..bd0639e0b5 100644 --- a/api/common.go +++ b/api/common.go @@ -69,7 +69,7 @@ func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { return nil, fmt.Errorf("Error saving key file: %s", err) } } else if err != nil { - return nil, fmt.Errorf("Error loading key file: %s", err) + return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) } return trustKey, nil } From fd2d45d7d465fe02f159f21389b92164dbb433d3 Mon Sep 17 00:00:00 2001 From: Alexander Morozov Date: Mon, 26 Jan 2015 14:00:44 -0800 Subject: [PATCH 050/653] Update libcontainer to 2d3b5af7486f1a4e80a5ed91859d309b4eebf80c This revision is from docker_1.5 branch, because we don't want to introduce user namespace in docker 1.5, but fix for --pid=host is needed. Fixes #10303 Signed-off-by: Alexander Morozov --- project/vendor.sh | 3 ++- .../docker/libcontainer/namespaces/exec.go | 22 ++++++++++++++----- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/project/vendor.sh b/project/vendor.sh index b60e42f5c4..634e17602c 100755 --- a/project/vendor.sh +++ b/project/vendor.sh @@ -68,7 +68,8 @@ if [ "$1" = '--go' ]; then mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar fi -clone git github.com/docker/libcontainer eb74393a3d2daeafbef4f5f27c0821cbdd67559c +# this commit is from docker_1.5 branch in libcontainer, pls delete that branch when you'll update libcontainer again +clone git github.com/docker/libcontainer 2d3b5af7486f1a4e80a5ed91859d309b4eebf80c # see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file) rm -rf src/github.com/docker/libcontainer/vendor eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli')" diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/exec.go b/vendor/src/github.com/docker/libcontainer/namespaces/exec.go index bfaa755afc..ff00396979 100644 --- a/vendor/src/github.com/docker/libcontainer/namespaces/exec.go +++ b/vendor/src/github.com/docker/libcontainer/namespaces/exec.go @@ -50,10 +50,20 @@ func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Wri } child.Close() + wait := func() (*os.ProcessState, error) { + ps, err := command.Process.Wait() + // we should kill all processes in cgroup when init is died if we use + // host PID namespace + if !container.Namespaces.Contains(libcontainer.NEWPID) { + killAllPids(container) + } + return ps, err + } + terminate := func(terr error) (int, error) { // TODO: log the errors for kill and wait command.Process.Kill() - command.Wait() + wait() return -1, terr } @@ -109,16 +119,16 @@ func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Wri startCallback() } - if err := command.Wait(); err != nil { + ps, err := wait() + if err != nil { if _, ok := err.(*exec.ExitError); !ok { return -1, err } } - if !container.Namespaces.Contains(libcontainer.NEWPID) { - killAllPids(container) - } + // waiting for pipe flushing + command.Wait() - waitStatus := 
command.ProcessState.Sys().(syscall.WaitStatus) + waitStatus := ps.Sys().(syscall.WaitStatus) if waitStatus.Signaled() { return EXIT_SIGNAL_OFFSET + int(waitStatus.Signal()), nil } From d277714614f62dba276227f70cc2a47eac6e02e7 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 26 Jan 2015 14:00:51 -0800 Subject: [PATCH 051/653] Better error messaging and logging for v2 registry requests Signed-off-by: Derek McGowan (github: dmcgowan) --- graph/push.go | 2 +- registry/session_v2.go | 20 ++++++++++++++------ 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/graph/push.go b/graph/push.go index b8fb09882e..5507ca5229 100644 --- a/graph/push.go +++ b/graph/push.go @@ -411,7 +411,7 @@ func (s *TagStore) CmdPush(job *engine.Job) engine.Status { } // error out, no fallback to V1 - return job.Error(err) + return job.Errorf("Error pushing to registry: %s", err) } if err != nil { diff --git a/registry/session_v2.go b/registry/session_v2.go index fa02bd3e6c..8bbc9fe9b6 100644 --- a/registry/session_v2.go +++ b/registry/session_v2.go @@ -132,7 +132,7 @@ func (r *Session) HeadV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, // return something indicating blob push needed return false, nil } - return false, fmt.Errorf("Failed to mount %q - %s:%s : %d", imageName, sumType, sum, res.StatusCode) + return false, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying head request for %s - %s:%s", res.StatusCode, imageName, sumType, sum), res) } func (r *Session) GetV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, blobWrtr io.Writer, auth *RequestAuthorization) error { @@ -189,7 +189,7 @@ func (r *Session) GetV2ImageBlobReader(ep *Endpoint, imageName, sumType, sum str if res.StatusCode == 401 { return nil, 0, errLoginRequired } - return nil, 0, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob", res.StatusCode, imageName), res) + return nil, 0, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob - %s:%s", res.StatusCode, imageName, sumType, sum), res) } lenStr := res.Header.Get("Content-Length") l, err := strconv.ParseInt(lenStr, 10, 64) @@ -246,7 +246,12 @@ func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string if res.StatusCode == 401 { return errLoginRequired } - return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s blob", res.StatusCode, imageName), res) + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return err + } + log.Debugf("Unexpected response from server: %q %#v", errBody, res.Header) + return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s blob - %s:%s", res.StatusCode, imageName, sumType, sumStr), res) } return nil @@ -272,13 +277,16 @@ func (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, ma if err != nil { return err } - b, _ := ioutil.ReadAll(res.Body) - res.Body.Close() + defer res.Body.Close() if res.StatusCode != 200 { if res.StatusCode == 401 { return errLoginRequired } - log.Debugf("Unexpected response from server: %q %#v", b, res.Header) + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return err + } + log.Debugf("Unexpected response from server: %q %#v", errBody, res.Header) return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s:%s manifest", res.StatusCode, imageName, tagName), res) } From 1e41d57bd44bd503d0642c8ea20123541c146218 Mon Sep 17 00:00:00 2001 From: unclejack Date: Sat, 24 Jan 2015 08:35:03 +0200 Subject: [PATCH 052/653] 
docs: compress search_content.json for release Signed-off-by: Cristian Staretu Docker-DCO-1.1-Signed-off-by: unclejack (github: SvenDowideit) --- docs/release.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/release.sh b/docs/release.sh index de064706bc..4491af6247 100755 --- a/docs/release.sh +++ b/docs/release.sh @@ -72,6 +72,8 @@ setup_s3() { build_current_documentation() { mkdocs build + cd site/ + gzip -9k search_content.json } upload_current_documentation() { From a78130467a03b95d1852bb2e6d10120bd8e6a6e7 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Mon, 26 Jan 2015 16:37:03 +1000 Subject: [PATCH 053/653] Change to load the json.gz file Signed-off-by: Sven Dowideit Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/release.sh | 3 ++- docs/theme/mkdocs/js/base.js | 14 +++++++------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/docs/release.sh b/docs/release.sh index 4491af6247..16bd759479 100755 --- a/docs/release.sh +++ b/docs/release.sh @@ -73,7 +73,8 @@ setup_s3() { build_current_documentation() { mkdocs build cd site/ - gzip -9k search_content.json + gzip -9k -f search_content.json + cd .. } upload_current_documentation() { diff --git a/docs/theme/mkdocs/js/base.js b/docs/theme/mkdocs/js/base.js index 1406dcd177..04f0c30c70 100644 --- a/docs/theme/mkdocs/js/base.js +++ b/docs/theme/mkdocs/js/base.js @@ -1,12 +1,6 @@ $(document).ready(function () { - // Tipue Search activation - $('#tipue_search_input').tipuesearch({ - 'mode': 'json', - 'contentLocation': '/search_content.json' - }); - prettyPrint(); // Resizing @@ -51,6 +45,12 @@ $(document).ready(function () }, }); + // Tipue Search activation + $('#tipue_search_input').tipuesearch({ + 'mode': 'json', + 'contentLocation': '/search_content.json.gz' + }); + }); function resizeMenuDropdown () @@ -92,4 +92,4 @@ function getCookie(cname) { if (c.indexOf(name) == 0) return c.substring(name.length,c.length); } return ""; -} \ No newline at end of file +} From 64f67af2b2bab7c9d3b4458b194c10f7428bf257 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Mon, 26 Jan 2015 21:26:38 +1000 Subject: [PATCH 054/653] set the content-type for the search_content.json.gz Signed-off-by: Sven Dowideit Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/release.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/release.sh b/docs/release.sh index 16bd759479..9f51e0296f 100755 --- a/docs/release.sh +++ b/docs/release.sh @@ -91,7 +91,6 @@ upload_current_documentation() { echo " to $dst" echo #s3cmd --recursive --follow-symlinks --preserve --acl-public sync "$src" "$dst" - #aws s3 cp --profile $BUCKET --cache-control "max-age=3600" --acl public-read "site/search_content.json" "$dst" # a really complicated way to send only the files we want # if there are too many in any one set, aws s3 sync seems to fall over with 2 files to go @@ -103,6 +102,9 @@ upload_current_documentation() { echo "$run" echo "=======================" $run + + # Make sure the search_content.json.gz file has the right content-encoding + aws s3 cp --profile $BUCKET --cache-control "max-age=3600" --content-encoding="gzip" --acl public-read "site/search_content.json.gz" "$dst" } invalidate_cache() { From 60089ace4895d9ab9c9fe9f2fb4b0db8a0d9aeda Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Mon, 26 Jan 2015 21:44:17 +1000 Subject: [PATCH 055/653] as we're not using the search suggestion feature only load the search_content when we have a search ?q= param Signed-off-by: Sven 
Dowideit Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/release.sh | 3 +-- docs/theme/mkdocs/js/base.js | 16 +++++++++++----- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/docs/release.sh b/docs/release.sh index 9f51e0296f..975940f5d5 100755 --- a/docs/release.sh +++ b/docs/release.sh @@ -90,7 +90,6 @@ upload_current_documentation() { echo "Uploading $src" echo " to $dst" echo - #s3cmd --recursive --follow-symlinks --preserve --acl-public sync "$src" "$dst" # a really complicated way to send only the files we want # if there are too many in any one set, aws s3 sync seems to fall over with 2 files to go @@ -104,7 +103,7 @@ upload_current_documentation() { $run # Make sure the search_content.json.gz file has the right content-encoding - aws s3 cp --profile $BUCKET --cache-control "max-age=3600" --content-encoding="gzip" --acl public-read "site/search_content.json.gz" "$dst" + aws s3 cp --profile $BUCKET --cache-control $cache --content-encoding="gzip" --acl public-read "site/search_content.json.gz" "$dst" } invalidate_cache() { diff --git a/docs/theme/mkdocs/js/base.js b/docs/theme/mkdocs/js/base.js index 04f0c30c70..b4775c837b 100644 --- a/docs/theme/mkdocs/js/base.js +++ b/docs/theme/mkdocs/js/base.js @@ -45,11 +45,17 @@ $(document).ready(function () }, }); - // Tipue Search activation - $('#tipue_search_input').tipuesearch({ - 'mode': 'json', - 'contentLocation': '/search_content.json.gz' - }); + function getURLP(name) + { + return decodeURIComponent((new RegExp('[?|&]' + name + '=' + '([^&;]+?)(&|#|;|$)').exec(location.search)||[,""])[1].replace(/\+/g, '%20')) || null; + } + if (getURLP("q")) { + // Tipue Search activation + $('#tipue_search_input').tipuesearch({ + 'mode': 'json', + 'contentLocation': '/search_content.json.gz' + }); + } }); From 87d2adf070a44d3d62dfe7ca8c13e95e92f53b64 Mon Sep 17 00:00:00 2001 From: Tony Miller Date: Tue, 27 Jan 2015 10:12:54 +0900 Subject: [PATCH 056/653] fix /etc/host typo in remote API docs Signed-off-by: Tony Miller --- docs/sources/reference/api/docker_remote_api_v1.15.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.16.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.17.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md index 229a05b1b4..47fe21e92e 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.15.md +++ b/docs/sources/reference/api/docker_remote_api_v1.15.md @@ -222,7 +222,7 @@ Json Parameters: - **Dns** - A list of dns servers for the container to use. - **DnsSearch** - A list of DNS search domains - **ExtraHosts** - A list of hostnames/IP mappings to be added to the - container's `/etc/host` file. Specified in the form `["hostname:IP"]`. + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - **CapAdd** - A list of kernel capabilties to add to the container. diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index c701a58bf0..9934ab7716 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -222,7 +222,7 @@ Json Parameters: - **Dns** - A list of dns servers for the container to use. 
- **DnsSearch** - A list of DNS search domains - **ExtraHosts** - A list of hostnames/IP mappings to be added to the - container's `/etc/host` file. Specified in the form `["hostname:IP"]`. + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - **CapAdd** - A list of kernel capabilties to add to the container. diff --git a/docs/sources/reference/api/docker_remote_api_v1.17.md b/docs/sources/reference/api/docker_remote_api_v1.17.md index 400e197140..d6d0c1b4aa 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.17.md +++ b/docs/sources/reference/api/docker_remote_api_v1.17.md @@ -225,7 +225,7 @@ Json Parameters: - **Dns** - A list of dns servers for the container to use. - **DnsSearch** - A list of DNS search domains - **ExtraHosts** - A list of hostnames/IP mappings to be added to the - container's `/etc/host` file. Specified in the form `["hostname:IP"]`. + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - **CapAdd** - A list of kernel capabilties to add to the container. From f709da192cda47c9aaaa212b03442d4f28afdb97 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Mon, 26 Jan 2015 05:29:49 -0800 Subject: [PATCH 057/653] Fix docs so WORKDIR mentions it works for COPY and ADD too The docs around COPY/ADD already mentioned that it will do a relative copy/add based on WORKDIR, so that part is already ok. Just needed to tweak the WORKDIR section since w/o mentioning COPY/ADD it can be misleading. Noticed by @phemmer Signed-off-by: Doug Davis --- docs/man/Dockerfile.5.md | 2 +- docs/sources/reference/builder.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/man/Dockerfile.5.md b/docs/man/Dockerfile.5.md index 787142d29a..20fbf8ac31 100644 --- a/docs/man/Dockerfile.5.md +++ b/docs/man/Dockerfile.5.md @@ -195,7 +195,7 @@ or **WORKDIR** -- **WORKDIR /path/to/workdir** - The WORKDIR instruction sets the working directory for the **RUN**, **CMD**, and **ENTRYPOINT** Dockerfile commands that follow it. + The WORKDIR instruction sets the working directory for the **RUN**, **CMD**, **ENTRYPOINT**, **COPY** and **ADD** Dockerfile commands that follow it. It can be used multiple times in a single Dockerfile. Relative paths are defined relative to the path of the previous **WORKDIR** instruction. For example: **WORKDIR /a WORKDIR b WORKDIR c RUN pwd** In the above example, the output of the **pwd** command is **a/b/c**. diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 43e88c766d..3931bb28a5 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -801,8 +801,8 @@ and for any `RUN`, `CMD` and `ENTRYPOINT` instructions that follow it in the WORKDIR /path/to/workdir -The `WORKDIR` instruction sets the working directory for any `RUN`, `CMD` and -`ENTRYPOINT` instructions that follow it in the `Dockerfile`. +The `WORKDIR` instruction sets the working directory for any `RUN`, `CMD`, +`ENTRYPOINT`, `COPY` and `ADD` instructions that follow it in the `Dockerfile`. It can be used multiple times in the one `Dockerfile`. 
If a relative path is provided, it will be relative to the path of the previous `WORKDIR` From 6a2c6e971d8b760931402e15513e91c71cba4b72 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Mon, 26 Jan 2015 17:17:08 -0800 Subject: [PATCH 058/653] Move one last exec test :) Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_exec_test.go | 56 +++++++++++++++++++++++++ integration-cli/docker_cli_run_test.go | 56 ------------------------- 2 files changed, 56 insertions(+), 56 deletions(-) diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go index 85906a8ae0..0b8af53569 100644 --- a/integration-cli/docker_cli_exec_test.go +++ b/integration-cli/docker_cli_exec_test.go @@ -623,3 +623,59 @@ func TestRunExecDir(t *testing.T) { logDone("run - check execdriver dir behavior") } + +func TestRunMutableNetworkFiles(t *testing.T) { + defer deleteAllContainers() + + for _, fn := range []string{"resolv.conf", "hosts"} { + deleteAllContainers() + + content, err := runCommandAndReadContainerFile(fn, exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn))) + if err != nil { + t.Fatal(err) + } + + if strings.TrimSpace(string(content)) != "success" { + t.Fatal("Content was not what was modified in the container", string(content)) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "c2", "busybox", "top")) + if err != nil { + t.Fatal(err) + } + + contID := strings.TrimSpace(out) + + netFilePath := containerStorageFile(contID, fn) + + f, err := os.OpenFile(netFilePath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644) + if err != nil { + t.Fatal(err) + } + + if _, err := f.Seek(0, 0); err != nil { + f.Close() + t.Fatal(err) + } + + if err := f.Truncate(0); err != nil { + f.Close() + t.Fatal(err) + } + + if _, err := f.Write([]byte("success2\n")); err != nil { + f.Close() + t.Fatal(err) + } + f.Close() + + res, err := exec.Command(dockerBinary, "exec", contID, "cat", "/etc/"+fn).CombinedOutput() + if err != nil { + t.Fatalf("Output: %s, error: %s", res, err) + } + if string(res) != "success2\n" { + t.Fatalf("Expected content of %s: %q, got: %q", fn, "success2\n", res) + } + } + logDone("run - mutable network files") +} diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index fecc9494d3..c7e476f0b8 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -2166,62 +2166,6 @@ func TestRunBindMounts(t *testing.T) { logDone("run - bind mounts") } -func TestRunMutableNetworkFiles(t *testing.T) { - defer deleteAllContainers() - - for _, fn := range []string{"resolv.conf", "hosts"} { - deleteAllContainers() - - content, err := runCommandAndReadContainerFile(fn, exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn))) - if err != nil { - t.Fatal(err) - } - - if strings.TrimSpace(string(content)) != "success" { - t.Fatal("Content was not what was modified in the container", string(content)) - } - - out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "c2", "busybox", "top")) - if err != nil { - t.Fatal(err) - } - - contID := strings.TrimSpace(out) - - netFilePath := containerStorageFile(contID, fn) - - f, err := os.OpenFile(netFilePath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644) - if err != nil { - t.Fatal(err) - } - - if _, err := f.Seek(0, 0); err 
!= nil { - f.Close() - t.Fatal(err) - } - - if err := f.Truncate(0); err != nil { - f.Close() - t.Fatal(err) - } - - if _, err := f.Write([]byte("success2\n")); err != nil { - f.Close() - t.Fatal(err) - } - f.Close() - - res, err := exec.Command(dockerBinary, "exec", contID, "cat", "/etc/"+fn).CombinedOutput() - if err != nil { - t.Fatalf("Output: %s, error: %s", res, err) - } - if string(res) != "success2\n" { - t.Fatalf("Expected content of %s: %q, got: %q", fn, "success2\n", res) - } - } - logDone("run - mutable network files") -} - // Ensure that CIDFile gets deleted if it's empty // Perform this test by making `docker run` fail func TestRunCidFileCleanupIfEmpty(t *testing.T) { From dcfa881a7b12e9a737b25ada98ec0c76e58c9c9c Mon Sep 17 00:00:00 2001 From: Chen Hanxiao Date: Tue, 27 Jan 2015 11:19:02 +0800 Subject: [PATCH 059/653] docs: fix a typo in docker-build man page s/Dockefile/Dockerfile Signed-off-by: Chen Hanxiao --- docs/man/docker-build.1.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/man/docker-build.1.md b/docs/man/docker-build.1.md index 98bf3771ab..661ef35162 100644 --- a/docs/man/docker-build.1.md +++ b/docs/man/docker-build.1.md @@ -59,7 +59,7 @@ as context. # EXAMPLES -## Building an image using a Dockefile located inside the current directory +## Building an image using a Dockerfile located inside the current directory Docker images can be built using the build command and a Dockerfile: From 6774be62d647b2e2f258bc7b4158cb9e10a3ecf6 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Tue, 27 Jan 2015 14:55:43 +1000 Subject: [PATCH 060/653] tell users they can what IP range Hub webhooks can come from so they can filter Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) Signed-off-by: Sven Dowideit --- docs/sources/docker-hub/builds.md | 4 ++++ docs/sources/docker-hub/repos.md | 10 +++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/docs/sources/docker-hub/builds.md b/docs/sources/docker-hub/builds.md index 5d73e4aae9..8b914fa10c 100644 --- a/docs/sources/docker-hub/builds.md +++ b/docs/sources/docker-hub/builds.md @@ -278,6 +278,10 @@ Webhooks are available under the Settings menu of each Repository. > **Note:** If you want to test your webhook out we recommend using > a tool like [requestb.in](http://requestb.in/). +> **Note**: The Docker Hub servers are currently in the IP range +> `162.242.195.64 - 162.242.195.127`, so you can restrict your webhooks to +> accept webhook requests from that set of IP addresses. + ### Webhook chains Webhook chains allow you to chain calls to multiple services. For example, diff --git a/docs/sources/docker-hub/repos.md b/docs/sources/docker-hub/repos.md index 0749c0814c..2bb75f0b73 100644 --- a/docs/sources/docker-hub/repos.md +++ b/docs/sources/docker-hub/repos.md @@ -105,9 +105,6 @@ Settings page. A webhook is called only after a successful `push` is made. The webhook calls are HTTP POST requests with a JSON payload similar to the example shown below. -> **Note:** For testing, you can try an HTTP request tool like -> [requestb.in](http://requestb.in/). - *Example webhook JSON payload:* ``` @@ -141,6 +138,13 @@ new updates to your images and repositories. To get started adding webhooks, go to the desired repo in the Hub, and click "Webhooks" under the "Settings" box. +> **Note:** For testing, you can try an HTTP request tool like +> [requestb.in](http://requestb.in/). 
+ +> **Note**: The Docker Hub servers are currently in the IP range +> `162.242.195.64 - 162.242.195.127`, so you can restrict your webhooks to +> accept webhook requests from that set of IP addresses. + ### Webhook chains Webhook chains allow you to chain calls to multiple services. For example, From 79e8ca04f5764c150b61b1b05785fb3926613fa1 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Tue, 27 Jan 2015 09:33:42 -0500 Subject: [PATCH 061/653] Update go-md2man Update fixes some rendering issues, including improperly escaping '$' in blocks, and actual parsing of blockcode. `ID=$(sudo docker run -d fedora /usr/bin/top -b)` was being converted to `ID=do docker run -d fedora/usr/bin/top -b)` Signed-off-by: Brian Goff --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 50920945e2..67b9ea1690 100644 --- a/Dockerfile +++ b/Dockerfile @@ -151,7 +151,7 @@ ENV DOCKER_BUILDTAGS apparmor selinux btrfs_noversion COPY vendor /go/src/github.com/docker/docker/vendor # (copy vendor/ because go-md2man needs golang.org/x/net) RUN set -x \ - && git clone -b v1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \ + && git clone -b v1.0.1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \ && git clone -b v1.2 https://github.com/russross/blackfriday.git /go/src/github.com/russross/blackfriday \ && go install -v github.com/cpuguy83/go-md2man From e662775ffb096fdb4a7f247bb25b8e3022006c1b Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Mon, 26 Jan 2015 20:56:34 -0800 Subject: [PATCH 062/653] Fix premature close of build output on pull The build job will sometimes trigger a pull job when the base image does not exist. Now that engine jobs properly close their output by default the pull job would also close the build job's stdout in a cascading close upon completion of the pull. This patch corrects this by wrapping the `pull` job's stdout with a nopCloseWriter which will not close the stdout of the `build` job. 
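The idea behind the fix can be shown with a minimal, self-contained Go sketch. It mirrors the intent of the `ioutils.NopWriteCloser` wrapper used in the patch rather than reproducing its actual source: the wrapper forwards writes but turns Close into a no-op, so an inner job "closing" its stdout no longer tears down the writer it shares with the outer job.

    package main

    import (
        "fmt"
        "io"
        "os"
    )

    // nopWriteCloser wraps an io.Writer so that Close is a no-op,
    // leaving the underlying writer open for whoever else shares it.
    type nopWriteCloser struct {
        io.Writer
    }

    func (nopWriteCloser) Close() error { return nil }

    func main() {
        shared := os.Stdout             // stands in for the outer (build) job's stdout
        inner := nopWriteCloser{shared} // what the inner (pull) job is handed

        fmt.Fprintln(inner, "pull job output")
        inner.Close()                            // does NOT close the shared writer
        fmt.Fprintln(shared, "build job output") // still usable afterwards
    }

With a plain write-closer the inner Close would propagate to the shared output, which is exactly the premature-close behaviour described above.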
Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- builder/internals.go | 3 +- engine/engine_test.go | 84 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 1 deletion(-) diff --git a/builder/internals.go b/builder/internals.go index 830da72725..ddbef108a0 100644 --- a/builder/internals.go +++ b/builder/internals.go @@ -25,6 +25,7 @@ import ( imagepkg "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" @@ -433,7 +434,7 @@ func (b *Builder) pullImage(name string) (*imagepkg.Image, error) { job.SetenvBool("json", b.StreamFormatter.Json()) job.SetenvBool("parallel", true) job.SetenvJson("authConfig", pullRegistryAuth) - job.Stdout.Add(b.OutOld) + job.Stdout.Add(ioutils.NopWriteCloser(b.OutOld)) if err := job.Run(); err != nil { return nil, err } diff --git a/engine/engine_test.go b/engine/engine_test.go index 7ab2f8fc0d..96c3f0df30 100644 --- a/engine/engine_test.go +++ b/engine/engine_test.go @@ -4,6 +4,8 @@ import ( "bytes" "strings" "testing" + + "github.com/docker/docker/pkg/ioutils" ) func TestRegister(t *testing.T) { @@ -150,3 +152,85 @@ func TestCatchallEmptyName(t *testing.T) { t.Fatalf("Engine.Job(\"\").Run() should return an error") } } + +// Ensure that a job within a job both using the same underlying standard +// output writer does not close the output of the outer job when the inner +// job's stdout is wrapped with a NopCloser. When not wrapped, it should +// close the outer job's output. +func TestNestedJobSharedOutput(t *testing.T) { + var ( + outerHandler Handler + innerHandler Handler + wrapOutput bool + ) + + outerHandler = func(job *Job) Status { + job.Stdout.Write([]byte("outer1")) + + innerJob := job.Eng.Job("innerJob") + + if wrapOutput { + innerJob.Stdout.Add(ioutils.NopWriteCloser(job.Stdout)) + } else { + innerJob.Stdout.Add(job.Stdout) + } + + if err := innerJob.Run(); err != nil { + t.Fatal(err) + } + + // If wrapOutput was *false* this write will do nothing. + // FIXME (jlhawn): It should cause an error to write to + // closed output. 
+ job.Stdout.Write([]byte(" outer2")) + + return StatusOK + } + + innerHandler = func(job *Job) Status { + job.Stdout.Write([]byte(" inner")) + + return StatusOK + } + + eng := New() + eng.Register("outerJob", outerHandler) + eng.Register("innerJob", innerHandler) + + // wrapOutput starts *false* so the expected + // output of running the outer job will be: + // + // "outer1 inner" + // + outBuf := new(bytes.Buffer) + outerJob := eng.Job("outerJob") + outerJob.Stdout.Add(outBuf) + + if err := outerJob.Run(); err != nil { + t.Fatal(err) + } + + expectedOutput := "outer1 inner" + if outBuf.String() != expectedOutput { + t.Fatalf("expected job output to be %q, got %q", expectedOutput, outBuf.String()) + } + + // Set wrapOutput to true so that the expected + // output of running the outer job will be: + // + // "outer1 inner outer2" + // + wrapOutput = true + outBuf.Reset() + outerJob = eng.Job("outerJob") + outerJob.Stdout.Add(outBuf) + + if err := outerJob.Run(); err != nil { + t.Fatal(err) + } + + expectedOutput = "outer1 inner outer2" + if outBuf.String() != expectedOutput { + t.Fatalf("expected job output to be %q, got %q", expectedOutput, outBuf.String()) + } +} From ab589b442d55597de74201729ba893977ea73b7f Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 27 Jan 2015 10:21:35 -0800 Subject: [PATCH 063/653] Use layer checksum if calculated during manifest creation Signed-off-by: Derek McGowan (github: dmcgowan) --- graph/manifest.go | 40 ++++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/graph/manifest.go b/graph/manifest.go index 18784bb1e0..6bebb7e5e8 100644 --- a/graph/manifest.go +++ b/graph/manifest.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "io/ioutil" - "path" log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" @@ -93,28 +92,33 @@ func (s *TagStore) newManifest(localName, remoteName, tag string) ([]byte, error } } - archive, err := layer.TarLayer() - if err != nil { - return nil, err + checksum := layer.Checksum + if tarsum.VersionLabelForChecksum(checksum) != tarsum.Version1.String() { + archive, err := layer.TarLayer() + if err != nil { + return nil, err + } + + tarSum, err := tarsum.NewTarSum(archive, true, tarsum.Version1) + if err != nil { + return nil, err + } + if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { + return nil, err + } + + checksum = tarSum.Sum(nil) } - tarSum, err := tarsum.NewTarSum(archive, true, tarsum.Version1) - if err != nil { - return nil, err - } - if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { - return nil, err - } - - tarId := tarSum.Sum(nil) - - manifest.FSLayers = append(manifest.FSLayers, ®istry.FSLayer{BlobSum: tarId}) - - layersSeen[layer.ID] = true - jsonData, err := ioutil.ReadFile(path.Join(s.graph.Root, layer.ID, "json")) + jsonData, err := layer.RawJson() if err != nil { return nil, fmt.Errorf("Cannot retrieve the path for {%s}: %s", layer.ID, err) } + + manifest.FSLayers = append(manifest.FSLayers, ®istry.FSLayer{BlobSum: checksum}) + + layersSeen[layer.ID] = true + manifest.History = append(manifest.History, ®istry.ManifestHistory{V1Compatibility: string(jsonData)}) } From 667dc58c3929112a2b8b8bc67ae54a394169c6df Mon Sep 17 00:00:00 2001 From: Alexander Morozov Date: Tue, 27 Jan 2015 16:40:11 -0800 Subject: [PATCH 064/653] Export DOCKER_GRAPHDRIVER and DOCKER_EXECDRIVER in integration-cli This needed for our "small" testing daemon using same config as "big" testing daemon. 
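The reason `export` is needed can be sketched outside the shell script: only variables placed in the process environment are inherited by child processes, so without `export` the assignments stay local to the script and the second daemon never sees them. The Go snippet below only illustrates that inheritance; the variable name is taken from the script, while the child command is arbitrary.

    package main

    import (
        "fmt"
        "os"
        "os/exec"
    )

    func main() {
        // Mirrors `export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs}`:
        // default the value, then place it in the process environment.
        if os.Getenv("DOCKER_GRAPHDRIVER") == "" {
            os.Setenv("DOCKER_GRAPHDRIVER", "vfs")
        }

        // A child process (standing in for the second, "small" test daemon)
        // sees the value only because it lives in the environment.
        out, err := exec.Command("sh", "-c", "echo graphdriver=$DOCKER_GRAPHDRIVER").Output()
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        fmt.Print(string(out))
    }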
Signed-off-by: Alexander Morozov --- project/make/.integration-daemon-start | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/project/make/.integration-daemon-start b/project/make/.integration-daemon-start index f00bb6331f..5d3cd94140 100644 --- a/project/make/.integration-daemon-start +++ b/project/make/.integration-daemon-start @@ -12,8 +12,8 @@ fi # intentionally open a couple bogus file descriptors to help test that they get scrubbed in containers exec 41>&1 42>&2 -DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} -DOCKER_EXECDRIVER=${DOCKER_EXECDRIVER:-native} +export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} +export DOCKER_EXECDRIVER=${DOCKER_EXECDRIVER:-native} if [ -z "$DOCKER_TEST_HOST" ]; then ( set -x; exec \ From 1d04cc513a958009131ca54bd6904be14fef774e Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Wed, 28 Jan 2015 10:47:11 +1000 Subject: [PATCH 065/653] Add the registry mirror document to the menu Signed-off-by: Sven Dowideit Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/mkdocs.yml | 1 + docs/sources/articles.md | 15 --------------- 2 files changed, 1 insertion(+), 15 deletions(-) delete mode 100644 docs/sources/articles.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 73150cc443..6b8f4dc89a 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -86,6 +86,7 @@ pages: - ['articles/networking.md', 'Articles', 'Advanced networking'] - ['articles/security.md', 'Articles', 'Security'] - ['articles/https.md', 'Articles', 'Running Docker with HTTPS'] +- ['articles/registry_mirror.md', 'Articles', 'Run a local registry mirror'] - ['articles/host_integration.md', 'Articles', 'Automatically starting containers'] - ['articles/baseimages.md', 'Articles', 'Creating a base image'] - ['articles/dockerfile_best-practices.md', 'Articles', 'Best practices for writing Dockerfiles'] diff --git a/docs/sources/articles.md b/docs/sources/articles.md deleted file mode 100644 index 37f2cd80f1..0000000000 --- a/docs/sources/articles.md +++ /dev/null @@ -1,15 +0,0 @@ -# Articles - - - [Docker Basics](basics/) - - [Docker Security](security/) - - [Running the Docker daemon with HTTPS](https/) - - [Configure Networking](networking/) - - [Using Supervisor with Docker](using_supervisord/) - - [Process Management with CFEngine](cfengine_process_management/) - - [Using Puppet](puppet/) - - [Create a Base Image](baseimages/) - - [Runtime Metrics](runmetrics/) - - [Automatically Start Containers](host_integration/) - - [Link via an Ambassador Container](ambassador_pattern_linking/) - - [Increase a Boot2Docker Volume](b2d_volume_resize/) - - [Run a Local Registry Mirror](registry_mirror/) From 71f8b09543d782727ada8f63ba7d6fa177ec54db Mon Sep 17 00:00:00 2001 From: Liu Hua Date: Wed, 28 Jan 2015 09:23:57 +0800 Subject: [PATCH 066/653] fix a minor typo in daemon/container.go Signed-off-by: Liu Hua --- daemon/container.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/container.go b/daemon/container.go index 90cd3d8327..7d3505a0f7 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -324,7 +324,7 @@ func (container *Container) Start() (err error) { return nil } - // if we encounter and error during start we need to ensure that any other + // if we encounter an error during start we need to ensure that any other // setup has been cleaned up properly defer func() { if err != nil { From 86aea582b66929c68de3346a26a26633b62a5784 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 27 Jan 2015 18:09:53 -0800 Subject: [PATCH 
067/653] Open up v2 http status code checks for put and head checks Under certain cases, such as when putting a manifest or check for the existence of a layer, the status code checks in session_v2.go were too narrow for their purpose. In the case of putting a manifest, the handler only cares that an error is not returned. Whether it is a 304 or 202 does not matter, as long as the server reports success. Having the client only accept specific http codes inhibits future protocol evolution. Signed-off-by: Stephen J Day --- registry/session_v2.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/registry/session_v2.go b/registry/session_v2.go index 8bbc9fe9b6..dbef7df1ee 100644 --- a/registry/session_v2.go +++ b/registry/session_v2.go @@ -124,14 +124,15 @@ func (r *Session) HeadV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, return false, err } res.Body.Close() // close early, since we're not needing a body on this call .. yet? - switch res.StatusCode { - case 200: + switch { + case res.StatusCode >= 200 && res.StatusCode < 400: // return something indicating no push needed return true, nil - case 404: + case res.StatusCode == 404: // return something indicating blob push needed return false, nil } + return false, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying head request for %s - %s:%s", res.StatusCode, imageName, sumType, sum), res) } @@ -278,7 +279,9 @@ func (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, ma return err } defer res.Body.Close() - if res.StatusCode != 200 { + + // All 2xx and 3xx responses can be accepted for a put. + if res.StatusCode >= 400 { if res.StatusCode == 401 { return errLoginRequired } From 629815b42472635c87ec6ce9ed4ec37019ae4ffa Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 27 Jan 2015 18:10:28 -0800 Subject: [PATCH 068/653] Buffer tar file on v2 push fixes #10312 fixes #10306 Signed-off-by: Derek McGowan (github: dmcgowan) --- graph/graph.go | 22 +++++++++++++++++++++ graph/push.go | 52 ++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 60 insertions(+), 14 deletions(-) diff --git a/graph/graph.go b/graph/graph.go index 30bea0470f..f7b9fc4f10 100644 --- a/graph/graph.go +++ b/graph/graph.go @@ -223,6 +223,28 @@ func (graph *Graph) Mktemp(id string) (string, error) { return dir, nil } +func (graph *Graph) newTempFile() (*os.File, error) { + tmp, err := graph.Mktemp("") + if err != nil { + return nil, err + } + return ioutil.TempFile(tmp, "") +} + +func bufferToFile(f *os.File, src io.Reader) (int64, error) { + n, err := io.Copy(f, src) + if err != nil { + return n, err + } + if err = f.Sync(); err != nil { + return n, err + } + if _, err := f.Seek(0, 0); err != nil { + return n, err + } + return n, nil +} + // setupInitLayer populates a directory with mountpoints suitable // for bind-mounting dockerinit into the container. 
The mountpoint is simply an // empty file at /.dockerinit diff --git a/graph/push.go b/graph/push.go index d3f3596e06..fafa41b9bf 100644 --- a/graph/push.go +++ b/graph/push.go @@ -322,16 +322,6 @@ func (s *TagStore) pushV2Repository(r *registry.Session, eng *engine.Engine, out return fmt.Errorf("Failed to parse json: %s", err) } - img, err = s.graph.Get(img.ID) - if err != nil { - return err - } - - arch, err := img.TarLayer() - if err != nil { - return fmt.Errorf("Could not get tar layer: %s", err) - } - // Call mount blob exists, err := r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, auth) if err != nil { @@ -340,12 +330,9 @@ func (s *TagStore) pushV2Repository(r *registry.Session, eng *engine.Engine, out } if !exists { - err = r.PutV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, utils.ProgressReader(arch, int(img.Size), out, sf, false, utils.TruncateID(img.ID), "Pushing"), auth) - if err != nil { - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image push failed", nil)) + if err := s.PushV2Image(r, img, endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, sf, out, auth); err != nil { return err } - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image successfully pushed", nil)) } else { out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image already exists", nil)) } @@ -355,6 +342,43 @@ func (s *TagStore) pushV2Repository(r *registry.Session, eng *engine.Engine, out return r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, bytes.NewReader([]byte(manifestBytes)), auth) } +// PushV2Image pushes the image content to the v2 registry, first buffering the contents to disk +func (s *TagStore) PushV2Image(r *registry.Session, img *image.Image, endpoint *registry.Endpoint, imageName, sumType, sumStr string, sf *utils.StreamFormatter, out io.Writer, auth *registry.RequestAuthorization) error { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Buffering to Disk", nil)) + + image, err := s.graph.Get(img.ID) + if err != nil { + return err + } + arch, err := image.TarLayer() + if err != nil { + return err + } + tf, err := s.graph.newTempFile() + if err != nil { + return err + } + defer func() { + tf.Close() + os.Remove(tf.Name()) + }() + + size, err := bufferToFile(tf, arch) + if err != nil { + return err + } + + // Send the layer + log.Debugf("rendered layer for %s of [%d] size", img.ID, size) + + if err := r.PutV2ImageBlob(endpoint, imageName, sumType, sumStr, utils.ProgressReader(tf, int(size), out, sf, false, utils.TruncateID(img.ID), "Pushing"), auth); err != nil { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image push failed", nil)) + return err + } + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image successfully pushed", nil)) + return nil +} + // FIXME: Allow to interrupt current push when new push of same image is done. func (s *TagStore) CmdPush(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { From 5945de43b02406dbc0eee44954eb21e5926bde00 Mon Sep 17 00:00:00 2001 From: Phil Estes Date: Tue, 27 Jan 2015 22:45:43 -0500 Subject: [PATCH 069/653] Fix incorrect IPv6 addresses/subnet notations in docs Fixes a few typos in IPv6 addresses. Will make it easier for users who actually try and copy/paste or use the example addresses directly. 
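One quick way to confirm the corrected notation is to run both forms through Go's `net.ParseCIDR`; this standalone check is only an illustration, not part of the patch. The `::`-terminated prefix parses cleanly, while the old single-colon form is rejected.

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // The corrected docs use "2001:db8:0:2::/64"; the earlier form with a
        // single trailing colon is not a valid IPv6 prefix and fails to parse.
        for _, cidr := range []string{"2001:db8:0:2::/64", "2001:db8:0:2:/64"} {
            _, subnet, err := net.ParseCIDR(cidr)
            if err != nil {
                fmt.Printf("%-20s invalid: %v\n", cidr, err)
                continue
            }
            fmt.Printf("%-20s valid subnet: %s\n", cidr, subnet)
        }
    }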
Docker-DCO-1.1-Signed-off-by: Phil Estes (github: estesp) --- docs/sources/articles/networking.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/sources/articles/networking.md b/docs/sources/articles/networking.md index b93286d91f..03693eb4d8 100644 --- a/docs/sources/articles/networking.md +++ b/docs/sources/articles/networking.md @@ -433,7 +433,7 @@ To assign globally routable IPv6 addresses to your containers you have to specify an IPv6 subnet to pick the addresses from. Set the IPv6 subnet via the `--fixed-cidr-v6` parameter when starting Docker daemon: - docker -d --ipv6 --fixed-cidr-v6="2001:db8:0:2:/64" + docker -d --ipv6 --fixed-cidr-v6="2001:db8:0:2::/64" The subnet for Docker containers should at least have a size of `/80`. This way an IPv6 address can end with the container's MAC address and you prevent NDP @@ -443,11 +443,11 @@ With the `--fixed-cidr-v6` parameter set Docker will add a new route to the routing table. Further IPv6 routing will be enabled (you may prevent this by starting Docker daemon with `--ip-forward=false`): - $ route -A inet6 add 2001:db8:0:2/64 dev docker0 + $ route -A inet6 add 2001:db8:0:2::/64 dev docker0 $ echo 1 > /proc/sys/net/ipv6/conf/default/forwarding $ echo 1 > /proc/sys/net/ipv6/conf/all/forwarding -All traffic to the subnet `2001:db8:0:2/64` will now be routed +All traffic to the subnet `2001:db8:0:2::/64` will now be routed via the `docker0` interface. Be aware that IPv6 forwarding may interfere with your existing IPv6 From 0c8d17b5c1a142bc09abe1105d985e76db6f225d Mon Sep 17 00:00:00 2001 From: Phil Estes Date: Tue, 27 Jan 2015 22:03:27 -0500 Subject: [PATCH 070/653] Fix bridge initialization for IPv6 if IPv4-only docker0 exists This fixes the daemon's failure to start when setting --ipv6=true for the first time without deleting `docker0` bridge from a prior use with only IPv4 addressing. The addition of the IPv6 bridge address is factored out into a separate initialization routine which is called even if the bridge exists but no IPv6 addresses are found. 
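For context, the fix hinges on a simple question: does the existing bridge already carry any IPv6 address? A rough, standalone approximation of that check is sketched below; the bridge name `docker0` is only an example, and this simplification stands in for, rather than reproduces, the driver's `GetIfaceAddr` helper.

    package main

    import (
        "fmt"
        "net"
    )

    // hasIPv6 reports whether the named interface already has an IPv6 address.
    func hasIPv6(name string) (bool, error) {
        iface, err := net.InterfaceByName(name)
        if err != nil {
            return false, err
        }
        addrs, err := iface.Addrs()
        if err != nil {
            return false, err
        }
        for _, a := range addrs {
            if ipnet, ok := a.(*net.IPNet); ok && ipnet.IP.To4() == nil {
                return true, nil // found a non-IPv4 (i.e. IPv6) address
            }
        }
        return false, nil
    }

    func main() {
        ok, err := hasIPv6("docker0") // bridge name is just an example
        if err != nil {
            fmt.Println("lookup failed:", err)
            return
        }
        fmt.Println("bridge already has an IPv6 address:", ok)
    }

When this returns false and `--ipv6` is requested, the IPv6 setup can be performed on the existing bridge instead of failing the daemon start, which is the behaviour the patch introduces.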
Docker-DCO-1.1-Signed-off-by: Phil Estes (github: estesp) --- daemon/networkdriver/bridge/driver.go | 53 ++++++++++++++++++++------- daemon/networkdriver/utils.go | 2 +- 2 files changed, 41 insertions(+), 14 deletions(-) diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index 0d3f27517d..f331f17244 100644 --- a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -150,6 +150,21 @@ func InitDriver(job *engine.Job) engine.Status { } } + // a bridge might exist but not have any IPv6 addr associated with it yet + // (for example, an existing Docker installation that has only been used + // with IPv4 and docker0 already is set up) In that case, we can perform + // the bridge init for IPv6 here, else we will error out below if --ipv6=true + if len(addrsv6) == 0 && enableIPv6 { + if err := setupIPv6Bridge(bridgeIPv6); err != nil { + return job.Error(err) + } + // recheck addresses now that IPv6 is setup on the bridge + addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface) + if err != nil { + return job.Error(err) + } + } + // TODO: Check if route to fixedCIDRv6 is set } @@ -401,21 +416,9 @@ func configureBridge(bridgeIP string, bridgeIPv6 string, enableIPv6 bool) error } if enableIPv6 { - // Enable IPv6 on the bridge - procFile := "/proc/sys/net/ipv6/conf/" + iface.Name + "/disable_ipv6" - if err := ioutil.WriteFile(procFile, []byte{'0', '\n'}, 0644); err != nil { - return fmt.Errorf("unable to enable IPv6 addresses on bridge: %s\n", err) - } - - ipAddr6, ipNet6, err := net.ParseCIDR(bridgeIPv6) - if err != nil { - log.Errorf("BridgeIPv6 parsing failed") + if err := setupIPv6Bridge(bridgeIPv6); err != nil { return err } - - if err := netlink.NetworkLinkAddIp(iface, ipAddr6, ipNet6); err != nil { - return fmt.Errorf("Unable to add private IPv6 network: %s", err) - } } if err := netlink.NetworkLinkUp(iface); err != nil { @@ -424,6 +427,30 @@ func configureBridge(bridgeIP string, bridgeIPv6 string, enableIPv6 bool) error return nil } +func setupIPv6Bridge(bridgeIPv6 string) error { + + iface, err := net.InterfaceByName(bridgeIface) + if err != nil { + return err + } + // Enable IPv6 on the bridge + procFile := "/proc/sys/net/ipv6/conf/" + iface.Name + "/disable_ipv6" + if err := ioutil.WriteFile(procFile, []byte{'0', '\n'}, 0644); err != nil { + return fmt.Errorf("Unable to enable IPv6 addresses on bridge: %v", err) + } + + ipAddr6, ipNet6, err := net.ParseCIDR(bridgeIPv6) + if err != nil { + return fmt.Errorf("Unable to parse bridge IPv6 address: %q, error: %v", bridgeIPv6, err) + } + + if err := netlink.NetworkLinkAddIp(iface, ipAddr6, ipNet6); err != nil { + return fmt.Errorf("Unable to add private IPv6 network: %v", err) + } + + return nil +} + func createBridgeIface(name string) error { kv, err := kernel.GetKernelVersion() // only set the bridge's mac address if the kernel version is > 3.3 diff --git a/daemon/networkdriver/utils.go b/daemon/networkdriver/utils.go index 833744b57e..9f0c88cd5e 100644 --- a/daemon/networkdriver/utils.go +++ b/daemon/networkdriver/utils.go @@ -74,7 +74,7 @@ func NetworkRange(network *net.IPNet) (net.IP, net.IP) { return netIP.Mask(network.Mask), net.IP(lastIP) } -// Return the IPv4 address of a network interface +// Return the first IPv4 address and slice of IPv6 addresses for the specified network interface func GetIfaceAddr(name string) (net.Addr, []net.Addr, error) { iface, err := net.InterfaceByName(name) if err != nil { From 9cea20ffc5904201efdda2c8d6623759e15ad8eb Mon Sep 17 
00:00:00 2001 From: Arnaud Porterie Date: Wed, 28 Jan 2015 08:52:06 -0800 Subject: [PATCH 071/653] Update fish completion for 1.5.0 Signed-off-by: Arnaud Porterie --- contrib/completion/fish/docker.fish | 94 +++++++++++++++++++++++------ 1 file changed, 76 insertions(+), 18 deletions(-) diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish index 41c4a33008..fe92ecc56f 100644 --- a/contrib/completion/fish/docker.fish +++ b/contrib/completion/fish/docker.fish @@ -51,23 +51,28 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers' complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains' complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the Docker runtime to use a specific exec driver' -complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (ex: 10.20.0.0/16)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g. 10.20.0.0/16)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)' complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode' complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the Docker runtime' complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.' -complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Enable inter-container communication' +complete -c docker -f -n '__fish_docker_no_subcommand' -s h -l help -d 'Print usage' +complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication' complete -c docker -f -n '__fish_docker_no_subcommand' -l insecure-registry -d 'Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' -complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Enable net.ipv4.ip_forward' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Enable net.ipv4.ip_forward and IPv6 forwarding if --fixed-cidr-v6 is defined. IPv6 forwarding may interfere with your existing IPv6 configuration when using Router Advertisement.' 
complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-masq -d "Enable IP masquerading for bridge's IP range" complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Enable Docker's addition of iptables rules" +complete -c docker -f -n '__fish_docker_no_subcommand' -l ipv6 -d 'Enable IPv6 networking' +complete -c docker -f -n '__fish_docker_no_subcommand' -s l -l log-level -d 'Set the logging level (debug, info, warn, error, fatal)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l label -d 'Set key=value labels to the daemon (displayed in `docker info`)' complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU' complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file' complete -c docker -f -n '__fish_docker_no_subcommand' -l registry-mirror -d 'Specify a preferred Docker registry mirror' complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the Docker runtime to use a specific storage driver' complete -c docker -f -n '__fish_docker_no_subcommand' -l selinux-enabled -d 'Enable selinux support. SELinux does not presently support the BTRFS storage driver' complete -c docker -f -n '__fish_docker_no_subcommand' -l storage-opt -d 'Set storage driver options' -complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by tls-verify flags' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by --tlsverify flag' complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscacert -d 'Trust only remotes providing a certificate signed by the CA given here' complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscert -d 'Path to TLS certificate file' complete -c docker -f -n '__fish_docker_no_subcommand' -l tlskey -d 'Path to TLS key file' @@ -77,14 +82,18 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print # subcommands # attach complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach STDIN' -complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.' 
complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" # build complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s f -l file -d "Name of the Dockerfile(Default is 'Dockerfile' at context root)" complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l force-rm -d 'Always remove intermediate containers, even after unsuccessful builds' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l pull -d 'Always attempt to pull a newer version of the image' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the verbose output generated by the containers' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success' @@ -92,12 +101,14 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d ' # commit complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes" complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith ")' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s p -l pause -d 'Pause container during commit' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container" # cp complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders from a container's filesystem to the host path" +complete -c docker -A -f -n '__fish_seen_subcommand_from cp' -l help -d 'Print usage' # create complete -c docker -f -n '__fish_docker_no_subcommand' -a create -d 'Create a new container' @@ -108,23 +119,29 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-add -d ' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-drop -d 'Drop Linux capabilities' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cidfile -d 'Write the container ID to the file' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns -d 'Set custom DNS servers' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-search -d 'Set custom DNS search domains' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. 
if you don't wish to set the search domain)" complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s e -l env -d 'Set environment variables' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l env-file -d 'Read in a line delimited file of environment variables' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l expose -d 'Expose a port from the container without publishing it to your host' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s h -l hostname -d 'Container host name' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s i -l interactive -d 'Keep STDIN open even if not attached' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l link -d 'Add link to another container in the form of name:alias' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l link -d 'Add link to another container in the form of :alias' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l lxc-conf -d '(lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s m -l memory -d 'Memory limit (format: , where unit = b, k, m or g)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mac-address -d 'Container MAC address (e.g. 
92:d0:c6:0a:29:33)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: , where unit = b, k, m or g)" complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l name -d 'Assign a name to the container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l net -d 'Set the Network mode for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s p -l publish -d "Publish a container's port to the host" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l pid -d 'Default is to create a private PID namespace for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l privileged -d 'Give extended privileges to this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l read-only -d "Mount the container's root filesystem as read only" complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l security-opt -d 'Security Options' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s t -l tty -d 'Allocate a pseudo-TTY' @@ -136,26 +153,32 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from create' -a '(__fish_pri # diff complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem" +complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container" # events complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -s f -l filter -d "Provide filter values (i.e., 'event=stop')" +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show all events created since timestamp' complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l until -d 'Stream events until this timestamp' # exec -complete -c docker -f -n '__fish_docker_no_subcommand' -a exec -d 'Run a command in an existing container' +complete -c docker -f -n '__fish_docker_no_subcommand' -a exec -d 'Run a command in a running container' complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s d -l detach -d 'Detached mode: run command in the background' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s i -l interactive -d 'Keep STDIN open even if not attached' complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s t -l tty -d 'Allocate a pseudo-TTY' complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -a '(__fish_print_docker_containers running)' -d "Container" # export complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from export' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from 
export' -a '(__fish_print_docker_containers all)' -d "Container" # history complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image" @@ -164,34 +187,40 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_pr complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s f -l filter -d "Provide filter values (i.e., 'dangling=true')" +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository" # import complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball' +complete -c docker -A -f -n '__fish_seen_subcommand_from import' -l help -d 'Print usage' # info complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information' # inspect -complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container' +complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container or image' complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.' 
+complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image" complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container" # kill complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container' complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container" # load complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from load' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from load' -s i -l input -d 'Read from a tar archive file, instead of STDIN' # login complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Register or log in to a Docker registry server' complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'Email' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password' complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username' @@ -201,12 +230,14 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -a logout -d 'Log out fro # logs complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container" # port complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port that is NAT-ed to PRIVATE_PORT' +complete -c docker -A -f -n '__fish_seen_subcommand_from port' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container" # pause @@ -218,32 +249,40 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only container created before Id or Name, include non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s f -l filter -d 'Provide filter values. Valid filters:' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.' 
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display sizes' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display total file sizes' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.' # pull complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from a Docker registry server' complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s a -l all-tags -d 'Download all tagged images in the repository' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image" complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository" # push complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image" complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository" +# rename +complete -c docker -f -n '__fish_docker_no_subcommand' -a rename -d 'Rename an existing container' + # restart complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.' 
complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container" # rm complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force the removal of a running container (uses SIGKILL)' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated with the container' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container" @@ -251,6 +290,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_d # rmi complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images' complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force removal of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l no-prune -d 'Do not delete untagged parents' complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image" @@ -264,27 +304,33 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-drop -d 'Dr complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: run the container in the background and print the new container ID' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom DNS servers' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-search -d 'Set custom DNS search domains' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)" complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l env-file -d 'Read in a line delimited file of environment variables' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port from the container without publishing it to your host' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port or a range of ports (e.g. 
--expose=3300-3310) from the container without publishing it to your host' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep STDIN open even if not attached' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container in the form of name:alias' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container in the form of :alias' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d '(lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: , where unit = b, k, m or g)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mac-address -d 'Container MAC address (e.g. 92:d0:c6:0a:29:33)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: , where unit = b, k, m or g)" complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l net -d 'Set the Network mode for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l pid -d 'Default is to create a private PID namespace for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l read-only -d "Mount the container's root filesystem as read only" complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l security-opt -d 'Security Options' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.' 
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-TTY' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' @@ -294,32 +340,43 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_ # save complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive' -complete -c docker -A -f -n '__fish_seen_subcommand_from save' -s o -l output -d 'Write to a file, instead of STDOUT' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -s o -l output -d 'Write to an file, instead of STDOUT' complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image" # search complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image on the Docker Hub' complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only displays with at least x stars' # start complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a stopped container' complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's STDOUT and STDERR and forward all signals to the process" +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's STDIN" complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container" +# stats +complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics" +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container" + # stop complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.' 
complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container" # tag complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository' complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force' +complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -l help -d 'Print usage' # top complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Lookup the running processes of a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from top' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container" # unpause @@ -331,6 +388,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the D # wait complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code' +complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container" From ef957f43518a8e636bca48604ebe02afdba775bd Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Wed, 28 Jan 2015 09:44:56 -0800 Subject: [PATCH 072/653] Added tianon's info and changed a typo. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- MAINTAINERS | 55 +++++++++++++++++++++++++++++------------------------ 1 file changed, 30 insertions(+), 25 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index da7a2c851f..8c2a6f9fe9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -222,7 +222,7 @@ made through a pull request. [Org.Operators.infrastructure] people = [ - "jess", + "jfrazelle", "crosbymichael" ] @@ -387,99 +387,104 @@ made through a pull request. 
[people.aanand] Name = "Aanand Prasad" Email = "aanand@docker.com" - Github = "aanand" + GitHub = "aanand" [people.aluzzardi] Name = "Andrea Luzzardi" Email = "aluzzardi@docker.com" - Github = "aluzzardi" + GitHub = "aluzzardi" [people.bfirsh] Name = "Ben Firshman" Email = "ben@firshman.co.uk" - Github = "bfirsh" + GitHub = "bfirsh" [people.ehazlett] Name = "Evan Hazlett" Email = "ejhazlett@gmail.com" - Github = "ehazlett" + GitHub = "ehazlett" [people.erikh] Name = "Erik Hollensbe" Email = "erik@docker.com" - Github = "erikh" + GitHub = "erikh" [people.erw] Name = "Eric Windisch" Email = "eric@windisch.us" - Github = "ewindisch" + GitHub = "ewindisch" [people.icecrime] Name = "Arnaud Porterie" Email = "arnaud@docker.com" - Github = "icecrime" + GitHub = "icecrime" [people.jfrazelle] Name = "Jessie Frazelle" Email = "jess@docker.com" - Github = "jfrazelle" + GitHub = "jfrazelle" [people.lk4d4] Name = "Alexander Morozov" Email = "lk4d4@docker.com" - Github = "lk4d4" + GitHub = "lk4d4" [people.shykes] Name = "Solomon Hykes" Email = "solomon@docker.com" - Github = "shykes" - + GitHub = "shykes" + + [people.spf13] + Name = "Steve Francia" + Email = "steve.francia@gmail.com" + GitHub = "spf13" + [people.sven] Name = "Sven Dowideit" Email = "SvenDowideit@home.org.au" GitHub = "SvenDowideit" + [people.tianon] + Name = "Tianon Gravi" + Email = "admwiggin@gmail.com" + GitHub = "tianon" + [people.tibor] Name = "Tibor Vass" Email = "tibor@docker.com" - Github = "tiborvass" + GitHub = "tiborvass" [people.vbatts] Name = "Vincent Batts" Email = "vbatts@redhat.com" - Github = "vbatts" + GitHub = "vbatts" [people.vieux] Name = "Victor Vieux" Email = "vieux@docker.com" - Github = "vieux" + GitHub = "vieux" [people.vmarmol] Name = "Victor Marmol" Email = "vmarmol@google.com" - Github = "vmarmol" + GitHub = "vmarmol" [people.jnagal] Name = "Rohit Jnagal" Email = "jnagal@google.com" - Github = "rjnagal" + GitHub = "rjnagal" [people.mpatel] Name = "Mrunal Patel" Email = "mpatel@redhat.com" - Github = "mrunalp" + GitHub = "mrunalp" [people.unclejack] Name = "Cristian Staretu" Email = "cristian.staretu@gmail.com" - Github = "unclejack" + GitHub = "unclejack" [people.vish] Name = "Vishnu Kannan" Email = "vishnuk@google.com" - Github = "vishh" - - [people.spf13] - Name = "Steve Francia" - Email = "steve.francia@gmail.com" - Github = "spf13" + GitHub = "vishh" From 0fab79f20343db9d95da191cf473651c3c8f5f42 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Wed, 28 Jan 2015 14:08:41 -0700 Subject: [PATCH 073/653] Update .deb version numbers to be more sane Example output: ```console root@906b21a861fb:/go/src/github.com/docker/docker# ./hack/make.sh binary ubuntu bundles/1.4.1-dev already exists. Removing. 
---> Making bundle: binary (in bundles/1.4.1-dev/binary)
Created binary: /go/src/github.com/docker/docker/bundles/1.4.1-dev/binary/docker-1.4.1-dev
---> Making bundle: ubuntu (in bundles/1.4.1-dev/ubuntu)
Created package {:path=>"lxc-docker-1.4.1-dev_1.4.1~dev~git20150128.182847.0.17e840a_amd64.deb"}
Created package {:path=>"lxc-docker_1.4.1~dev~git20150128.182847.0.17e840a_amd64.deb"}
```

As noted in a comment in the code here, this sums up the reasoning for this
change: (which is how APT and reprepro compare versions)

```console
$ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false
true
$ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false
true
$ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false
true
```

ie, `1.5.0` > `1.5.0~rc1` > `1.5.0~git20150128.112847.17e840a` > `1.5.0~dev~git20150128.112847.17e840a`

Signed-off-by: Andrew "Tianon" Page
---
 project/make/ubuntu | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/project/make/ubuntu b/project/make/ubuntu
index 98ec423073..e34369eb16 100644
--- a/project/make/ubuntu
+++ b/project/make/ubuntu
@@ -2,11 +2,26 @@
 
 DEST=$1
 
-PKGVERSION="$VERSION"
-if [ -n "$(git status --porcelain)" ]; then
-	PKGVERSION="$PKGVERSION-$(date +%Y%m%d%H%M%S)-$GITCOMMIT"
+PKGVERSION="${VERSION//-/'~'}"
+# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better
+if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
+	GIT_UNIX="$(git log -1 --pretty='%at')"
+	GIT_DATE="$(date --date "@$GIT_UNIX" +'%Y%m%d.%H%M%S')"
+	GIT_COMMIT="$(git log -1 --pretty='%h')"
+	GIT_VERSION="git${GIT_DATE}.0.${GIT_COMMIT}"
+	# GIT_VERSION is now something like 'git20150128.112847.0.17e840a'
+	PKGVERSION="$PKGVERSION~$GIT_VERSION"
 fi
 
+# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false
+# true
+# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false
+# true
+# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false
+# true
+
+# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a
+
 PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)"
 PACKAGE_URL="http://www.docker.com/"
 PACKAGE_MAINTAINER="support@docker.com"
@@ -124,7 +139,7 @@ EOF
 
 # create lxc-docker-VERSION package
 fpm -s dir -C $DIR \
-	--name lxc-docker-$VERSION --version $PKGVERSION \
+	--name lxc-docker-$VERSION --version "$PKGVERSION" \
 	--after-install $DEST/postinst \
 	--before-remove $DEST/prerm \
 	--after-remove $DEST/postrm \
@@ -157,7 +172,7 @@ EOF
 
 # create empty lxc-docker wrapper package
 fpm -s empty \
-	--name lxc-docker --version $PKGVERSION \
+	--name lxc-docker --version "$PKGVERSION" \
 	--architecture "$PACKAGE_ARCHITECTURE" \
 	--depends lxc-docker-$VERSION \
 	--description "$PACKAGE_DESCRIPTION" \

From c0969ed3d896c9add0a2cca7b9a9074cd138571b Mon Sep 17 00:00:00 2001
From: Sebastiaan van Stijn
Date: Fri, 16 Jan 2015 14:04:24 +0100
Subject: [PATCH 074/653] Replace "base" with "ubuntu" in documentation

The API documentation uses the "base" image in various places. The
"base" image is deprecated and it is no longer possible to download
this image. This changes the API documentation to use "ubuntu" instead.
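For example (only a sketch; the exact pull output varies between Docker
versions and is omitted here), the deprecated name can no longer be
downloaded, while the replacement used throughout the updated examples can:

```console
$ docker pull base     # fails: the "base" image is no longer available
$ docker pull ubuntu   # works: the image the examples now reference
```
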
Signed-off-by: Sebastiaan van Stijn --- .../reference/api/docker_remote_api_v1.10.md | 28 +++++++++---------- .../reference/api/docker_remote_api_v1.11.md | 28 +++++++++---------- .../reference/api/docker_remote_api_v1.12.md | 28 +++++++++---------- .../reference/api/docker_remote_api_v1.13.md | 28 +++++++++---------- .../reference/api/docker_remote_api_v1.14.md | 28 +++++++++---------- .../reference/api/docker_remote_api_v1.15.md | 28 +++++++++---------- .../reference/api/docker_remote_api_v1.16.md | 28 +++++++++---------- .../reference/api/docker_remote_api_v1.17.md | 26 ++++++++--------- 8 files changed, 111 insertions(+), 111 deletions(-) diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md index b9f421d38f..c5ba26c092 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.md +++ b/docs/sources/reference/api/docker_remote_api_v1.10.md @@ -35,7 +35,7 @@ List containers [ { "Id": "8dfafdbc3a40", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", @@ -45,7 +45,7 @@ List containers }, { "Id": "9cd87474be90", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", @@ -55,7 +55,7 @@ List containers }, { "Id": "3176a2479c92", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", @@ -65,7 +65,7 @@ List containers }, { "Id": "4cb07b47f9fb", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 444444444444444444444444444444444", "Created": 1367854152, "Status": "Exit 0", @@ -119,7 +119,7 @@ Create a container "Cmd":[ "date" ], - "Image":"base", + "Image":"ubuntu", "Volumes":{ "/tmp": {} }, @@ -194,7 +194,7 @@ Return low-level information on the container `id` "Cmd": [ "date" ], - "Image": "base", + "Image": "ubuntu", "Volumes": {}, "WorkingDir":"" @@ -702,7 +702,7 @@ Create an image, either by pull it from the registry or by importing **Example request**: - POST /images/create?fromImage=base HTTP/1.1 + POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: @@ -774,7 +774,7 @@ Return low-level information on the image `name` **Example request**: - GET /images/base/json HTTP/1.1 + GET /images/ubuntu/json HTTP/1.1 **Example response**: @@ -801,7 +801,7 @@ Return low-level information on the image `name` "StdinOnce":false, "Env":null, "Cmd": ["/bin/bash"] - "Image":"base", + "Image":"ubuntu", "Volumes":null, "WorkingDir":"" }, @@ -822,7 +822,7 @@ Return the history of the image `name` **Example request**: - GET /images/base/history HTTP/1.1 + GET /images/ubuntu/history HTTP/1.1 **Example response**: @@ -1229,10 +1229,10 @@ and Docker images will report: HTTP/1.1 200 OK Content-Type: application/json - {"status": "create", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - {"status": "start", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - {"status": "stop", "id": "dfdf82bd3881","from": "base:latest", "time":1374067966} - {"status": "destroy", "id": "dfdf82bd3881","from": "base:latest", "time":1374067970} + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} Query 
Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index 97f6c56700..102e9c4abd 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -35,7 +35,7 @@ List containers [ { "Id": "8dfafdbc3a40", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", @@ -45,7 +45,7 @@ List containers }, { "Id": "9cd87474be90", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", @@ -55,7 +55,7 @@ List containers }, { "Id": "3176a2479c92", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", @@ -65,7 +65,7 @@ List containers }, { "Id": "4cb07b47f9fb", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 444444444444444444444444444444444", "Created": 1367854152, "Status": "Exit 0", @@ -119,7 +119,7 @@ Create a container "Cmd":[ "date" ], - "Image":"base", + "Image":"ubuntu", "Volumes":{ "/tmp": {} }, @@ -195,7 +195,7 @@ Return low-level information on the container `id` "date" ], "Dns": null, - "Image": "base", + "Image": "ubuntu", "Volumes": {}, "VolumesFrom": "", "WorkingDir": "" @@ -736,7 +736,7 @@ Create an image, either by pull it from the registry or by importing i **Example request**: - POST /images/create?fromImage=base HTTP/1.1 + POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: @@ -777,7 +777,7 @@ Return low-level information on the image `name` **Example request**: - GET /images/base/json HTTP/1.1 + GET /images/ubuntu/json HTTP/1.1 **Example response**: @@ -805,7 +805,7 @@ Return low-level information on the image `name` "Env":null, "Cmd": ["/bin/bash"], "Dns":null, - "Image":"base", + "Image":"ubuntu", "Volumes":null, "VolumesFrom":"", "WorkingDir":"" @@ -827,7 +827,7 @@ Return the history of the image `name` **Example request**: - GET /images/base/history HTTP/1.1 + GET /images/ubuntu/history HTTP/1.1 **Example response**: @@ -1259,10 +1259,10 @@ and Docker images will report: HTTP/1.1 200 OK Content-Type: application/json - {"status": "create", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - {"status": "start", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - {"status": "stop", "id": "dfdf82bd3881","from": "base:latest", "time":1374067966} - {"status": "destroy", "id": "dfdf82bd3881","from": "base:latest", "time":1374067970} + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} Query Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.12.md b/docs/sources/reference/api/docker_remote_api_v1.12.md index a0e4b209d4..910640181c 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.12.md +++ b/docs/sources/reference/api/docker_remote_api_v1.12.md @@ -36,7 +36,7 @@ List containers [ { "Id": "8dfafdbc3a40", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", @@ -46,7 +46,7 @@ List containers }, { "Id": "9cd87474be90", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 
222222", "Created": 1367854155, "Status": "Exit 0", @@ -56,7 +56,7 @@ List containers }, { "Id": "3176a2479c92", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", @@ -66,7 +66,7 @@ List containers }, { "Id": "4cb07b47f9fb", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 444444444444444444444444444444444", "Created": 1367854152, "Status": "Exit 0", @@ -127,7 +127,7 @@ Create a container "Cmd":[ "date" ], - "Image":"base", + "Image":"ubuntu", "Volumes":{ "/tmp": {} }, @@ -204,7 +204,7 @@ Return low-level information on the container `id` "date" ], "Dns": null, - "Image": "base", + "Image": "ubuntu", "Volumes": {}, "VolumesFrom": "", "WorkingDir": "" @@ -795,7 +795,7 @@ Create an image, either by pull it from the registry or by importing i **Example request**: - POST /images/create?fromImage=base HTTP/1.1 + POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: @@ -838,7 +838,7 @@ Return low-level information on the image `name` **Example request**: - GET /images/base/json HTTP/1.1 + GET /images/ubuntu/json HTTP/1.1 **Example response**: @@ -864,7 +864,7 @@ Return low-level information on the image `name` "Env": null, "Cmd": ["/bin/bash"], "Dns": null, - "Image": "base", + "Image": "ubuntu", "Volumes": null, "VolumesFrom": "", "WorkingDir": "" @@ -888,7 +888,7 @@ Return the history of the image `name` **Example request**: - GET /images/base/history HTTP/1.1 + GET /images/ubuntu/history HTTP/1.1 **Example response**: @@ -1325,10 +1325,10 @@ and Docker images will report: HTTP/1.1 200 OK Content-Type: application/json - {"status": "create", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - {"status": "start", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - {"status": "stop", "id": "dfdf82bd3881","from": "base:latest", "time":1374067966} - {"status": "destroy", "id": "dfdf82bd3881","from": "base:latest", "time":1374067970} + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} Query Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.13.md b/docs/sources/reference/api/docker_remote_api_v1.13.md index 2ff844ce55..22fd81ef99 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.13.md +++ b/docs/sources/reference/api/docker_remote_api_v1.13.md @@ -36,7 +36,7 @@ List containers [ { "Id": "8dfafdbc3a40", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", @@ -46,7 +46,7 @@ List containers }, { "Id": "9cd87474be90", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", @@ -56,7 +56,7 @@ List containers }, { "Id": "3176a2479c92", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", @@ -66,7 +66,7 @@ List containers }, { "Id": "4cb07b47f9fb", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 444444444444444444444444444444444", "Created": 1367854152, "Status": "Exit 0", @@ -121,7 +121,7 @@ Create a container "Cmd":[ "date" ], - "Image":"base", + "Image":"ubuntu", "Volumes":{ "/tmp": {} }, @@ -198,7 +198,7 
@@ Return low-level information on the container `id` "date" ], "Dns": null, - "Image": "base", + "Image": "ubuntu", "Volumes": {}, "VolumesFrom": "", "WorkingDir": "" @@ -784,7 +784,7 @@ Create an image, either by pulling it from the registry or by importing it **Example request**: - POST /images/create?fromImage=base HTTP/1.1 + POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: @@ -827,7 +827,7 @@ Return low-level information on the image `name` **Example request**: - GET /images/base/json HTTP/1.1 + GET /images/ubuntu/json HTTP/1.1 **Example response**: @@ -853,7 +853,7 @@ Return low-level information on the image `name` "Env": null, "Cmd": ["/bin/bash"], "Dns": null, - "Image": "base", + "Image": "ubuntu", "Volumes": null, "VolumesFrom": "", "WorkingDir": "" @@ -877,7 +877,7 @@ Return the history of the image `name` **Example request**: - GET /images/base/history HTTP/1.1 + GET /images/ubuntu/history HTTP/1.1 **Example response**: @@ -1314,10 +1314,10 @@ and Docker images will report: HTTP/1.1 200 OK Content-Type: application/json - {"status": "create", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - {"status": "start", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - {"status": "stop", "id": "dfdf82bd3881","from": "base:latest", "time":1374067966} - {"status": "destroy", "id": "dfdf82bd3881","from": "base:latest", "time":1374067970} + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} Query Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.14.md b/docs/sources/reference/api/docker_remote_api_v1.14.md index 237872df22..fb956b3924 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.14.md +++ b/docs/sources/reference/api/docker_remote_api_v1.14.md @@ -36,7 +36,7 @@ List containers [ { "Id": "8dfafdbc3a40", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", @@ -46,7 +46,7 @@ List containers }, { "Id": "9cd87474be90", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", @@ -56,7 +56,7 @@ List containers }, { "Id": "3176a2479c92", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", @@ -66,7 +66,7 @@ List containers }, { "Id": "4cb07b47f9fb", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 444444444444444444444444444444444", "Created": 1367854152, "Status": "Exit 0", @@ -124,7 +124,7 @@ Create a container "Cmd":[ "date" ], - "Image":"base", + "Image":"ubuntu", "Volumes":{ "/tmp": {} }, @@ -207,7 +207,7 @@ Return low-level information on the container `id` "date" ], "Dns": null, - "Image": "base", + "Image": "ubuntu", "Volumes": {}, "VolumesFrom": "", "WorkingDir": "" @@ -794,7 +794,7 @@ Create an image, either by pulling it from the registry or by importing it **Example request**: - POST /images/create?fromImage=base HTTP/1.1 + POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: @@ -837,7 +837,7 @@ Return low-level information on the image `name` **Example request**: - GET /images/base/json HTTP/1.1 + GET /images/ubuntu/json HTTP/1.1 **Example response**: @@ -863,7 
+863,7 @@ Return low-level information on the image `name` "Env": null, "Cmd": ["/bin/bash"], "Dns": null, - "Image": "base", + "Image": "ubuntu", "Volumes": null, "VolumesFrom": "", "WorkingDir": "" @@ -887,7 +887,7 @@ Return the history of the image `name` **Example request**: - GET /images/base/history HTTP/1.1 + GET /images/ubuntu/history HTTP/1.1 **Example response**: @@ -1324,10 +1324,10 @@ and Docker images will report: HTTP/1.1 200 OK Content-Type: application/json - {"status": "create", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - {"status": "start", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - {"status": "stop", "id": "dfdf82bd3881","from": "base:latest", "time":1374067966} - {"status": "destroy", "id": "dfdf82bd3881","from": "base:latest", "time":1374067970} + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} Query Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md index 47fe21e92e..fb3139d211 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.15.md +++ b/docs/sources/reference/api/docker_remote_api_v1.15.md @@ -36,7 +36,7 @@ List containers [ { "Id": "8dfafdbc3a40", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", @@ -46,7 +46,7 @@ List containers }, { "Id": "9cd87474be90", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", @@ -56,7 +56,7 @@ List containers }, { "Id": "3176a2479c92", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", @@ -66,7 +66,7 @@ List containers }, { "Id": "4cb07b47f9fb", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 444444444444444444444444444444444", "Created": 1367854152, "Status": "Exit 0", @@ -128,7 +128,7 @@ Create a container "date" ], "Entrypoint": "", - "Image": "base", + "Image": "ubuntu", "Volumes": { "/tmp": {} }, @@ -291,7 +291,7 @@ Return low-level information on the container `id` "date" ], "Dns": null, - "Image": "base", + "Image": "ubuntu", "Volumes": {}, "VolumesFrom": "", "WorkingDir": "" @@ -939,7 +939,7 @@ Create an image, either by pulling it from the registry or by importing it **Example request**: - POST /images/create?fromImage=base HTTP/1.1 + POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: @@ -983,7 +983,7 @@ Return low-level information on the image `name` **Example request**: - GET /images/base/json HTTP/1.1 + GET /images/ubuntu/json HTTP/1.1 **Example response**: @@ -1009,7 +1009,7 @@ Return low-level information on the image `name` "Env": null, "Cmd": ["/bin/bash"], "Dns": null, - "Image": "base", + "Image": "ubuntu", "Volumes": null, "VolumesFrom": "", "WorkingDir": "" @@ -1033,7 +1033,7 @@ Return the history of the image `name` **Example request**: - GET /images/base/history HTTP/1.1 + GET /images/ubuntu/history HTTP/1.1 **Example response**: @@ -1471,10 +1471,10 @@ and Docker images will report: HTTP/1.1 200 OK Content-Type: application/json - {"status": "create", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - 
{"status": "start", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - {"status": "stop", "id": "dfdf82bd3881","from": "base:latest", "time":1374067966} - {"status": "destroy", "id": "dfdf82bd3881","from": "base:latest", "time":1374067970} + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} Query Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index 9934ab7716..7999850035 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -36,7 +36,7 @@ List containers [ { "Id": "8dfafdbc3a40", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", @@ -46,7 +46,7 @@ List containers }, { "Id": "9cd87474be90", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", @@ -56,7 +56,7 @@ List containers }, { "Id": "3176a2479c92", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", @@ -66,7 +66,7 @@ List containers }, { "Id": "4cb07b47f9fb", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 444444444444444444444444444444444", "Created": 1367854152, "Status": "Exit 0", @@ -128,7 +128,7 @@ Create a container "date" ], "Entrypoint": "", - "Image": "base", + "Image": "ubuntu", "Volumes": { "/tmp": {} }, @@ -291,7 +291,7 @@ Return low-level information on the container `id` "date" ], "Dns": null, - "Image": "base", + "Image": "ubuntu", "Volumes": {}, "VolumesFrom": "", "WorkingDir": "" @@ -885,7 +885,7 @@ Create an image, either by pulling it from the registry or by importing it **Example request**: - POST /images/create?fromImage=base HTTP/1.1 + POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: @@ -929,7 +929,7 @@ Return low-level information on the image `name` **Example request**: - GET /images/base/json HTTP/1.1 + GET /images/ubuntu/json HTTP/1.1 **Example response**: @@ -955,7 +955,7 @@ Return low-level information on the image `name` "Env": null, "Cmd": ["/bin/bash"], "Dns": null, - "Image": "base", + "Image": "ubuntu", "Volumes": null, "VolumesFrom": "", "WorkingDir": "" @@ -979,7 +979,7 @@ Return the history of the image `name` **Example request**: - GET /images/base/history HTTP/1.1 + GET /images/ubuntu/history HTTP/1.1 **Example response**: @@ -1427,10 +1427,10 @@ and Docker images will report: HTTP/1.1 200 OK Content-Type: application/json - {"status": "create", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - {"status": "start", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - {"status": "stop", "id": "dfdf82bd3881","from": "base:latest", "time":1374067966} - {"status": "destroy", "id": "dfdf82bd3881","from": "base:latest", "time":1374067970} + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": 
"ubuntu:latest", "time":1374067970} Query Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.17.md b/docs/sources/reference/api/docker_remote_api_v1.17.md index d6d0c1b4aa..68c4fb599d 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.17.md +++ b/docs/sources/reference/api/docker_remote_api_v1.17.md @@ -36,7 +36,7 @@ List containers [ { "Id": "8dfafdbc3a40", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", @@ -46,7 +46,7 @@ List containers }, { "Id": "9cd87474be90", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", @@ -56,7 +56,7 @@ List containers }, { "Id": "3176a2479c92", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", @@ -66,7 +66,7 @@ List containers }, { "Id": "4cb07b47f9fb", - "Image": "base:latest", + "Image": "ubuntu:latest", "Command": "echo 444444444444444444444444444444444", "Created": 1367854152, "Status": "Exit 0", @@ -128,7 +128,7 @@ Create a container "date" ], "Entrypoint": "", - "Image": "base", + "Image": "ubuntu", "Volumes": { "/tmp": {} }, @@ -1096,7 +1096,7 @@ Create an image, either by pulling it from the registry or by importing it **Example request**: - POST /images/create?fromImage=base HTTP/1.1 + POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: @@ -1140,7 +1140,7 @@ Return low-level information on the image `name` **Example request**: - GET /images/base/json HTTP/1.1 + GET /images/ubuntu/json HTTP/1.1 **Example response**: @@ -1166,7 +1166,7 @@ Return low-level information on the image `name` "Env": null, "Cmd": ["/bin/bash"], "Dns": null, - "Image": "base", + "Image": "ubuntu", "Volumes": null, "VolumesFrom": "", "WorkingDir": "" @@ -1190,7 +1190,7 @@ Return the history of the image `name` **Example request**: - GET /images/base/history HTTP/1.1 + GET /images/ubuntu/history HTTP/1.1 **Example response**: @@ -1589,10 +1589,10 @@ and Docker images will report: HTTP/1.1 200 OK Content-Type: application/json - {"status": "create", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - {"status": "start", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} - {"status": "stop", "id": "dfdf82bd3881","from": "base:latest", "time":1374067966} - {"status": "destroy", "id": "dfdf82bd3881","from": "base:latest", "time":1374067970} + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} Query Parameters: From c1e3f6196124f8c757a7017ae2bba7f8c05fde20 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 28 Jan 2015 16:30:00 -0800 Subject: [PATCH 075/653] Add distribution maintainers to maintainers files Signed-off-by: Derek McGowan (github: dmcgowan) --- MAINTAINERS | 30 +++++++++++++++++++++++++++++- graph/MAINTAINERS | 2 ++ registry/MAINTAINERS | 4 +++- 3 files changed, 34 insertions(+), 2 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index da7a2c851f..706a7d1cc9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -339,7 +339,10 @@ made through a pull request. 
"dmp42", "vbatts", "joffrey", - "samalba" + "samalba", + "sday", + "jlhawn", + "dmcg" ] [Org.Subsystems."build tools"] @@ -399,6 +402,16 @@ made through a pull request. Email = "ben@firshman.co.uk" Github = "bfirsh" + [people.dmcg] + Name = "Derek McGowan" + Email = "derek@docker.com" + Github = "dmcgowan" + + [people.dmp42] + Name = "Olivier Gambier" + Email = "olivier@docker.com" + Github = "dmp42" + [people.ehazlett] Name = "Evan Hazlett" Email = "ejhazlett@gmail.com" @@ -424,11 +437,26 @@ made through a pull request. Email = "jess@docker.com" Github = "jfrazelle" + [people.jlhawn] + Name = "Josh Hawn" + Email = "josh.hawn@docker.com" + Github = "jlhawn" + + [people.joffrey] + Name = "Joffrey Fuhrer" + Email = "joffrey@docker.com" + Github = "shin-" + [people.lk4d4] Name = "Alexander Morozov" Email = "lk4d4@docker.com" Github = "lk4d4" + [people.sday] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + Github = "stevvooe" + [people.shykes] Name = "Solomon Hykes" Email = "solomon@docker.com" diff --git a/graph/MAINTAINERS b/graph/MAINTAINERS index e409454b5e..3d685b7453 100644 --- a/graph/MAINTAINERS +++ b/graph/MAINTAINERS @@ -3,3 +3,5 @@ Victor Vieux (@vieux) Michael Crosby (@crosbymichael) Cristian Staretu (@unclejack) Tibor Vass (@tiborvass) +Josh Hawn (@jlhawn) +Derek McGowan (@dmcgowan) diff --git a/registry/MAINTAINERS b/registry/MAINTAINERS index fdb03ed573..a75e15b4ef 100644 --- a/registry/MAINTAINERS +++ b/registry/MAINTAINERS @@ -1,5 +1,7 @@ Sam Alba (@samalba) Joffrey Fuhrer (@shin-) -Ken Cochrane (@kencochrane) Vincent Batts (@vbatts) Olivier Gambier (@dmp42) +Josh Hawn (@jlhawn) +Derek McGowan (@dmcgowan) +Stephen Day (@stevvooe) From e16bcc3928d0ae07cb9c02840fbb92f1ed2a7a3f Mon Sep 17 00:00:00 2001 From: Alexander Morozov Date: Wed, 28 Jan 2015 14:33:15 -0800 Subject: [PATCH 076/653] Fix logs, so now, old and followed logs has same format without [] Signed-off-by: Alexander Morozov --- daemon/logs.go | 3 ++- pkg/jsonlog/jsonlog.go | 2 +- pkg/jsonlog/jsonlog_test.go | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/daemon/logs.go b/daemon/logs.go index 6c9373f737..c16c7851fd 100644 --- a/daemon/logs.go +++ b/daemon/logs.go @@ -99,7 +99,8 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status { } logLine := l.Log if times { - logLine = fmt.Sprintf("%s %s", l.Created.Format(format), logLine) + // format can be "" or time format, so here can't be error + logLine, _ = l.Format(format) } if l.Stream == "stdout" && stdout { io.WriteString(job.Stdout, logLine) diff --git a/pkg/jsonlog/jsonlog.go b/pkg/jsonlog/jsonlog.go index 3a96d86f82..e2c2a2cab6 100644 --- a/pkg/jsonlog/jsonlog.go +++ b/pkg/jsonlog/jsonlog.go @@ -23,7 +23,7 @@ func (jl *JSONLog) Format(format string) (string, error) { m, err := json.Marshal(jl) return string(m), err } - return fmt.Sprintf("[%s] %s", jl.Created.Format(format), jl.Log), nil + return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil } func (jl *JSONLog) Reset() { diff --git a/pkg/jsonlog/jsonlog_test.go b/pkg/jsonlog/jsonlog_test.go index 5ee5eda35c..fa53825b93 100644 --- a/pkg/jsonlog/jsonlog_test.go +++ b/pkg/jsonlog/jsonlog_test.go @@ -30,7 +30,8 @@ func TestWriteLog(t *testing.T) { if len(lines) != 30 { t.Fatalf("Must be 30 lines but got %d", len(lines)) } - logRe := regexp.MustCompile(`\[.*\] Line that thinks that it is log line from docker`) + // 30+ symbols, five more can come from system timezone + logRe := regexp.MustCompile(`.{30,} Line that thinks that it is log line from docker`) 
for _, l := range lines { if !logRe.MatchString(l) { t.Fatalf("Log line not in expected format: %q", l) From 07d190a61c60cff2f20186e700abca46f18d35ac Mon Sep 17 00:00:00 2001 From: guoxiuyan Date: Thu, 29 Jan 2015 10:15:56 +0800 Subject: [PATCH 077/653] Fix a minor typo Signed-off-by: Guo Xiuyan --- graph/push.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graph/push.go b/graph/push.go index b8fb09882e..0606c4603f 100644 --- a/graph/push.go +++ b/graph/push.go @@ -150,7 +150,7 @@ func (s *TagStore) pushImageToEndpoint(endpoint string, out io.Writer, remoteNam // wait for all the images that require pushes to be collected into a consumable map. shouldPush := <-pushes // finish by pushing any images and tags to the endpoint. The order that the images are pushed - // is very important that is why we are still itterating over the ordered list of imageIDs. + // is very important that is why we are still iterating over the ordered list of imageIDs. for _, id := range imageIDs { if _, push := shouldPush[id]; push { if _, err := s.pushImage(r, out, id, endpoint, repo.Tokens, sf); err != nil { From 761a020f9dbb5e5610dce0dc8c762de09f8a0d46 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Wed, 28 Jan 2015 17:11:51 -0800 Subject: [PATCH 078/653] Add builder folks to the top-level maintainers file Signed-off-by: Doug Davis --- MAINTAINERS | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index da7a2c851f..cc967c895c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -375,6 +375,14 @@ made through a pull request. "aanand" ] + [Org.Subsystem.builder] + + people = [ + "erikh", + "tibor", + "duglin" + ] + [people] @@ -399,6 +407,11 @@ made through a pull request. Email = "ben@firshman.co.uk" Github = "bfirsh" + [people.duglin] + Name = "Doug Davis" + Email = "dug@us.ibm.com" + Github = "duglin" + [people.ehazlett] Name = "Evan Hazlett" Email = "ejhazlett@gmail.com" From 0ce3a49d64bcfd81823fe0e9bf54c51ddf6e35d4 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Thu, 29 Jan 2015 20:35:37 +1000 Subject: [PATCH 079/653] DHE documentation placeholder and Navbar changes Signed-off-by: Sven Dowideit --- docs/mkdocs.yml | 5 +++++ .../docker-hub-enterprise/install-config.md | 8 ++++++++ docs/sources/docker-hub-enterprise/usage.md | 9 +++++++++ docs/theme/mkdocs/css/docs.css | 1 + docs/theme/mkdocs/css/main.css | 4 ---- docs/theme/mkdocs/header.html | 18 ++++++++++-------- docs/theme/mkdocs/nav.html | 4 ---- 7 files changed, 33 insertions(+), 16 deletions(-) create mode 100644 docs/sources/docker-hub-enterprise/install-config.md create mode 100644 docs/sources/docker-hub-enterprise/usage.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 6b8f4dc89a..de532a8261 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -69,6 +69,11 @@ pages: - ['docker-hub/builds.md', 'Docker Hub', 'Automated Builds'] - ['docker-hub/official_repos.md', 'Docker Hub', 'Official Repo Guidelines'] +# Docker Hub Enterprise +- ['docker-hub-enterprise/index.md', '**HIDDEN**' ] +- ['docker-hub-enterprise/install-config.md', 'Docker Hub Enterprise', 'Installation and Configuration' ] +- ['docker-hub-enterprise/usage.md', 'Docker Hub Enterprise', 'User Guide' ] + # Examples: - ['examples/index.md', '**HIDDEN**'] - ['examples/nodejs_web_app.md', 'Examples', 'Dockerizing a Node.js web application'] diff --git a/docs/sources/docker-hub-enterprise/install-config.md b/docs/sources/docker-hub-enterprise/install-config.md new file mode 100644 index 0000000000..0b7bcfd6fe --- /dev/null +++ 
b/docs/sources/docker-hub-enterprise/install-config.md
@@ -0,0 +1,8 @@
+page_title: Using Docker Hub Enterprise Installation
+page_description: Docker Hub Enterprise Installation
+page_keywords: docker hub enterprise
+
+# Docker Hub Enterprise Installation
+
+Documentation coming soon.
+
diff --git a/docs/sources/docker-hub-enterprise/usage.md b/docs/sources/docker-hub-enterprise/usage.md
new file mode 100644
index 0000000000..252223ef70
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/usage.md
@@ -0,0 +1,9 @@
+page_title: Using Docker Hub Enterprise
+page_description: Docker Hub Enterprise
+page_keywords: docker hub enterprise
+
+# Docker Hub Enterprise
+
+Documentation coming soon.
+
+
diff --git a/docs/theme/mkdocs/css/docs.css b/docs/theme/mkdocs/css/docs.css
index 068a0003ef..6a5eeb5142 100644
--- a/docs/theme/mkdocs/css/docs.css
+++ b/docs/theme/mkdocs/css/docs.css
@@ -60,6 +60,7 @@ pre {
 /* Main Navigation */
 #nav_menu > #docsnav {
   max-width: 940px;
+  width: 940px;
   margin: 0 auto;
 }
 #nav_menu > #docsnav > #nav_search {
diff --git a/docs/theme/mkdocs/css/main.css b/docs/theme/mkdocs/css/main.css
index ed7c189a09..0c2d7830f8 100644
--- a/docs/theme/mkdocs/css/main.css
+++ b/docs/theme/mkdocs/css/main.css
@@ -801,10 +801,6 @@ div + .form-inline {
   transition: box-shadow linear 0.2s, background linear 0.3s, width linear 0.3s;
   width: 140px;
 }
-#topmostnav .navbar-index-search .search-query:focus,
-#topmostnav .navbar-index-search .search-query.focused {
-  width: 200px;
-}
 #topmostnav.public {
   border-bottom: none;
   height: 80px;
diff --git a/docs/theme/mkdocs/header.html b/docs/theme/mkdocs/header.html
index a3b1d9bd78..6622f93301 100644
--- a/docs/theme/mkdocs/header.html
+++ b/docs/theme/mkdocs/header.html
@@ -1,13 +1,14 @@
-