Merge pull request #6230 from tiborvass/bump_v0.12.0

Bump version to v0.12.0
Michael Crosby 2014-06-06 11:41:06 -07:00
commit c624caa949
465 changed files with 18288 additions and 9386 deletions

.mailmap

@ -6,14 +6,16 @@ Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com
<guillaume.charmes@docker.com> <guillaume@dotcloud.com>
<guillaume.charmes@docker.com> <guillaume@docker.com>
<guillaume.charmes@docker.com> <guillaume.charmes@dotcloud.com>
<guillaume.charmes@docker.com> <guillaume@charmes.net>
<kencochrane@gmail.com> <KenCochrane@gmail.com>
<sridharr@activestate.com> <github@srid.name>
Thatcher Peskens <thatcher@dotcloud.com> dhrp <thatcher@dotcloud.com>
Thatcher Peskens <thatcher@dotcloud.com> dhrp <thatcher@gmx.net>
Thatcher Peskens <thatcher@docker.com>
Thatcher Peskens <thatcher@docker.com> <thatcher@dotcloud.com>
Thatcher Peskens <thatcher@docker.com> dhrp <thatcher@gmx.net>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> jpetazzo <jerome.petazzoni@dotcloud.com>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> <jp@enix.org>
Joffrey F <joffrey@dotcloud.com>
<joffrey@dotcloud.com> <f.joffrey@gmail.com>
Joffrey F <joffrey@docker.com>
Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Andy Smith <github@anarkystic.com>
<kalessin@kalessin.fr> <louis@dotcloud.com>
@ -23,7 +25,6 @@ Andy Smith <github@anarkystic.com>
<victor.vieux@docker.com> <victor@docker.com>
<victor.vieux@docker.com> <vieux@docker.com>
<dominik@honnef.co> <dominikh@fork-bomb.org>
Thatcher Peskens <thatcher@dotcloud.com>
<ehanchrow@ine.com> <eric.hanchrow@gmail.com>
Walter Stanish <walter@pratyeka.org>
<daniel@gasienica.ch> <dgasienica@zynga.com>
@ -54,7 +55,26 @@ Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
<gurjeet@singh.im> <singh.gurjeet@gmail.com>
<shawn@churchofgit.com> <shawnlandden@gmail.com>
<sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
<solomon@dotcloud.com> <solomon.hykes@dotcloud.com>
<SvenDowideit@home.org.au> <SvenDowideit@fosiki.com>
Sven Dowideit <SvenDowideit@home.org.au> ¨Sven <¨SvenDowideit@home.org.au¨>
<solomon@docker.com> <solomon.hykes@dotcloud.com>
<solomon@docker.com> <solomon@dotcloud.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@fosiki.com>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@docker.com>
Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
unclejack <unclejacksons@gmail.com> <unclejack@users.noreply.github.com>
<alexl@redhat.com> <alexander.larsson@gmail.com>
Alexandr Morozov <lk4d4math@gmail.com>
<git.nivoc@neverbox.com> <kuehnle@online.de>
O.S. Tezer <ostezer@gmail.com>
<ostezer@gmail.com> <ostezer@users.noreply.github.com>
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
<justin.p.simonelis@gmail.com> <justin.simonelis@PTS-JSIMON2.toronto.exclamation.com>
<taim@bosboot.org> <maztaim@users.noreply.github.com>
<viktor.vojnovski@amadeus.com> <vojnovski@gmail.com>
<vbatts@redhat.com> <vbatts@hashbangbash.com>
<altsysrq@gmail.com> <iamironbob@gmail.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Sridhar Ratnakumar <sridharr@activestate.com> <github@srid.name>
Liang-Chi Hsieh <viirya@gmail.com>
Aleksa Sarai <cyphar@cyphar.com>
Will Weaver <monkey@buildingbananas.com>

AUTHORS

@ -1,44 +1,62 @@
# This file lists all individuals having contributed content to the repository.
# If you're submitting a patch, please add your name here in alphabetical order as part of the patch.
#
# For a list of active project maintainers, see the MAINTAINERS file.
#
# For how it is generated, see `.mailmap`.
Aanand Prasad <aanand.prasad@gmail.com>
Aaron Feng <aaron.feng@gmail.com>
Aaron Huslage <huslage@gmail.com>
Abel Muiño <amuino@gmail.com>
Adam Miller <admiller@redhat.com>
Adam Singer <financeCoding@gmail.com>
Aditya <aditya@netroy.in>
Adrian Mouat <adrian.mouat@gmail.com>
alambike <alambike@gmail.com>
Aleksa Sarai <cyphar@cyphar.com>
Alexander Larsson <alexl@redhat.com>
Alexandr Morozov <lk4d4math@gmail.com>
Alexey Kotlyarov <alexey@infoxchange.net.au>
Alexey Shamrin <shamrin@gmail.com>
Alex Gaynor <alex.gaynor@gmail.com>
Alexis THOMAS <fr.alexisthomas@gmail.com>
almoehi <almoehi@users.noreply.github.com>
Al Tobey <al@ooyala.com>
amangoel <amangoel@gmail.com>
Andrea Luzzardi <aluzzardi@gmail.com>
Andreas Savvides <andreas@editd.com>
Andreas Tiefenthaler <at@an-ti.eu>
Andrea Turli <andrea.turli@gmail.com>
Andrew Duckworth <grillopress@gmail.com>
Andrew Macgregor <andrew.macgregor@agworld.com.au>
Andrew Munsell <andrew@wizardapps.net>
Andrews Medina <andrewsmedina@gmail.com>
Andrew Williams <williams.andrew@gmail.com>
Andy Chambers <anchambers@paypal.com>
andy diller <dillera@gmail.com>
Andy Goldstein <agoldste@redhat.com>
Andy Kipp <andy@rstudio.com>
Andy Rothfusz <github@metaliveblog.com>
Andy Smith <github@anarkystic.com>
Anthony Bishopric <git@anthonybishopric.com>
Anton Nikitin <anton.k.nikitin@gmail.com>
Antony Messerli <amesserl@rackspace.com>
apocas <petermdias@gmail.com>
Arnaud Porterie <icecrime@gmail.com>
Asbjørn Enge <asbjorn@hanafjedle.net>
Barnaby Gray <barnaby@pickle.me.uk>
Barry Allard <barry.allard@gmail.com>
Bartłomiej Piotrowski <b@bpiotrowski.pl>
Benjamin Atkin <ben@benatkin.com>
Benoit Chesneau <bchesneau@gmail.com>
Ben Sargent <ben@brokendigits.com>
Ben Toews <mastahyeti@gmail.com>
Ben Wiklund <ben@daisyowl.com>
Bernerd Schaefer <bj.schaefer@gmail.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
bin liu <liubin0329@users.noreply.github.com>
Bouke Haarsma <bouke@webatoom.nl>
Brandon Liu <bdon@bdon.org>
Brandon Philips <brandon@ifup.org>
Brian Dorsey <brian@dorseys.org>
Brian Flad <bflad417@gmail.com>
Brian Goff <cpuguy83@gmail.com>
Brian McCallister <brianm@skife.org>
Brian Olsen <brian@maven-group.org>
@ -46,11 +64,15 @@ Brian Shumate <brian@couchbase.com>
Briehan Lombaard <briehan.lombaard@gmail.com>
Bruno Bigras <bigras.bruno@gmail.com>
Bryan Matsuo <bryan.matsuo@gmail.com>
Bryan Murphy <bmurphy1976@gmail.com>
Caleb Spare <cespare@gmail.com>
Calen Pennington <cale@edx.org>
Cameron Boehmer <cameron.boehmer@gmail.com>
Carl X. Su <bcbcarl@gmail.com>
Charles Hooper <charles.hooper@dotcloud.com>
Charles Lindsay <chaz@chazomatic.us>
Charles Merriam <charles.merriam@gmail.com>
Charlie Lewis <charliel@lab41.org>
Chia-liang Kao <clkao@clkao.org>
Chris St. Pierre <chris.a.st.pierre@gmail.com>
Christopher Currie <codemonkey+github@gmail.com>
@ -61,6 +83,7 @@ Colin Dunklau <colin.dunklau@gmail.com>
Colin Rice <colin@daedrum.net>
Cory Forsyth <cory.forsyth@gmail.com>
cressie176 <github@stephen-cresswell.net>
Dafydd Crosby <dtcrsby@gmail.com>
Dan Buch <d.buch@modcloth.com>
Dan Hirsch <thequux@upstandinghackers.com>
Daniel Exner <dex@dragonslave.de>
@ -72,30 +95,45 @@ Daniel Nordberg <dnordberg@gmail.com>
Daniel Robinson <gottagetmac@gmail.com>
Daniel Von Fange <daniel@leancoder.com>
Daniel YC Lin <dlin.tw@gmail.com>
Dan Keder <dan.keder@gmail.com>
Dan McPherson <dmcphers@redhat.com>
Danny Berger <dpb587@gmail.com>
Danny Yates <danny@codeaholics.org>
Dan Stine <sw@stinemail.com>
Dan Walsh <dwalsh@redhat.com>
Dan Williams <me@deedubs.com>
Darren Coxall <darren@darrencoxall.com>
Darren Shepherd <darren.s.shepherd@gmail.com>
David Anderson <dave@natulte.net>
David Calavera <david.calavera@gmail.com>
David Gageot <david@gageot.net>
David Mcanulty <github@hellspark.com>
David Röthlisberger <david@rothlis.net>
David Sissitka <me@dsissitka.com>
Deni Bertovic <deni@kset.org>
Dinesh Subhraveti <dineshs@altiscale.com>
Djibril Koné <kone.djibril@gmail.com>
dkumor <daniel@dkumor.com>
Dmitry Demeshchuk <demeshchuk@gmail.com>
Dolph Mathews <dolph.mathews@gmail.com>
Dominik Honnef <dominik@honnef.co>
Don Spaulding <donspauldingii@gmail.com>
Dražen Lučanin <kermit666@gmail.com>
Dr Nic Williams <drnicwilliams@gmail.com>
Dustin Sallings <dustin@spy.net>
Edmund Wagner <edmund-wagner@web.de>
Eiichi Tsukata <devel@etsukata.com>
Eivind Uggedal <eivind@uggedal.com>
Elias Probst <mail@eliasprobst.eu>
Emil Hernvall <emil@quench.at>
Emily Rose <emily@contactvibe.com>
Eric Hanchrow <ehanchrow@ine.com>
Eric Lee <thenorthsecedes@gmail.com>
Eric Myhre <hash@exultant.us>
Erik Hollensbe <erik+github@hollensbe.org>
Erno Hopearuoho <erno.hopearuoho@gmail.com>
eugenkrizo <eugen.krizo@gmail.com>
Evan Hazlett <ejhazlett@gmail.com>
Evan Krall <krall@yelp.com>
Evan Phoenix <evan@fallingsnow.net>
Evan Wies <evan@neomantra.net>
@ -106,6 +144,7 @@ Fabio Rehm <fgrehm@gmail.com>
Fabrizio Regini <freegenie@gmail.com>
Faiz Khan <faizkhan00@gmail.com>
Fareed Dudhia <fareeddudhia@googlemail.com>
Felix Rabe <felix@rabe.io>
Fernando <fermayo@gmail.com>
Flavio Castelli <fcastelli@suse.com>
Francisco Souza <f@souza.cc>
@ -117,8 +156,11 @@ Gabe Rosenhouse <gabe@missionst.com>
Gabriel Monroy <gabriel@opdemand.com>
Galen Sampson <galen.sampson@gmail.com>
Gareth Rushgrove <gareth@morethanseven.net>
Geoffrey Bachelet <grosfrais@gmail.com>
Gereon Frey <gereon.frey@dynport.de>
German DZ <germ@ndz.com.ar>
Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
Goffert van Gool <goffert@phusion.nl>
Graydon Hoare <graydon@pobox.com>
Greg Thornton <xdissent@me.com>
grunny <mwgrunny@gmail.com>
@ -127,28 +169,40 @@ Gurjeet Singh <gurjeet@singh.im>
Guruprasad <lgp171188@gmail.com>
Harley Laue <losinggeneration@gmail.com>
Hector Castro <hectcastro@gmail.com>
Hobofan <goisser94@gmail.com>
Hunter Blanks <hunter@twilio.com>
Ian Truslove <ian.truslove@gmail.com>
ILYA Khlopotov <ilya.khlopotov@gmail.com>
inglesp <peter.inglesby@gmail.com>
Isaac Dupree <antispam@idupree.com>
Isabel Jimenez <contact.isabeljimenez@gmail.com>
Isao Jonas <isao.jonas@gmail.com>
Jack Danger Canty <jackdanger@squareup.com>
jakedt <jake@devtable.com>
Jake Moshenko <jake@devtable.com>
James Allen <jamesallen0108@gmail.com>
James Carr <james.r.carr@gmail.com>
James DeFelice <james.defelice@ishisystems.com>
James Harrison Fisher <jameshfisher@gmail.com>
James Mills <prologic@shortcircuit.net.au>
James Turnbull <james@lovedthanlost.net>
jaseg <jaseg@jaseg.net>
Jason McVetta <jason.mcvetta@gmail.com>
Jason Plum <jplum@devonit.com>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
Jeff Lindsay <progrium@gmail.com>
Jeremy Grosser <jeremy@synack.me>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
Jesse Dubay <jesse@thefortytwo.net>
Jilles Oldenbeuving <ojilles@gmail.com>
Jim Alateras <jima@comware.com.au>
Jimmy Cuadra <jimmy@jimmycuadra.com>
Joe Beda <joe.github@bedafamily.com>
Joel Handwell <joelhandwell@gmail.com>
Joe Shaw <joe@joeshaw.org>
Joe Van Dyk <joe@tanga.com>
Joffrey F <joffrey@dotcloud.com>
Joffrey F <joffrey@docker.com>
Johan Euphrosine <proppy@google.com>
Johannes 'fish' Ziemke <github@freigeist.org>
Johan Rydberg <johan.rydberg@gmail.com>
@ -157,7 +211,9 @@ John Feminella <jxf@jxf.me>
John Gardiner Myers <jgmyers@proofpoint.com>
John Warwick <jwarwick@gmail.com>
Jonas Pfenniger <jonas@pfenniger.name>
Jonathan McCrohan <jmccrohan@gmail.com>
Jonathan Mueller <j.mueller@apoveda.ch>
Jonathan Pares <jonathanpa@users.noreply.github.com>
Jonathan Rudenberg <jonathan@titanous.com>
Jon Wedaman <jweede@gmail.com>
Joost Cassee <joost@cassee.net>
@ -172,13 +228,17 @@ Julien Barbier <write0@gmail.com>
Julien Dubois <julien.dubois@gmail.com>
Justin Force <justin.force@gmail.com>
Justin Plock <jplock@users.noreply.github.com>
Justin Simonelis <justin.p.simonelis@gmail.com>
Karan Lyons <karan@karanlyons.com>
Karl Grzeszczak <karlgrz@gmail.com>
Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
Keli Hu <dev@keli.hu>
Ken Cochrane <kencochrane@gmail.com>
Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
Kevin Clark <kevin.clark@gmail.com>
Kevin J. Lynagh <kevin@keminglabs.com>
Kevin Menard <kevin@nirvdrum.com>
Kevin Wallace <kevin@pentabarf.net>
Keyvan Fatehi <keyvanfatehi@gmail.com>
kim0 <email.ahmedkamal@googlemail.com>
@ -187,14 +247,20 @@ Kimbro Staken <kstaken@kstaken.com>
Kiran Gangadharan <kiran.daredevil@gmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
Kyle Conroy <kyle.j.conroy@gmail.com>
lalyos <lalyos@yahoo.com>
Lance Chen <cyen0312@gmail.com>
Lars R. Damerow <lars@pixar.com>
Laurie Voss <github@seldo.com>
Lewis Peckover <lew+github@lew.io>
Liang-Chi Hsieh <viirya@gmail.com>
Lokesh Mandvekar <lsm5@redhat.com>
Louis Opter <kalessin@kalessin.fr>
lukaspustina <lukas.pustina@centerdevice.com>
lukemarsden <luke@digital-crocus.com>
Mahesh Tiyyagura <tmahesh@gmail.com>
Manuel Meurer <manuel@krautcomputing.com>
Manuel Woelker <github@manuel.woelker.org>
Marc Abramowitz <marc@marc-abramowitz.com>
Marc Kuo <kuomarc2@gmail.com>
Marco Hennings <marco.hennings@freiheit.com>
Marcus Farkas <toothlessgear@finitebox.com>
@ -206,23 +272,32 @@ Marko Mikulicic <mmikulicic@gmail.com>
Markus Fix <lispmeister@gmail.com>
Martijn van Oosterhout <kleptog@svana.org>
Martin Redmond <martin@tinychat.com>
Mason Malone <mason.malone@gmail.com>
Mateusz Sulima <sulima.mateusz@gmail.com>
Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
Matt Apperson <me@mattapperson.com>
Matt Bachmann <bachmann.matt@gmail.com>
Matt Haggard <haggardii@gmail.com>
Matthew Mueller <mattmuelle@gmail.com>
Matthias Klumpp <matthias@tenstral.net>
Matthias Kühnle <git.nivoc@neverbox.com>
mattymo <raytrac3r@gmail.com>
Maxime Petazzoni <max@signalfuse.com>
Maxim Treskin <zerthurd@gmail.com>
Max Shytikov <mshytikov@gmail.com>
meejah <meejah@meejah.ca>
Michael Brown <michael@netdirect.ca>
Michael Crosby <michael@crosbymichael.com>
Michael Gorsuch <gorsuch@github.com>
Michael Neale <michael.neale@gmail.com>
Michael Stapelberg <michael+gh@stapelberg.de>
Miguel Angel Fernández <elmendalerenda@gmail.com>
Mike Gaffney <mike@uberu.com>
Mike MacCana <mike.maccana@gmail.com>
Mike Naberezny <mike@naberezny.com>
Mikhail Sobolev <mss@mawhrin.net>
Mohit Soni <mosoni@ebay.com>
Morgante Pell <morgante.pell@morgante.net>
Morten Siebuhr <sbhr@sbhr.dk>
Nan Monnand Deng <monnand@gmail.com>
Nate Jones <nate@endot.org>
@ -234,22 +309,26 @@ Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
Nick Stinemates <nick@stinemates.org>
Nicolas Dudebout <nicolas.dudebout@gatech.edu>
Nicolas Kaiser <nikai@nikai.net>
noducks <onemannoducks@gmail.com>
Nolan Darilek <nolan@thewordnerd.info>
odk- <github@odkurzacz.org>
Oguz Bilgic <fisyonet@gmail.com>
Ole Reifschneider <mail@ole-reifschneider.de>
O.S.Tezer <ostezer@gmail.com>
O.S. Tezer <ostezer@gmail.com>
pandrew <letters@paulnotcom.se>
Pascal Borreli <pascal@borreli.com>
pattichen <craftsbear@gmail.com>
Paul Annesley <paul@annesley.cc>
Paul Bowsher <pbowsher@globalpersonals.co.uk>
Paul Hammond <paul@paulhammond.org>
Paul Jimenez <pj@place.org>
Paul Lietar <paul@lietar.net>
Paul Morie <pmorie@gmail.com>
Paul Nasrat <pnasrat@gmail.com>
Paul <paul9869@gmail.com>
Peter Braden <peterbraden@peterbraden.co.uk>
Peter Waller <peter@scraperwiki.com>
Phillip Alexander <git@phillipalexander.io>
Phil Spitler <pspitler@gmail.com>
Piergiuliano Bossi <pgbossi@gmail.com>
Pierre-Alain RIVIERE <pariviere@ippon.fr>
@ -257,6 +336,8 @@ Piotr Bogdan <ppbogdan@gmail.com>
pysqz <randomq@126.com>
Quentin Brossard <qbrossard@gmail.com>
Rafal Jeczalik <rjeczalik@gmail.com>
Rajat Pandit <rp@rajatpandit.com>
Ralph Bean <rbean@redhat.com>
Ramkumar Ramachandra <artagnon@gmail.com>
Ramon van Alteren <ramon@vanalteren.nl>
Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
@ -266,54 +347,71 @@ Richo Healey <richo@psych0tik.net>
Rick Bradley <rick@users.noreply.github.com>
Robert Obryk <robryk@gmail.com>
Roberto G. Hashioka <roberto.hashioka@docker.com>
Roberto Hashioka <roberto_hashioka@hotmail.com>
robpc <rpcann@gmail.com>
Rodrigo Vaz <rodrigo.vaz@gmail.com>
Roel Van Nyen <roel.vannyen@gmail.com>
Roger Peppe <rogpeppe@gmail.com>
Rohit Jnagal <jnagal@google.com>
Roland Moriz <rmoriz@users.noreply.github.com>
Rovanion Luckey <rovanion.luckey@gmail.com>
Ryan Aslett <github@mixologic.com>
Ryan Fowler <rwfowler@gmail.com>
Ryan O'Donnell <odonnellryanc@gmail.com>
Ryan Seto <ryanseto@yak.net>
Ryan Thomas <rthomas@atlassian.com>
Sam Alba <sam.alba@gmail.com>
Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
Sam Rijs <srijs@airpost.net>
Samuel Andaya <samuel@andaya.net>
Scott Bessler <scottbessler@gmail.com>
Scott Collier <emailscottcollier@gmail.com>
Sean Cronin <seancron@gmail.com>
Sean P. Kane <skane@newrelic.com>
Sébastien Stormacq <sebsto@users.noreply.github.com>
Shawn Landden <shawn@churchofgit.com>
Shawn Siefkas <shawn.siefkas@meredith.com>
Shih-Yuan Lee <fourdollars@gmail.com>
shin- <joffrey@docker.com>
Silas Sewell <silas@sewell.org>
Simon Taranto <simon.taranto@gmail.com>
Sindhu S <sindhus@live.in>
Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
Solomon Hykes <solomon@dotcloud.com>
Solomon Hykes <solomon@docker.com>
Song Gao <song@gao.io>
Soulou <leo@unbekandt.eu>
Sridatta Thatipamala <sthatipamala@gmail.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Steeve Morin <steeve.morin@gmail.com>
Stefan Praszalowicz <stefan@greplin.com>
Steven Burgess <steven.a.burgess@hotmail.com>
sudosurootdev <sudosurootdev@gmail.com>
Sven Dowideit <svendowideit@home.org.au>
Sven Dowideit <SvenDowideit@home.org.au>
Sylvain Bellemare <sylvain.bellemare@ezeep.com>
tang0th <tang0th@gmx.com>
Tatsuki Sugiura <sugi@nemui.org>
Tehmasp Chaudhri <tehmasp@gmail.com>
Thatcher Peskens <thatcher@dotcloud.com>
Thatcher Peskens <thatcher@docker.com>
Thermionix <bond711@gmail.com>
Thijs Terlouw <thijsterlouw@gmail.com>
Thomas Bikeev <thomas.bikeev@mac.com>
Thomas Frössman <thomasf@jossystem.se>
Thomas Hansen <thomas.hansen@gmail.com>
Thomas LEVEIL <thomasleveil@gmail.com>
Thomas Schroeter <thomas@cliqz.com>
Tianon Gravi <admwiggin@gmail.com>
Tim Bosse <maztaim@users.noreply.github.com>
Tibor Vass <teabee89@gmail.com>
Tim Bosse <taim@bosboot.org>
Timothy Hobbs <timothyhobbs@seznam.cz>
Tim Ruffles <oi@truffles.me.uk>
Tim Terhorst <mynamewastaken+git@gmail.com>
tjmehta <tj@init.me>
Tobias Bieniek <Tobias.Bieniek@gmx.de>
Tobias Schmidt <ts@soundcloud.com>
Tobias Schwab <tobias.schwab@dynport.de>
Todd Lunter <tlunter@gmail.com>
Tom Fotherby <tom+github@peopleperhour.com>
Tom Hulihan <hulihan.tom159@gmail.com>
Tommaso Visconti <tommaso.visconti@gmail.com>
Tony Daws <tony@daws.ca>
Travis Cline <travis.cline@gmail.com>
Tyler Brock <tyler.brock@gmail.com>
Tzu-Jung Lee <roylee17@gmail.com>
@ -322,26 +420,35 @@ unclejack <unclejacksons@gmail.com>
vgeta <gopikannan.venugopalsamy@gmail.com>
Victor Coisne <victor.coisne@dotcloud.com>
Victor Lyuboslavsky <victor@victoreda.com>
Victor Marmol <vmarmol@google.com>
Victor Vieux <victor.vieux@docker.com>
Viktor Vojnovski <viktor.vojnovski@amadeus.com>
Vincent Batts <vbatts@redhat.com>
Vincent Bernat <bernat@luffy.cx>
Vincent Mayers <vincent.mayers@inbloom.org>
Vincent Woo <me@vincentwoo.com>
Vinod Kulkarni <vinod.kulkarni@gmail.com>
Vishnu Kannan <vishnuk@google.com>
Vitor Monteiro <vmrmonteiro@gmail.com>
Vivek Agarwal <me@vivek.im>
Vladimir Bulyga <xx@ccxx.cc>
Vladimir Kirillov <proger@wilab.org.ua>
Vladimir Rutsky <iamironbob@gmail.com>
Vladimir Rutsky <altsysrq@gmail.com>
Walter Leibbrandt <github@wrl.co.za>
Walter Stanish <walter@pratyeka.org>
WarheadsSE <max@warheads.net>
Wes Morgan <cap10morgan@gmail.com>
Will Dietz <w@wdtz.org>
William Delanoue <william.delanoue@gmail.com>
William Henry <whenry@redhat.com>
Will Rouesnel <w.rouesnel@gmail.com>
Will Weaver <monkey@buildingbananas.com>
Xiuming Chen <cc@cxm.cc>
Yang Bai <hamo.by@gmail.com>
Yasunori Mahata <nori@mahata.net>
Yurii Rashkovskii <yrashk@gmail.com>
Zain Memon <zain@inzain.net>
Zaiste! <oh@zaiste.net>
Zilin Du <zilin.du@gmail.com>
zimbatm <zimbatm@zimbatm.com>
zqh <zqhxuyuan@gmail.com>

CHANGELOG.md

@ -1,5 +1,17 @@
# Changelog
## 0.12.0 (2014-06-05)
#### Notable features since 0.11.0
* New `COPY` Dockerfile instruction that copies a local file from the build context into the container without extracting it, even if the file is a tar archive
* Inherit file permissions from the host on `ADD`
* New `pause` and `unpause` commands to allow pausing and unpausing of containers using the cgroup freezer
* The `images` command has a `-f`/`--filter` option to filter the list of images
* Add `--force-rm` to clean up after a failed build
* Standardize JSON keys in Remote API to CamelCase
* `docker run` now pulls only the `latest` tag when no tag is specified
* Enhance security on Linux capabilities and device nodes
## 0.11.1 (2014-05-07)
#### Registry

CONTRIBUTING.md

@ -77,13 +77,8 @@ well as a clean documentation build. See ``docs/README.md`` for more
information on building the docs and how docs get released.
Write clean code. Universally formatted code promotes ease of writing, reading,
and maintenance. Always run `go fmt` before committing your changes. Most
editors have plugins that do this automatically, and there's also a git
pre-commit hook:
```
curl -o .git/hooks/pre-commit https://raw.githubusercontent.com/edsrzf/gofmt-git-hook/master/fmt-check && chmod +x .git/hooks/pre-commit
```
and maintenance. Always run `gofmt -s -w file.go` on each changed file before
committing your changes. Most editors have plugins that do this automatically.
Pull requests descriptions should be as clear as possible and include a
reference to all the issues that they address.
@ -108,10 +103,8 @@ same commit so that a revert would remove all traces of the feature or fix.
Commits that fix or close an issue should include a reference like `Closes #XXX`
or `Fixes #XXX`, which will automatically close the issue when merged.
Add your name to the AUTHORS file, but make sure the list is sorted and your
name and email address match your git configuration. The AUTHORS file is
regenerated occasionally from the git commit history, so a mismatch may result
in your changes being overwritten.
Please do not add yourself to the AUTHORS file, as it is regenerated
regularly from the Git history.
### Merge approval
@ -182,7 +175,7 @@ One way to automate this is to customise your git ``commit.template`` by adding
a ``prepare-commit-msg`` hook to your docker checkout:
```
curl -o .git/hooks/prepare-commit-msg https://raw.github.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg
curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg
```
* Note: the above script expects to find your GitHub user name in ``git config --get github.user``
@ -192,7 +185,10 @@ curl -o .git/hooks/prepare-commit-msg https://raw.github.com/dotcloud/docker/mas
There are several exceptions to the signing requirement. Currently these are:
* Your patch fixes spelling or grammar errors.
* Your patch is a single line change to documentation.
* Your patch is a single line change to documentation contained in the
`docs` directory.
* Your patch fixes Markdown formatting or syntax errors in the
documentation contained in the `docs` directory.
If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io)

Dockerfile

@ -24,7 +24,7 @@
#
docker-version 0.6.1
FROM ubuntu:13.10
FROM ubuntu:14.04
MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)
# Packaged dependencies
@ -41,6 +41,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
libapparmor-dev \
libcap-dev \
libsqlite3-dev \
lxc=1.0* \
mercurial \
pandoc \
reprepro \
@ -49,10 +50,6 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
s3cmd=1.1.0* \
--no-install-recommends
# Get and compile LXC 0.8 (since it is the most stable)
RUN git clone --no-checkout https://github.com/lxc/lxc.git /usr/local/lxc && cd /usr/local/lxc && git checkout -q lxc-0.8.0
RUN cd /usr/local/lxc && ./autogen.sh && ./configure --disable-docs && make && make install
# Get lvm2 source for compiling statically
RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
@ -84,7 +81,7 @@ RUN go get code.google.com/p/go.tools/cmd/cover
RUN gem install --no-rdoc --no-ri fpm --version 1.0.2
# Get the "busybox" image source so we can build locally instead of pulling
RUN git clone https://github.com/jpetazzo/docker-busybox.git /docker-busybox
RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox
# Setup s3cmd config
RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg
@ -92,6 +89,10 @@ RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/dotcloud/docker
ENV DOCKER_BUILDTAGS apparmor selinux

MAINTAINERS

@ -2,6 +2,8 @@ Solomon Hykes <solomon@docker.com> (@shykes)
Guillaume J. Charmes <guillaume@docker.com> (@creack)
Victor Vieux <vieux@docker.com> (@vieux)
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
.mailmap: Tianon Gravi <admwiggin@gmail.com> (@tianon)
.travis.yml: Tianon Gravi <admwiggin@gmail.com> (@tianon)
AUTHORS: Tianon Gravi <admwiggin@gmail.com> (@tianon)
Dockerfile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
Makefile: Tianon Gravi <admwiggin@gmail.com> (@tianon)

Makefile

@ -35,7 +35,7 @@ docs-release: docs-build
$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./release.sh
test: build
$(DOCKER_RUN_DOCKER) hack/make.sh binary test-unit test-integration test-integration-cli
$(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli
test-unit: build
$(DOCKER_RUN_DOCKER) hack/make.sh test-unit

NOTICE

@ -190,3 +190,9 @@ It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see http://www.bis.doc.gov
Licensing
=========
Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text.

VERSION

@ -1 +1 @@
0.11.1
0.12.0

api/README.md (new file)

@ -0,0 +1,5 @@
This directory contains code pertaining to the Docker API:
- Used by the docker client when communicating with the docker daemon
- Used by third party tools wishing to interface with the docker daemon
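As the second point suggests, a third-party tool talks to the daemon over the same Remote API the client uses. Below is a minimal sketch that lists dangling images using the new `filters` query parameter from this release; the daemon address is a placeholder, and the only response fields read are the ones the CLI itself consumes (`Id`, `RepoTags`).
```
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// The filters value is a JSON-encoded map of filter name to values,
	// matching what filters.ToParam appears to produce for "dangling=true".
	v := url.Values{}
	v.Set("filters", `{"dangling":["true"]}`)

	resp, err := http.Get("http://127.0.0.1:2375/v1.12/images/json?" + v.Encode())
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	var images []struct {
		Id       string
		RepoTags []string
	}
	if err := json.NewDecoder(resp.Body).Decode(&images); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	for _, img := range images {
		fmt.Println(img.Id, img.RepoTags)
	}
}
```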

api/client/cli.go

@ -23,6 +23,9 @@ var funcMap = template.FuncMap{
}
func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) {
if len(name) == 0 {
return nil, false
}
methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
method := reflect.ValueOf(cli).MethodByName(methodName)
if !method.IsValid() {
@ -73,7 +76,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsC
}
if in != nil {
if file, ok := in.(*os.File); ok {
if file, ok := out.(*os.File); ok {
terminalFd = file.Fd()
isTerminal = term.IsTerminal(terminalFd)
}

api/client/commands.go

@ -13,7 +13,7 @@ import (
"os"
"os/exec"
"path"
goruntime "runtime"
"runtime"
"strconv"
"strings"
"syscall"
@ -26,11 +26,14 @@ import (
"github.com/dotcloud/docker/dockerversion"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/nat"
"github.com/dotcloud/docker/opts"
"github.com/dotcloud/docker/pkg/signal"
"github.com/dotcloud/docker/pkg/term"
"github.com/dotcloud/docker/pkg/units"
"github.com/dotcloud/docker/registry"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/utils"
"github.com/dotcloud/docker/utils/filters"
)
func (cli *DockerCli) CmdHelp(args ...string) error {
@ -46,7 +49,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", api.DEFAULTUNIXSOCKET)
for _, command := range [][]string{
{"attach", "Attach to a running container"},
{"build", "Build a container from a Dockerfile"},
{"build", "Build an image from a Dockerfile"},
{"commit", "Create a new image from a container's changes"},
{"cp", "Copy files/folders from the containers filesystem to the host path"},
{"diff", "Inspect changes on a container's filesystem"},
@ -62,6 +65,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
{"login", "Register or Login to the docker registry server"},
{"logs", "Fetch the logs of a container"},
{"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"},
{"pause", "Pause all processes within a container"},
{"ps", "List containers"},
{"pull", "Pull an image or a repository from the docker registry server"},
{"push", "Push an image or a repository to the docker registry server"},
@ -75,6 +79,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
{"stop", "Stop a running container"},
{"tag", "Tag an image into a repository"},
{"top", "Lookup the running processes of a container"},
{"unpause", "Unpause a paused container"},
{"version", "Show the docker version information"},
{"wait", "Block until a container stops, then print its exit code"},
} {
@ -104,11 +109,12 @@ func (cli *DockerCli) CmdInsert(args ...string) error {
}
func (cli *DockerCli) CmdBuild(args ...string) error {
cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH")
cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new image from the source code at PATH")
tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success")
suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers, even after unsuccessful builds")
if err := cmd.Parse(args); err != nil {
return nil
}
@ -160,6 +166,9 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
if _, err = os.Stat(filename); os.IsNotExist(err) {
return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0))
}
if err = utils.ValidateContextDirectory(root); err != nil {
return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
}
context, err = archive.Tar(root, archive.Uncompressed)
}
var body io.Reader
@ -167,9 +176,9 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
// FIXME: ProgressReader shouldn't be this annoying to use
if context != nil {
sf := utils.NewStreamFormatter(false)
body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Uploading context")
body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Sending build context to Docker daemon")
}
// Upload the build context
// Send the build context
v := &url.Values{}
//Check if the given image name can be resolved
@ -193,6 +202,12 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
}
if *rm {
v.Set("rm", "1")
} else {
v.Set("rm", "0")
}
if *forceRm {
v.Set("forcerm", "1")
}
cli.LoadConfigFile()
@ -359,7 +374,7 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION)
}
fmt.Fprintf(cli.out, "Client API version: %s\n", api.APIVERSION)
fmt.Fprintf(cli.out, "Go version (client): %s\n", goruntime.Version())
fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version())
if dockerversion.GITCOMMIT != "" {
fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT)
}
@ -384,16 +399,8 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
if apiVersion := remoteVersion.Get("ApiVersion"); apiVersion != "" {
fmt.Fprintf(cli.out, "Server API version: %s\n", apiVersion)
}
fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit"))
fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion"))
release := utils.GetReleaseVersion()
if release != "" {
fmt.Fprintf(cli.out, "Last stable version: %s", release)
if (dockerversion.VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(dockerversion.VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) {
fmt.Fprintf(cli.out, ", please update docker")
}
fmt.Fprintf(cli.out, "\n")
}
fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit"))
return nil
}
@ -555,10 +562,14 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
func (cli *DockerCli) CmdStart(args ...string) error {
var (
cErr chan error
tty bool
cmd = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
attach = cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process")
openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin")
)
if err := cmd.Parse(args); err != nil {
return nil
}
@ -567,29 +578,24 @@ func (cli *DockerCli) CmdStart(args ...string) error {
return nil
}
var (
cErr chan error
tty bool
)
if *attach || *openStdin {
if cmd.NArg() > 1 {
return fmt.Errorf("You cannot start and attach multiple containers at once.")
}
body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false))
stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)
if err != nil {
return err
}
container := &api.Container{}
err = json.Unmarshal(body, container)
if err != nil {
env := engine.Env{}
if err := env.Decode(stream); err != nil {
return err
}
config := env.GetSubEnv("Config")
tty = config.GetBool("Tty")
tty = container.Config.Tty
if !container.Config.Tty {
if !tty {
sigc := cli.forwardAllSignals(cmd.Arg(0))
defer signal.StopCatch(sigc)
}
@ -598,15 +604,17 @@ func (cli *DockerCli) CmdStart(args ...string) error {
v := url.Values{}
v.Set("stream", "1")
if *openStdin && container.Config.OpenStdin {
if *openStdin && config.GetBool("OpenStdin") {
v.Set("stdin", "1")
in = cli.in
}
v.Set("stdout", "1")
v.Set("stderr", "1")
cErr = utils.Go(func() error {
return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil)
return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil)
})
}
@ -643,6 +651,52 @@ func (cli *DockerCli) CmdStart(args ...string) error {
return nil
}
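This is the first of several hunks in this file (and in the client utils further down) that replace `json.Unmarshal` into the now-deleted `api.Container` struct with `engine.Env` decoding. A minimal sketch of the pattern, limited to the methods the diff itself uses (`Decode`, `GetSubEnv`, `GetBool`), with a hard-coded JSON body standing in for a `/containers/<id>/json` response:
```
package main

import (
	"fmt"
	"strings"

	"github.com/dotcloud/docker/engine"
)

func main() {
	// Trimmed-down inspect payload; the real response carries many more keys.
	body := `{"State":{"Running":true,"ExitCode":0},"Config":{"Tty":false,"OpenStdin":true}}`

	env := engine.Env{}
	if err := env.Decode(strings.NewReader(body)); err != nil {
		fmt.Println("decode failed:", err)
		return
	}

	// Nested objects are reached through GetSubEnv instead of struct fields.
	state := env.GetSubEnv("State")
	config := env.GetSubEnv("Config")
	fmt.Println("running:", state.GetBool("Running"))
	fmt.Println("tty:", config.GetBool("Tty"))
}
```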
func (cli *DockerCli) CmdUnpause(args ...string) error {
cmd := cli.Subcmd("unpause", "CONTAINER", "Unpause all processes within a container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/unpause", name), nil, false)); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to unpause container named %s", name)
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) CmdPause(args ...string) error {
cmd := cli.Subcmd("pause", "CONTAINER", "Pause all processes within a container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/pause", name), nil, false)); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to pause container named %s", name)
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) CmdInspect(args ...string) error {
cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image")
tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.")
@ -769,34 +823,38 @@ func (cli *DockerCli) CmdPort(args ...string) error {
}
var (
port = cmd.Arg(1)
proto = "tcp"
parts = strings.SplitN(port, "/", 2)
container api.Container
port = cmd.Arg(1)
proto = "tcp"
parts = strings.SplitN(port, "/", 2)
)
if len(parts) == 2 && len(parts[1]) != 0 {
port = parts[0]
proto = parts[1]
}
body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false))
stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)
if err != nil {
return err
}
err = json.Unmarshal(body, &container)
if err != nil {
env := engine.Env{}
if err := env.Decode(stream); err != nil {
return err
}
ports := nat.PortMap{}
if err := env.GetSubEnv("NetworkSettings").GetJson("Ports", &ports); err != nil {
return err
}
if frontends, exists := container.NetworkSettings.Ports[nat.Port(port+"/"+proto)]; exists && frontends != nil {
if frontends, exists := ports[nat.Port(port+"/"+proto)]; exists && frontends != nil {
for _, frontend := range frontends {
fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort)
}
} else {
return fmt.Errorf("Error: No public port '%s' published for %s", cmd.Arg(1), cmd.Arg(0))
return nil
}
return nil
return fmt.Errorf("Error: No public port '%s' published for %s", cmd.Arg(1), cmd.Arg(0))
}
// 'docker rmi IMAGE' removes all images with the name IMAGE
@ -884,14 +942,14 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
fmt.Fprintf(w, "%s\t", utils.TruncateID(outID))
}
fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))))
fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))))
if *noTrunc {
fmt.Fprintf(w, "%s\t", out.Get("CreatedBy"))
} else {
fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45))
}
fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("Size")))
fmt.Fprintf(w, "%s\n", units.HumanSize(out.GetInt64("Size")))
} else {
if *noTrunc {
fmt.Fprintln(w, outID)
@ -1142,6 +1200,9 @@ func (cli *DockerCli) CmdImages(args ...string) error {
flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format")
flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format")
var flFilter opts.ListOpts
cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 'dangling=true')")
if err := cmd.Parse(args); err != nil {
return nil
}
@ -1150,11 +1211,32 @@ func (cli *DockerCli) CmdImages(args ...string) error {
return nil
}
filter := cmd.Arg(0)
// Consolidate all filter flags, and sanity check them early.
// They'll get processed in the daemon/server.
imageFilterArgs := filters.Args{}
for _, f := range flFilter.GetAll() {
var err error
imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs)
if err != nil {
return err
}
}
matchName := cmd.Arg(0)
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
if *flViz || *flTree {
body, _, err := readBody(cli.call("GET", "/images/json?all=1", nil, false))
v := url.Values{
"all": []string{"1"},
}
if len(imageFilterArgs) > 0 {
filterJson, err := filters.ToParam(imageFilterArgs)
if err != nil {
return err
}
v.Set("filters", filterJson)
}
body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false))
if err != nil {
return err
}
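The new `-f`/`--filter` handling above funnels each flag through the `utils/filters` helpers before the result goes onto the URL. A short sketch of that round trip, restricted to the two functions used here (`ParseFlag`, `ToParam`); the JSON shape noted in the final comment is an assumption based on how the value is then passed as the `filters` query parameter.
```
package main

import (
	"fmt"

	"github.com/dotcloud/docker/utils/filters"
)

func main() {
	args := filters.Args{}

	// Each "name=value" flag from the command line is folded into args.
	for _, f := range []string{"dangling=true"} {
		var err error
		args, err = filters.ParseFlag(f, args)
		if err != nil {
			fmt.Println("bad filter:", err)
			return
		}
	}

	// ToParam serialises the accumulated filters for the ?filters= parameter.
	param, err := filters.ToParam(args)
	if err != nil {
		fmt.Println("encode failed:", err)
		return
	}
	fmt.Println(param) // e.g. {"dangling":["true"]}
}
```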
@ -1184,13 +1266,13 @@ func (cli *DockerCli) CmdImages(args ...string) error {
}
}
if filter != "" {
if filter == image.Get("Id") || filter == utils.TruncateID(image.Get("Id")) {
if matchName != "" {
if matchName == image.Get("Id") || matchName == utils.TruncateID(image.Get("Id")) {
startImage = image
}
for _, repotag := range image.GetList("RepoTags") {
if repotag == filter {
if repotag == matchName {
startImage = image
}
}
@ -1208,7 +1290,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
root := engine.NewTable("Created", 1)
root.Add(startImage)
cli.WalkTree(*noTrunc, root, byParent, "", printNode)
} else if filter == "" {
} else if matchName == "" {
cli.WalkTree(*noTrunc, roots, byParent, "", printNode)
}
if *flViz {
@ -1216,8 +1298,17 @@ func (cli *DockerCli) CmdImages(args ...string) error {
}
} else {
v := url.Values{}
if len(imageFilterArgs) > 0 {
filterJson, err := filters.ToParam(imageFilterArgs)
if err != nil {
return err
}
v.Set("filters", filterJson)
}
if cmd.NArg() == 1 {
v.Set("filter", filter)
// FIXME rename this parameter, to not be confused with the filters flag
v.Set("filter", matchName)
}
if *all {
v.Set("all", "1")
@ -1249,7 +1340,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
}
if !*quiet {
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), utils.HumanSize(out.GetInt64("VirtualSize")))
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), units.HumanSize(out.GetInt64("VirtualSize")))
} else {
fmt.Fprintln(w, outID)
}
@ -1323,7 +1414,7 @@ func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix stri
imageID = utils.TruncateID(image.Get("Id"))
}
fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, utils.HumanSize(image.GetInt64("VirtualSize")))
fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(image.GetInt64("VirtualSize")))
if image.GetList("RepoTags")[0] != "<none>:<none>" {
fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", "))
} else {
@ -1408,12 +1499,12 @@ func (cli *DockerCli) CmdPs(args ...string) error {
outCommand = utils.Trunc(outCommand, 20)
}
ports.ReadListFrom([]byte(out.Get("Ports")))
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ","))
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ","))
if *size {
if out.GetInt("SizeRootFs") > 0 {
fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.GetInt64("SizeRw")), utils.HumanSize(out.GetInt64("SizeRootFs")))
fmt.Fprintf(w, "%s (virtual %s)\n", units.HumanSize(out.GetInt64("SizeRw")), units.HumanSize(out.GetInt64("SizeRootFs")))
} else {
fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("SizeRw")))
fmt.Fprintf(w, "%s\n", units.HumanSize(out.GetInt64("SizeRw")))
}
} else {
fmt.Fprint(w, "\n")
@ -1581,72 +1672,84 @@ func (cli *DockerCli) CmdDiff(args ...string) error {
}
func (cli *DockerCli) CmdLogs(args ...string) error {
cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container")
follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
var (
cmd = cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container")
follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
times = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
)
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
name := cmd.Arg(0)
body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false)
if err != nil {
return err
}
container := &api.Container{}
err = json.Unmarshal(body, container)
if err != nil {
env := engine.Env{}
if err := env.Decode(stream); err != nil {
return err
}
v := url.Values{}
v.Set("stdout", "1")
v.Set("stderr", "1")
if *times {
v.Set("timestamps", "1")
}
if *follow && container.State.Running {
if *follow {
v.Set("follow", "1")
}
if err := cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil {
return err
}
return nil
return cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), env.GetSubEnv("Config").GetBool("Tty"), nil, cli.out, cli.err, nil)
}
func (cli *DockerCli) CmdAttach(args ...string) error {
cmd := cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container")
noStdin := cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin")
proxy := cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)")
var (
cmd = cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container")
noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin")
proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)")
)
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
name := cmd.Arg(0)
body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false)
if err != nil {
return err
}
container := &api.Container{}
err = json.Unmarshal(body, container)
if err != nil {
env := engine.Env{}
if err := env.Decode(stream); err != nil {
return err
}
if !container.State.Running {
if !env.GetSubEnv("State").GetBool("Running") {
return fmt.Errorf("You cannot attach to a stopped container, start it first")
}
if container.Config.Tty && cli.isTerminal {
var (
config = env.GetSubEnv("Config")
tty = config.GetBool("Tty")
)
if tty && cli.isTerminal {
if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
utils.Debugf("Error monitoring TTY size: %s", err)
}
@ -1656,19 +1759,20 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
v := url.Values{}
v.Set("stream", "1")
if !*noStdin && container.Config.OpenStdin {
if !*noStdin && config.GetBool("OpenStdin") {
v.Set("stdin", "1")
in = cli.in
}
v.Set("stdout", "1")
v.Set("stderr", "1")
if *proxy && !container.Config.Tty {
if *proxy && !tty {
sigc := cli.forwardAllSignals(cmd.Arg(0))
defer signal.StopCatch(sigc)
}
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil {
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil); err != nil {
return err
}
@ -1686,7 +1790,8 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
func (cli *DockerCli) CmdSearch(args ...string) error {
cmd := cli.Subcmd("search", "TERM", "Search the docker index for images")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
trusted := cmd.Bool([]string{"t", "#trusted", "-trusted"}, false, "Only show trusted builds")
trusted := cmd.Bool([]string{"#t", "#trusted", "#-trusted"}, false, "Only show trusted builds")
automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds")
stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least xxx stars")
if err := cmd.Parse(args); err != nil {
return nil
@ -1709,9 +1814,9 @@ func (cli *DockerCli) CmdSearch(args ...string) error {
return err
}
w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0)
fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tTRUSTED\n")
fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n")
for _, out := range outs.Data {
if (*trusted && !out.GetBool("is_trusted")) || (*stars > out.GetInt("star_count")) {
if ((*automated || *trusted) && (!out.GetBool("is_trusted") && !out.GetBool("is_automated"))) || (*stars > out.GetInt("star_count")) {
continue
}
desc := strings.Replace(out.Get("description"), "\n", " ", -1)
@ -1725,7 +1830,7 @@ func (cli *DockerCli) CmdSearch(args ...string) error {
}
fmt.Fprint(w, "\t")
if out.GetBool("is_trusted") {
if out.GetBool("is_automated") || out.GetBool("is_trusted") {
fmt.Fprint(w, "[OK]")
}
fmt.Fprint(w, "\n")
@ -1839,6 +1944,10 @@ func (cli *DockerCli) CmdRun(args ...string) error {
v := url.Values{}
repos, tag := utils.ParseRepositoryTag(config.Image)
// pull only the image tagged 'latest' if no tag was specified
if tag == "" {
tag = "latest"
}
v.Set("fromImage", repos)
v.Set("tag", tag)
@ -2058,7 +2167,7 @@ func (cli *DockerCli) CmdCp(args ...string) error {
}
if statusCode == 200 {
if err := archive.Untar(stream, copyData.Get("HostPath"), nil); err != nil {
if err := archive.Untar(stream, copyData.Get("HostPath"), &archive.TarOptions{NoLchown: true}); err != nil {
return err
}
}
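The `cp` change just above passes `NoLchown` so that extraction on the client side does not attempt to chown the files it writes out. A hedged sketch of the two archive helpers this file relies on (`archive.Tar` and `archive.Untar`), with placeholder paths:
```
package main

import (
	"fmt"

	"github.com/dotcloud/docker/archive"
)

func main() {
	// Tar up a directory, then extract it elsewhere without attempting
	// lchown, mirroring the option the cp path now sets. Both paths are
	// placeholders for the example.
	stream, err := archive.Tar("/tmp/src", archive.Uncompressed)
	if err != nil {
		fmt.Println("tar failed:", err)
		return
	}
	if err := archive.Untar(stream, "/tmp/dst", &archive.TarOptions{NoLchown: true}); err != nil {
		fmt.Println("untar failed:", err)
		return
	}
	fmt.Println("extracted without chown")
}
```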

api/client/utils.go

@ -200,7 +200,7 @@ func waitForExit(cli *DockerCli, containerId string) (int, error) {
// getExitCode perform an inspect on the container. It returns
// the running state and the exit code.
func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false))
stream, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil, false)
if err != nil {
// If we can't connect, then the daemon probably died.
if err != ErrConnectionRefused {
@ -208,11 +208,14 @@ func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
}
return false, -1, nil
}
c := &api.Container{}
if err := json.Unmarshal(body, c); err != nil {
var result engine.Env
if err := result.Decode(stream); err != nil {
return false, -1, err
}
return c.State.Running, c.State.ExitCode, nil
state := result.GetSubEnv("State")
return state.GetBool("Running"), state.GetInt("ExitCode"), nil
}
func (cli *DockerCli) monitorTtySize(id string) error {

api/common.go

@ -2,15 +2,16 @@ package api
import (
"fmt"
"mime"
"strings"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/pkg/version"
"github.com/dotcloud/docker/utils"
"mime"
"strings"
)
const (
APIVERSION version.Version = "1.11"
APIVERSION version.Version = "1.12"
DEFAULTHTTPHOST = "127.0.0.1"
DEFAULTUNIXSOCKET = "/var/run/docker.sock"
)
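With `APIVERSION` bumped to 1.12, several handlers in the server changes further down gate behaviour on the caller's API version (`LessThan("1.12")`, `GreaterThanOrEqualTo("1.12")`). A minimal sketch of those comparisons, using only methods that appear elsewhere in this diff:
```
package main

import (
	"fmt"

	"github.com/dotcloud/docker/pkg/version"
)

func main() {
	v := version.Version("1.11")

	// Old clients keep the pre-1.12 behaviour...
	fmt.Println("1.11 < 1.12:", v.LessThan("1.12"))
	// ...while 1.12+ clients get the new defaults (e.g. rm=1 on /build).
	fmt.Println("1.12 >= 1.12:", version.Version("1.12").GreaterThanOrEqualTo("1.12"))
}
```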
@ -30,7 +31,7 @@ func DisplayablePorts(ports *engine.Table) string {
ports.Sort()
for _, port := range ports.Data {
if port.Get("IP") == "" {
result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PublicPort"), port.Get("Type")))
result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PrivatePort"), port.Get("Type")))
} else {
result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type")))
}

api/container.go (deleted)

@ -1,18 +0,0 @@
package api
import (
"github.com/dotcloud/docker/nat"
"github.com/dotcloud/docker/runconfig"
)
type Container struct {
Config runconfig.Config
HostConfig runconfig.HostConfig
State struct {
Running bool
ExitCode int
}
NetworkSettings struct {
Ports nat.PortMap
}
}

api/server/server.go

@ -122,17 +122,17 @@ func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter
var (
authConfig, err = ioutil.ReadAll(r.Body)
job = eng.Job("auth")
status string
stdoutBuffer = bytes.NewBuffer(nil)
)
if err != nil {
return err
}
job.Setenv("authConfig", string(authConfig))
job.Stdout.AddString(&status)
job.Stdout.Add(stdoutBuffer)
if err = job.Run(); err != nil {
return err
}
if status != "" {
if status := engine.Tail(stdoutBuffer, 1); status != "" {
var env engine.Env
env.Set("Status", status)
return writeJSON(w, http.StatusOK, env)
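This is the first of several handlers converted from `job.Stdout.AddString(&s)` to attaching a `bytes.Buffer` and reading the result back with `engine.Tail`. A sketch of just that read-back step, assuming, as its use here suggests, that `engine.Tail(buf, n)` returns the last n lines written to the buffer:
```
package main

import (
	"bytes"
	"fmt"

	"github.com/dotcloud/docker/engine"
)

func main() {
	// Stand-in for a job's stdout: a handler attaches this buffer with
	// job.Stdout.Add(stdoutBuffer) and lets the job write into it.
	stdoutBuffer := bytes.NewBuffer(nil)
	fmt.Fprintln(stdoutBuffer, "some intermediate output")
	fmt.Fprintln(stdoutBuffer, "hypothetical-container-id")

	// Only the last line (the ID or status) is wanted, not everything printed.
	fmt.Println("id:", engine.Tail(stdoutBuffer, 1))
}
```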
@ -165,6 +165,36 @@ func postContainersKill(eng *engine.Engine, version version.Version, w http.Resp
return nil
}
func postContainersPause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
if err := parseForm(r); err != nil {
return err
}
job := eng.Job("pause", vars["name"])
if err := job.Run(); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func postContainersUnpause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
if err := parseForm(r); err != nil {
return err
}
job := eng.Job("unpause", vars["name"])
if err := job.Run(); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
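The two handlers above expose the freezer-based pause/unpause over the Remote API. A minimal sketch of driving them over HTTP, assuming a daemon listening on `tcp://127.0.0.1:2375` and a running container named `test` (both placeholders); each endpoint takes no body and answers 204 No Content on success.
```
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Placeholder daemon address and container name.
	base := "http://127.0.0.1:2375/v1.12/containers/test"
	for _, action := range []string{"pause", "unpause"} {
		resp, err := http.Post(base+"/"+action, "text/plain", nil)
		if err != nil {
			fmt.Println("request failed:", err)
			return
		}
		resp.Body.Close()
		fmt.Println(action, "->", resp.Status)
	}
}
```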
func getContainersExport(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
@ -188,6 +218,8 @@ func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseW
job = eng.Job("images")
)
job.Setenv("filters", r.Form.Get("filters"))
// FIXME this parameter could just be a match filter
job.Setenv("filter", r.Form.Get("filter"))
job.Setenv("all", r.Form.Get("all"))
@ -244,7 +276,7 @@ func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWrite
return err
}
var job = eng.Job("events", r.RemoteAddr)
var job = eng.Job("events")
streamJSON(job, w, true)
job.Setenv("since", r.Form.Get("since"))
job.Setenv("until", r.Form.Get("until"))
@ -338,7 +370,7 @@ func getContainersLogs(eng *engine.Engine, version version.Version, w http.Respo
}
var (
job = eng.Job("inspect", vars["name"], "container")
job = eng.Job("container_inspect", vars["name"])
c, err = job.Stdout.AddEnv()
)
if err != nil {
@ -393,9 +425,10 @@ func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWrit
return err
}
var (
config engine.Env
env engine.Env
job = eng.Job("commit", r.Form.Get("container"))
config engine.Env
env engine.Env
job = eng.Job("commit", r.Form.Get("container"))
stdoutBuffer = bytes.NewBuffer(nil)
)
if err := config.Decode(r.Body); err != nil {
utils.Errorf("%s", err)
@ -407,12 +440,11 @@ func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWrit
job.Setenv("comment", r.Form.Get("comment"))
job.SetenvSubEnv("config", &config)
var id string
job.Stdout.AddString(&id)
job.Stdout.Add(stdoutBuffer)
if err := job.Run(); err != nil {
return err
}
env.Set("Id", id)
env.Set("Id", engine.Tail(stdoutBuffer, 1))
return writeJSON(w, http.StatusCreated, env)
}
@ -502,32 +534,6 @@ func getImagesSearch(eng *engine.Engine, version version.Version, w http.Respons
return job.Run()
}
// FIXME: 'insert' is deprecated as of 0.10, and should be removed in a future version.
func postImagesInsert(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
job := eng.Job("insert", vars["name"], r.Form.Get("url"), r.Form.Get("path"))
if version.GreaterThan("1.0") {
job.SetenvBool("json", true)
streamJSON(job, w, false)
} else {
job.Stdout.Add(w)
}
if err := job.Run(); err != nil {
if !job.Stdout.Used() {
return err
}
sf := utils.NewStreamFormatter(version.GreaterThan("1.0"))
w.Write(sf.FormatError(err))
}
return nil
}
func postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
@ -603,17 +609,17 @@ func postContainersCreate(eng *engine.Engine, version version.Version, w http.Re
return nil
}
var (
out engine.Env
job = eng.Job("create", r.Form.Get("name"))
outWarnings []string
outId string
warnings = bytes.NewBuffer(nil)
out engine.Env
job = eng.Job("create", r.Form.Get("name"))
outWarnings []string
stdoutBuffer = bytes.NewBuffer(nil)
warnings = bytes.NewBuffer(nil)
)
if err := job.DecodeEnv(r.Body); err != nil {
return err
}
// Read container ID from the first line of stdout
job.Stdout.AddString(&outId)
job.Stdout.Add(stdoutBuffer)
// Read warnings from stderr
job.Stderr.Add(warnings)
if err := job.Run(); err != nil {
@ -624,7 +630,7 @@ func postContainersCreate(eng *engine.Engine, version version.Version, w http.Re
for scanner.Scan() {
outWarnings = append(outWarnings, scanner.Text())
}
out.Set("Id", outId)
out.Set("Id", engine.Tail(stdoutBuffer, 1))
out.SetList("Warnings", outWarnings)
return writeJSON(w, http.StatusCreated, out)
}
@ -720,20 +726,16 @@ func postContainersWait(eng *engine.Engine, version version.Version, w http.Resp
return fmt.Errorf("Missing parameter")
}
var (
env engine.Env
status string
job = eng.Job("wait", vars["name"])
env engine.Env
stdoutBuffer = bytes.NewBuffer(nil)
job = eng.Job("wait", vars["name"])
)
job.Stdout.AddString(&status)
job.Stdout.Add(stdoutBuffer)
if err := job.Run(); err != nil {
return err
}
// Parse a 16-bit encoded integer to map typical unix exit status.
_, err := strconv.ParseInt(status, 10, 16)
if err != nil {
return err
}
env.Set("StatusCode", status)
env.Set("StatusCode", engine.Tail(stdoutBuffer, 1))
return writeJSON(w, http.StatusOK, env)
}
@ -759,7 +761,7 @@ func postContainersAttach(eng *engine.Engine, version version.Version, w http.Re
}
var (
job = eng.Job("inspect", vars["name"], "container")
job = eng.Job("container_inspect", vars["name"])
c, err = job.Stdout.AddEnv()
)
if err != nil {
@ -823,7 +825,7 @@ func wsContainersAttach(eng *engine.Engine, version version.Version, w http.Resp
return fmt.Errorf("Missing parameter")
}
if err := eng.Job("inspect", vars["name"], "container").Run(); err != nil {
if err := eng.Job("container_inspect", vars["name"]).Run(); err != nil {
return err
}
@ -851,9 +853,11 @@ func getContainersByName(eng *engine.Engine, version version.Version, w http.Res
if vars == nil {
return fmt.Errorf("Missing parameter")
}
var job = eng.Job("inspect", vars["name"], "container")
var job = eng.Job("container_inspect", vars["name"])
if version.LessThan("1.12") {
job.SetenvBool("dirty", true)
}
streamJSON(job, w, false)
job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job
return job.Run()
}
@ -861,9 +865,11 @@ func getImagesByName(eng *engine.Engine, version version.Version, w http.Respons
if vars == nil {
return fmt.Errorf("Missing parameter")
}
var job = eng.Job("inspect", vars["name"], "image")
var job = eng.Job("image_inspect", vars["name"])
if version.LessThan("1.12") {
job.SetenvBool("dirty", true)
}
streamJSON(job, w, false)
job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job
return job.Run()
}
@ -907,12 +913,20 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite
} else {
job.Stdout.Add(utils.NewWriteFlusher(w))
}
if r.FormValue("forcerm") == "1" && version.GreaterThanOrEqualTo("1.12") {
job.Setenv("rm", "1")
} else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") {
job.Setenv("rm", "1")
} else {
job.Setenv("rm", r.FormValue("rm"))
}
job.Stdin.Add(r.Body)
job.Setenv("remote", r.FormValue("remote"))
job.Setenv("t", r.FormValue("t"))
job.Setenv("q", r.FormValue("q"))
job.Setenv("nocache", r.FormValue("nocache"))
job.Setenv("rm", r.FormValue("rm"))
job.Setenv("forcerm", r.FormValue("forcerm"))
job.SetenvJson("authConfig", authConfig)
job.SetenvJson("configFile", configFile)
@ -1071,12 +1085,13 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st
"/commit": postCommit,
"/build": postBuild,
"/images/create": postImagesCreate,
"/images/{name:.*}/insert": postImagesInsert,
"/images/load": postImagesLoad,
"/images/{name:.*}/push": postImagesPush,
"/images/{name:.*}/tag": postImagesTag,
"/containers/create": postContainersCreate,
"/containers/{name:.*}/kill": postContainersKill,
"/containers/{name:.*}/pause": postContainersPause,
"/containers/{name:.*}/unpause": postContainersUnpause,
"/containers/{name:.*}/restart": postContainersRestart,
"/containers/{name:.*}/start": postContainersStart,
"/containers/{name:.*}/stop": postContainersStop,
@ -1193,6 +1208,7 @@ func changeGroup(addr string, nameOrGid string) error {
// ListenAndServe sets up the required http.Server and gets it listening for
// each addr passed in and does protocol specific checking.
func ListenAndServe(proto, addr string, job *engine.Job) error {
var l net.Listener
r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
if err != nil {
return err
@ -1208,7 +1224,20 @@ func ListenAndServe(proto, addr string, job *engine.Job) error {
}
}
l, err := listenbuffer.NewListenBuffer(proto, addr, activationLock)
var oldmask int
if proto == "unix" {
oldmask = syscall.Umask(0777)
}
if job.GetenvBool("BufferRequests") {
l, err = listenbuffer.NewListenBuffer(proto, addr, activationLock)
} else {
l, err = net.Listen(proto, addr)
}
if proto == "unix" {
syscall.Umask(oldmask)
}
if err != nil {
return err
}
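For unix sockets the listener is now created under a temporary `umask(0777)` and only relaxed later with `chmod 0660` (after the optional group change), so the socket is never world-accessible in between. A sketch of the same sequence outside the daemon, assuming a unix host; the socket path is arbitrary.

```
package main

import (
	"fmt"
	"net"
	"os"
	"syscall"
)

func main() {
	addr := "/tmp/example-docker-like.sock"
	os.Remove(addr) // ignore error; the path may not exist yet

	// Create the socket with no permissions at all...
	oldmask := syscall.Umask(0777)
	l, err := net.Listen("unix", addr)
	syscall.Umask(oldmask)
	if err != nil {
		fmt.Println("listen:", err)
		return
	}
	defer l.Close()

	// ...then open it up only as far as intended (owner/group read-write).
	if err := os.Chmod(addr, 0660); err != nil {
		fmt.Println("chmod:", err)
		return
	}
	fmt.Println("listening on", addr)
}
```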
@ -1246,9 +1275,6 @@ func ListenAndServe(proto, addr string, job *engine.Job) error {
log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
}
case "unix":
if err := os.Chmod(addr, 0660); err != nil {
return err
}
socketGroup := job.Getenv("SocketGroup")
if socketGroup != "" {
if err := changeGroup(addr, socketGroup); err != nil {
@ -1260,6 +1286,9 @@ func ListenAndServe(proto, addr string, job *engine.Job) error {
}
}
}
if err := os.Chmod(addr, 0660); err != nil {
return err
}
default:
return fmt.Errorf("Invalid protocol format.")
}
@ -1280,10 +1309,6 @@ func ServeApi(job *engine.Job) engine.Status {
)
activationLock = make(chan struct{})
if err := job.Eng.Register("acceptconnections", AcceptConnections); err != nil {
return job.Error(err)
}
for _, protoAddr := range protoAddrs {
protoAddrParts := strings.SplitN(protoAddr, "://", 2)
if len(protoAddrParts) != 2 {
@ -1310,7 +1335,9 @@ func AcceptConnections(job *engine.Job) engine.Status {
go systemd.SdNotify("READY=1")
// close the lock so the listeners start accepting connections
close(activationLock)
if activationLock != nil {
close(activationLock)
}
return engine.StatusOK
}
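`acceptconnections` now tolerates being called when no listener was set up: the activation channel is only closed if it was ever created. The underlying pattern is "close a channel to release everyone blocked on it"; a standalone sketch:

```
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	activation := make(chan struct{})
	var wg sync.WaitGroup

	// Pretend listeners: each blocks until the activation channel is closed.
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-activation // a closed channel unblocks every receiver at once
			fmt.Println("listener", id, "accepting connections")
		}(i)
	}

	time.Sleep(50 * time.Millisecond) // stand-in for daemon start-up work
	if activation != nil {            // guard mirrors the change above
		close(activation)
	}
	wg.Wait()
}
```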

3
archive/README.md Normal file
View file

@ -0,0 +1,3 @@
This code provides helper functions for dealing with archive files.
**TODO**: Move this to either `pkg` or (if not possible) to `utils`.

View file

@ -1,14 +1,12 @@
package archive
import (
"bufio"
"bytes"
"compress/bzip2"
"compress/gzip"
"errors"
"fmt"
"github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/utils"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
"io"
"io/ioutil"
"os"
@ -17,6 +15,10 @@ import (
"path/filepath"
"strings"
"syscall"
"github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/utils"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
type (
@ -26,6 +28,7 @@ type (
TarOptions struct {
Includes []string
Compression Compression
NoLchown bool
}
)
@ -41,26 +44,16 @@ const (
)
func DetectCompression(source []byte) Compression {
sourceLen := len(source)
for compression, m := range map[Compression][]byte{
Bzip2: {0x42, 0x5A, 0x68},
Gzip: {0x1F, 0x8B, 0x08},
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
} {
fail := false
if len(m) > sourceLen {
if len(source) < len(m) {
utils.Debugf("Len too short")
continue
}
i := 0
for _, b := range m {
if b != source[i] {
fail = true
break
}
i++
}
if !fail {
if bytes.Compare(m, source[:len(m)]) == 0 {
return compression
}
}
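DetectCompression now compares the first bytes of the stream against each format's magic number instead of looping byte by byte. A minimal standalone version of the idea, using `bytes.HasPrefix`; the compression constants below are local to this sketch, not the archive package's.

```
package main

import (
	"bytes"
	"fmt"
)

type compression int

const (
	uncompressed compression = iota
	bzip2Fmt
	gzipFmt
	xzFmt
)

// detect returns the compression format whose magic bytes prefix source.
func detect(source []byte) compression {
	for format, magic := range map[compression][]byte{
		bzip2Fmt: {0x42, 0x5A, 0x68},
		gzipFmt:  {0x1F, 0x8B, 0x08},
		xzFmt:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
	} {
		if bytes.HasPrefix(source, magic) {
			return format
		}
	}
	return uncompressed
}

func main() {
	fmt.Println(detect([]byte{0x1F, 0x8B, 0x08, 0x00}) == gzipFmt) // true
	fmt.Println(detect([]byte("plain tar data")) == uncompressed)  // true
}
```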
@ -74,31 +67,24 @@ func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
}
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
buf := make([]byte, 10)
totalN := 0
for totalN < 10 {
n, err := archive.Read(buf[totalN:])
if err != nil {
if err == io.EOF {
return nil, fmt.Errorf("Tarball too short")
}
return nil, err
}
totalN += n
utils.Debugf("[tar autodetect] n: %d", n)
buf := bufio.NewReader(archive)
bs, err := buf.Peek(10)
if err != nil {
return nil, err
}
compression := DetectCompression(buf)
wrap := io.MultiReader(bytes.NewReader(buf), archive)
utils.Debugf("[tar autodetect] n: %v", bs)
compression := DetectCompression(bs)
switch compression {
case Uncompressed:
return ioutil.NopCloser(wrap), nil
return ioutil.NopCloser(buf), nil
case Gzip:
return gzip.NewReader(wrap)
return gzip.NewReader(buf)
case Bzip2:
return ioutil.NopCloser(bzip2.NewReader(wrap)), nil
return ioutil.NopCloser(bzip2.NewReader(buf)), nil
case Xz:
return xzDecompress(wrap)
return xzDecompress(buf)
default:
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
}
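DecompressStream now wraps the input in a `bufio.Reader` and uses `Peek` to sniff the magic bytes without consuming them, so the same reader can then be handed straight to the right decompressor. A sketch of that sniff-then-read pattern on a gzip stream built in memory:

```
package main

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

func main() {
	// Build a small gzip stream in memory to stand in for an uploaded tarball.
	var compressed bytes.Buffer
	zw := gzip.NewWriter(&compressed)
	zw.Write([]byte("hello from inside the archive"))
	zw.Close()

	buf := bufio.NewReader(&compressed)

	// Peek returns the next bytes without advancing the reader...
	magic, err := buf.Peek(3)
	if err != nil {
		panic(err)
	}
	if bytes.HasPrefix(magic, []byte{0x1F, 0x8B, 0x08}) {
		// ...so the full stream (magic bytes included) is still there for gzip.
		zr, err := gzip.NewReader(buf)
		if err != nil {
			panic(err)
		}
		data, _ := ioutil.ReadAll(zr)
		fmt.Println(string(data))
	}
}
```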
@ -194,7 +180,7 @@ func addTarFile(path, name string, tw *tar.Writer) error {
return nil
}
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader) error {
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error {
// hdr.Mode is in linux format, which we can use for syscalls,
// but for os.Foo() calls we need the mode converted to os.FileMode,
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
@ -255,7 +241,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader) e
return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
}
if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown {
return err
}
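`createTarFile` gains an `Lchown` flag so unprivileged callers can extract archives without failing on ownership: `os.Lchown` is still attempted, but its error is only fatal when the flag is set. A tiny sketch of that "try, and only sometimes care" pattern, assuming a unix host; the temp file and uid/gid values are only for illustration.

```
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// chownIfAllowed attempts the chown and only reports failure when the
// caller asked for ownership to be enforced (mirrors the Lchown flag above).
func chownIfAllowed(path string, uid, gid int, enforce bool) error {
	if err := os.Lchown(path, uid, gid); err != nil && enforce {
		return err
	}
	return nil
}

func main() {
	f, err := ioutil.TempFile("", "lchown-example")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	// As a non-root user, chowning to root would normally fail; with
	// enforce=false the error is swallowed, as Untar does with NoLchown.
	fmt.Println("enforced: ", chownIfAllowed(f.Name(), 0, 0, true))
	fmt.Println("tolerated:", chownIfAllowed(f.Name(), 0, 0, false))
}
```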
@ -309,8 +295,11 @@ func escapeName(name string) string {
return string(escaped)
}
// Tar creates an archive from the directory at `path`, only including files whose relative
// paths are included in `filter`. If `filter` is nil, then all files are included.
// TarFilter creates an archive from the directory at `srcPath` with `options`, and returns it as a
// stream of bytes.
//
// Files are included according to `options.Includes`, defaulting to all files.
// The stream is compressed according to `options.Compression`, defaulting to Uncompressed.
func TarFilter(srcPath string, options *TarOptions) (io.ReadCloser, error) {
pipeReader, pipeWriter := io.Pipe()
@ -418,14 +407,16 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if fi.IsDir() && hdr.Name == "." {
continue
}
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return err
}
}
}
if err := createTarFile(path, dest, hdr, tr); err != nil {
if err := createTarFile(path, dest, hdr, tr, options == nil || !options.NoLchown); err != nil {
return err
}

View file

@ -3,7 +3,6 @@ package archive
import (
"bytes"
"fmt"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
"io"
"io/ioutil"
"os"
@ -11,6 +10,8 @@ import (
"path"
"testing"
"time"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
func TestCmdStreamLargeStderr(t *testing.T) {
@ -132,8 +133,37 @@ func TestTarUntar(t *testing.T) {
// Failing prevents the archives from being uncompressed during ADD
func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) {
hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader}
err := createTarFile("pax_global_header", "some_dir", &hdr, nil)
err := createTarFile("pax_global_header", "some_dir", &hdr, nil, true)
if err != nil {
t.Fatal(err)
}
}
// Some tar archives have both GNU-specific (huge uid) and Ustar-specific (long name) entries.
// This is not supposed to happen (PAX should be used instead of Ustar for long names), but it does, and it should still work.
func TestUntarUstarGnuConflict(t *testing.T) {
f, err := os.Open("testdata/broken.tar")
if err != nil {
t.Fatal(err)
}
found := false
tr := tar.NewReader(f)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
t.Fatal(err)
}
if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" {
found = true
break
}
}
if !found {
t.Fatal("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm")
}
}

View file

@ -3,15 +3,16 @@ package archive
import (
"bytes"
"fmt"
"github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/utils"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
"io"
"os"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/utils"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
type ChangeType int
@ -293,13 +294,23 @@ func collectFileInfo(sourceDir string) (*FileInfo, error) {
// Compare two directories and generate an array of Change objects describing the changes
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
oldRoot, err := collectFileInfo(oldDir)
if err != nil {
return nil, err
}
newRoot, err := collectFileInfo(newDir)
if err != nil {
return nil, err
var (
oldRoot, newRoot *FileInfo
err1, err2 error
errs = make(chan error, 2)
)
go func() {
oldRoot, err1 = collectFileInfo(oldDir)
errs <- err1
}()
go func() {
newRoot, err2 = collectFileInfo(newDir)
errs <- err2
}()
for i := 0; i < 2; i++ {
if err := <-errs; err != nil {
return nil, err
}
}
return newRoot.Changes(oldRoot), nil
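ChangesDirs now scans the old and new layers concurrently, pushing each walk's error onto a buffered channel and failing on the first non-nil one. A self-contained sketch of that fan-out/fan-in error handling over two directories; the paths and the `countEntries` stand-in for `collectFileInfo` are placeholders.

```
package main

import (
	"fmt"
	"io/ioutil"
)

// countEntries stands in for collectFileInfo: any filesystem walk that can fail.
func countEntries(dir string) (int, error) {
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		return 0, err
	}
	return len(entries), nil
}

func main() {
	var (
		oldCount, newCount int
		err1, err2         error
		errs               = make(chan error, 2) // buffered so neither goroutine blocks
	)
	go func() {
		oldCount, err1 = countEntries("/tmp")
		errs <- err1
	}()
	go func() {
		newCount, err2 = countEntries("/")
		errs <- err2
	}()
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			fmt.Println("walk failed:", err)
			return
		}
	}
	fmt.Println(oldCount, newCount)
}
```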
@ -341,12 +352,13 @@ func ExportChanges(dir string, changes []Change) (Archive, error) {
whiteOutDir := filepath.Dir(change.Path)
whiteOutBase := filepath.Base(change.Path)
whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase)
timestamp := time.Now()
hdr := &tar.Header{
Name: whiteOut[1:],
Size: 0,
ModTime: time.Now(),
AccessTime: time.Now(),
ChangeTime: time.Now(),
ModTime: timestamp,
AccessTime: timestamp,
ChangeTime: timestamp,
}
if err := tw.WriteHeader(hdr); err != nil {
utils.Debugf("Can't write whiteout header: %s\n", err)

View file

@ -2,14 +2,14 @@ package archive
import (
"fmt"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
@ -18,15 +18,6 @@ import (
func mkdev(major int64, minor int64) uint32 {
return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
if time.IsZero() {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = ((1 << 30) - 2)
return
}
return syscall.NsecToTimespec(time.UnixNano())
}
// ApplyLayer parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`.
@ -89,7 +80,7 @@ func ApplyLayer(dest string, layer ArchiveReader) error {
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr); err != nil {
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil {
return err
}
}
@ -136,7 +127,7 @@ func ApplyLayer(dest string, layer ArchiveReader) error {
srcData = tmpFile
}
if err := createTarFile(path, dest, srcHdr, srcData); err != nil {
if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil {
return err
}

BIN
archive/testdata/broken.tar vendored Normal file

Binary file not shown.

16
archive/time_linux.go Normal file
View file

@ -0,0 +1,16 @@
package archive
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
if time.IsZero() {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = ((1 << 30) - 2)
return
}
return syscall.NsecToTimespec(time.UnixNano())
}

View file

@ -0,0 +1,16 @@
// +build !linux
package archive
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
nsec := int64(0)
if !time.IsZero() {
nsec = time.UnixNano()
}
return syscall.NsecToTimespec(nsec)
}

View file

@ -1,11 +1,16 @@
package builtins
import (
api "github.com/dotcloud/docker/api/server"
"runtime"
"github.com/dotcloud/docker/api"
apiserver "github.com/dotcloud/docker/api/server"
"github.com/dotcloud/docker/daemon/networkdriver/bridge"
"github.com/dotcloud/docker/dockerversion"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/registry"
"github.com/dotcloud/docker/server"
"github.com/dotcloud/docker/utils"
)
func Register(eng *engine.Engine) error {
@ -15,12 +20,18 @@ func Register(eng *engine.Engine) error {
if err := remote(eng); err != nil {
return err
}
if err := eng.Register("version", dockerVersion); err != nil {
return err
}
return registry.NewService().Install(eng)
}
// remote: a RESTful api for cross-docker communication
func remote(eng *engine.Engine) error {
return eng.Register("serveapi", api.ServeApi)
if err := eng.Register("serveapi", apiserver.ServeApi); err != nil {
return err
}
return eng.Register("acceptconnections", apiserver.AcceptConnections)
}
// daemon: a default execution and storage backend for Docker on Linux,
@ -44,3 +55,21 @@ func daemon(eng *engine.Engine) error {
}
return eng.Register("init_networkdriver", bridge.InitDriver)
}
// builtins jobs independent of any subsystem
func dockerVersion(job *engine.Job) engine.Status {
v := &engine.Env{}
v.Set("Version", dockerversion.VERSION)
v.SetJson("ApiVersion", api.APIVERSION)
v.Set("GitCommit", dockerversion.GITCOMMIT)
v.Set("GoVersion", runtime.Version())
v.Set("Os", runtime.GOOS)
v.Set("Arch", runtime.GOARCH)
if kernelVersion, err := utils.GetKernelVersion(); err == nil {
v.Set("KernelVersion", kernelVersion.String())
}
if _, err := v.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
return engine.StatusOK
}
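The new `version` builtin fills an engine.Env with build and runtime facts and streams it to the job's stdout. Outside the engine, the same information can be gathered with the standard library alone; a sketch in which the Version/GitCommit values are placeholders rather than the real dockerversion constants, and the JSON encoding only approximates how engine.Env serialises.

```
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"runtime"
)

func main() {
	// Placeholder values; the daemon takes these from the dockerversion package.
	v := map[string]string{
		"Version":   "0.12.0",
		"GitCommit": "unknown",
		"GoVersion": runtime.Version(),
		"Os":        runtime.GOOS,
		"Arch":      runtime.GOARCH,
	}
	// engine.Env serialises to JSON much like this when written to a job's stdout.
	if err := json.NewEncoder(os.Stdout).Encode(v); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```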

View file

@ -116,7 +116,7 @@ fi
flags=(
NAMESPACES {NET,PID,IPC,UTS}_NS
DEVPTS_MULTIPLE_INSTANCES
CGROUPS CGROUP_DEVICE
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_SCHED
MACVLAN VETH BRIDGE
NF_NAT_IPV4 IP_NF_TARGET_MASQUERADE
NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}

View file

@ -539,7 +539,7 @@ _docker_search()
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--no-trunc -t --trusted -s --stars" -- "$cur" ) )
COMPREPLY=( $( compgen -W "--no-trunc --automated -s --stars" -- "$cur" ) )
;;
*)
;;

View file

@ -71,7 +71,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d
complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container"
# build
complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build a container from a Dockerfile'
complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the verbose output generated by the containers'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build'
@ -229,7 +229,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print
complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image in the docker index'
complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output"
complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only displays with at least xxx stars'
complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s t -l trusted -d 'Only show trusted builds'
complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds'
# start
complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a stopped container'

View file

@ -6,7 +6,7 @@
# /data volume is owned by sysadmin.
# USAGE:
# # Download data Dockerfile
# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/data/Dockerfile
# wget http://raw.githubusercontent.com/dotcloud/docker/master/contrib/desktop-integration/data/Dockerfile
#
# # Build data image
# docker build -t data .

View file

@ -7,7 +7,7 @@
# sound devices. Tested on Debian 7.2
# USAGE:
# # Download Iceweasel Dockerfile
# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/iceweasel/Dockerfile
# wget http://raw.githubusercontent.com/dotcloud/docker/master/contrib/desktop-integration/iceweasel/Dockerfile
#
# # Build iceweasel image
# docker build -t iceweasel .

View file

@ -4,6 +4,8 @@
# Provides: docker
# Required-Start: $syslog $remote_fs
# Required-Stop: $syslog $remote_fs
# Should-Start: cgroupfs-mount cgroup-lite
# Should-Stop: cgroupfs-mount cgroup-lite
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Create lightweight, portable, self-sufficient containers.

View file

@ -3,7 +3,7 @@
# /etc/rc.d/init.d/docker
#
# Daemon for docker.io
#
#
# chkconfig: 2345 95 95
# description: Daemon for docker.io
@ -49,6 +49,13 @@ start() {
$exec -d $other_args &>> $logfile &
pid=$!
touch $lockfile
# wait up to 10 seconds for the pidfile to exist. see
# https://github.com/dotcloud/docker/issues/5359
tries=0
while [ ! -f $pidfile -a $tries -lt 10 ]; do
sleep 1
tries=$((tries + 1))
done
success
echo
else

View file

@ -1,6 +1,6 @@
description "Docker daemon"
start on filesystem
start on (local-filesystems and net-device-up IFACE!=lo)
stop on runlevel [!2345]
limit nofile 524288 1048576
limit nproc 524288 1048576

View file

@ -0,0 +1,206 @@
% DOCKERFILE(5) Docker User Manuals
% Zac Dover
% May 2014
# NAME
Dockerfile - automate the steps of creating a Docker image
# INTRODUCTION
The **Dockerfile** is a configuration file that automates the steps of creating
a Docker image. It is similar to a Makefile. Docker reads instructions from the
**Dockerfile** to automate the steps otherwise performed manually to create an
image. To build an image, create a file called **Dockerfile**. The
**Dockerfile** describes the steps taken to assemble the image. When the
**Dockerfile** has been created, call the **docker build** command, using the
path of the directory that contains the **Dockerfile** as the argument.
# SYNOPSIS
INSTRUCTION arguments
For example:
FROM image
# DESCRIPTION
A Dockerfile is a file that automates the steps of creating a Docker image.
A Dockerfile is similar to a Makefile.
# USAGE
**sudo docker build .**
-- runs the steps and commits them, building a final image
The path to the source repository defines where to find the context of the
build. The build is run by the docker daemon, not the CLI. The whole
context must be transferred to the daemon. The Docker CLI reports
"Sending build context to Docker daemon" when the context is sent to the daemon.
**sudo docker build -t repository/tag .**
-- specifies a repository and tag at which to save the new image if the build
succeeds. The Docker daemon runs the steps one-by-one, committing the result
to a new image if necessary before finally outputting the ID of the new
image. The Docker daemon automatically cleans up the context it is given.
Docker re-uses intermediate images whenever possible. This significantly
accelerates the *docker build* process.
# FORMAT
**FROM image**
or
**FROM image:tag**
-- The FROM instruction sets the base image for subsequent instructions. A
valid Dockerfile must have FROM as its first instruction. The image can be any
valid image. It is easy to start by pulling an image from the public
repositories.
-- FROM must be the first non-comment instruction in the Dockerfile.
-- FROM may appear multiple times within a single Dockerfile in order to create
multiple images. Make a note of the last image id output by the commit before
each new FROM command.
-- If no tag is given to the FROM instruction, latest is assumed. If the used
tag does not exist, an error is returned.
**MAINTAINER**
--The MAINTAINER instruction sets the Author field for the generated images.
**RUN**
--RUN has two forms:
**RUN <command>**
-- (the command is run in a shell - /bin/sh -c)
**RUN ["executable", "param1", "param2"]**
--The above is the exec (executable) form.
--The RUN instruction executes any commands in a new layer on top of the
current image and commits the results. The committed image is used for the next
step in Dockerfile.
--Layering RUN instructions and generating commits conforms to the core
concepts of Docker where commits are cheap and containers can be created from
any point in the history of an image. This is similar to source control. The
exec form makes it possible to avoid shell string munging. The exec form makes
it possible to RUN commands using a base image that does not contain /bin/sh.
**CMD**
--CMD has three forms:
**CMD ["executable", "param1", "param2"]** This is the preferred form, the
exec form.
**CMD ["param1", "param2"]** This command provides default parameters to
ENTRYPOINT)
**CMD command param1 param2** This command is run as a shell.
--There can be only one CMD in a Dockerfile. If more than one CMD is listed, only
the last CMD takes effect.
The main purpose of a CMD is to provide defaults for an executing container.
These defaults may include an executable, or they can omit the executable. If
they omit the executable, an ENTRYPOINT must be specified.
When used in the shell or exec formats, the CMD instruction sets the command to
be executed when running the image.
If you use the shell form of the CMD, the <command> executes in /bin/sh -c:
**FROM ubuntu**
**CMD echo "This is a test." | wc -**
If you run <command> without a shell, then you must express the command as a
JSON array and give the full path to the executable. This array form is the
preferred form of CMD. All additional parameters must be individually expressed
as strings in the array:
**FROM ubuntu**
**CMD ["/usr/bin/wc","--help"]**
To make the container run the same executable every time, use ENTRYPOINT in
combination with CMD.
If the user specifies arguments to docker run, the specified commands override
the default in CMD.
Do not confuse **RUN** with **CMD**. RUN runs a command and commits the result. CMD
executes nothing at build time, but specifies the intended command for the
image.
**EXPOSE**
--**EXPOSE <port> [<port>...]**
The **EXPOSE** instruction informs Docker that the container listens on the
specified network ports at runtime. Docker uses this information to
interconnect containers using links, and to set up port redirection on the host
system.
**ENV**
--**ENV <key> <value>**
The ENV instruction sets the environment variable <key> to
the value <value>. This value is passed to all future RUN instructions. This is
functionally equivalent to prefixing the command with **<key>=<value>**. The
environment variables that are set with ENV persist when a container is run
from the resulting image. Use docker inspect to inspect these values, and
change them using docker run **--env <key>=<value>.**
Note that setting **ENV DEBIAN_FRONTEND noninteractive** may cause
unintended consequences, because it will persist when the container is run
interactively, as with the following command: **docker run -t -i image bash**
**ADD**
--**ADD <src> <dest>** The ADD instruction copies new files from <src> and adds them
to the filesystem of the container at path <dest>. <src> must be the path to a
file or directory relative to the source directory that is being built (the
context of the build) or a remote file URL. <dest> is the absolute path to
which the source is copied inside the target container. All new files and
directories are created with mode 0755, with uid and gid 0.
**ENTRYPOINT**
--**ENTRYPOINT** has two forms: ENTRYPOINT ["executable", "param1", "param2"]
(This is like an exec, and is the preferred form.) ENTRYPOINT command param1
param2 (This is running as a shell.) An ENTRYPOINT helps you configure a
container that can be run as an executable. When you specify an ENTRYPOINT,
the whole container runs as if it was only that executable. The ENTRYPOINT
instruction adds an entry command that is not overwritten when arguments are
passed to docker run. This is different from the behavior of CMD. This allows
arguments to be passed to the entrypoint, for instance docker run <image> -d
passes the -d argument to the ENTRYPOINT. Specify parameters either in the
ENTRYPOINT JSON array (as in the preferred exec form above), or by using a CMD
statement. Parameters in the ENTRYPOINT are not overwritten by the docker run
arguments. Parameters specified via CMD are overwritten by docker run
arguments. Specify a plain string for the ENTRYPOINT, and it will execute in
/bin/sh -c, like a CMD instruction:
FROM ubuntu
ENTRYPOINT wc -l -
This means that the Dockerfile's image always takes stdin as input (that's
what "-" means), and prints the number of lines (that's what "-l" means). To
make this optional but default, use a CMD:
FROM ubuntu
CMD ["-l", "-"]
ENTRYPOINT ["/usr/bin/wc"]
**VOLUME**
--**VOLUME ["/data"]**
The VOLUME instruction creates a mount point with the specified name and marks
it as holding externally-mounted volumes from the native host or from other
containers.
**USER**
-- **USER daemon**
The USER instruction sets the username or UID that is used when running the
image.
**WORKDIR**
-- **WORKDIR /path/to/workdir**
The WORKDIR instruction sets the working directory for the **RUN**, **CMD**, and **ENTRYPOINT** Dockerfile commands that follow it.
It can be used multiple times in a single Dockerfile. Relative paths are defined relative to the path of the previous **WORKDIR** instruction. For example:
**WORKDIR /a WORKDIR /b WORKDIR c RUN pwd**
In the above example, the output of the **pwd** command is **/a/b/c**.
**ONBUILD**
-- **ONBUILD [INSTRUCTION]**
The ONBUILD instruction adds a trigger instruction to the image, which is
executed at a later time, when the image is used as the base for another
build. The trigger is executed in the context of the downstream build, as
if it had been inserted immediately after the FROM instruction in the
downstream Dockerfile. Any build instruction can be registered as a
trigger. This is useful if you are building an image to be
used as a base for building other images, for example an application build
environment or a daemon to be customized with a user-specific
configuration. For example, if your image is a reusable python
application builder, it requires application source code to be
added in a particular directory, and might require a build script
to be called after that. You can't just call ADD and RUN now, because
you don't yet have access to the application source code, and it
is different for each application build. Providing
application developers with a boilerplate Dockerfile to copy-paste
into their application is inefficient, error-prone, and
difficult to update because it mixes with application-specific code.
The solution is to use **ONBUILD** to register instructions in advance, to
run later, during the next build stage.
# HISTORY
*May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.io Dockerfile documentation.

View file

@ -9,10 +9,11 @@ docker-attach - Attach to a running container
# DESCRIPTION
If you **docker run** a container in detached mode (**-d**), you can reattach to
the detached container with **docker attach** using the container's ID or name.
the detached container with **docker attach** using the container's ID or name.
You can detach from the container again (and leave it running) with `CTRL-c` (for
a quiet exit) or `CTRL-\` to get a stacktrace of the Docker client when it quits.
You can detach from the container again (and leave it running) with `CTRL-q
CTRL-q` (for a quiet exit), or `CTRL-c` which will send a SIGKILL to the
container, or `CTRL-\` to get a stacktrace of the Docker client when it quits.
When you detach from a container the exit code will be returned to
the client.

View file

@ -2,7 +2,7 @@
% William Henry
% APRIL 2014
# NAME
docker-build - Build a container image from a Dockerfile source at PATH
docker-build - Build an image from a Dockerfile source at PATH
# SYNOPSIS
**docker build** [**--no-cache**[=*false*]] [**-q**|**--quiet**[=*false*]]
@ -17,7 +17,7 @@ be used by **ADD** commands found within the Dockerfile.
Warning, this will send a lot of data to the Docker daemon depending
on the contents of the current directory. The build is run by the Docker
daemon, not by the CLI, so the whole context must be transferred to the daemon.
The Docker CLI reports "Uploading context" when the context is sent to
The Docker CLI reports "Sending build context to Docker daemon" when the context is sent to
the daemon.
When a single Dockerfile is given as the URL, then no context is set.
@ -34,8 +34,9 @@ as context.
build process. The default is true.
**-t**, **--tag**=*tag*
Tag to be applied to the resulting image on successful completion of
the build.
The name to be applied to the resulting image on successful completion of
the build. `tag` in this context means the entire image name including the
optional TAG after the ':'.
**--no-cache**=*true*|*false*
When set to true, do not use a cache when building the image. The
@ -66,6 +67,40 @@ in the Dockerfile. Note: If you include a tar file (a good practice!), then
Docker will automatically extract the contents of the tar file
specified within the `ADD` instruction into the specified target.
## Building an image and naming that image
A good practice is to give a name to the image you are building. There are
no hard rules here but it is best to give the names consideration.
The **-t**/**--tag** flag is used to rename an image. Here are some examples:
Though it is not a good practice, image names can be arbitrary:
docker build -t myimage .
A better approach is to provide a fully qualified and meaningful repository,
name, and tag (where the tag in this context means the qualifier after
the ":"). In this example we build a JBoss image for the Fedora repository
and give it the version 1.0:
docker build -t fedora/jboss:1.0
The next example is for the "whenry" user repository and uses Fedora and
JBoss and gives it the version 2.1 :
docker build -t whenry/fedora-jboss:V2.1
If you do not provide a version tag then Docker will assign `latest`:
docker build -t whenry/fedora-jboss
When you list the images, the image above will have the tag `latest`.
So renaming an image is arbitrary but consideration should be given to
a useful convention that makes sense for consumers and should also take
into account Docker community conventions.
## Building an image using a URL
This will clone the specified Github repository from the URL and use it

View file

@ -14,8 +14,8 @@ docker-run - Run a process in an isolated container
[**-e**|**--env**=*environment*] [**--entrypoint**=*command*]
[**--expose**=*port*] [**-P**|**--publish-all**[=*false*]]
[**-p**|**--publish**=*port-mappping*] [**-h**|**--hostname**=*hostname*]
[**--rm**[=*false*]] [**--priviledged**[=*false*]
[**-i**|**--interactive**[=*false*]
[**--rm**[=*false*]] [**--privileged**[=*false*]]
[**-i**|**--interactive**[=*false*]]
[**-t**|**--tty**[=*false*]] [**--lxc-conf**=*options*]
[**-n**|**--networking**[=*true*]]
[**-v**|**--volume**=*volume*] [**--volumes-from**=*container-id*]
@ -64,6 +64,9 @@ the other shell to view a list of the running containers. You can reattach to a
detached container with **docker attach**. If you choose to run a container in
the detached mode, then you cannot use the **-rm** option.
When attached in the tty mode, you can detach from a running container without
stopping the process by pressing the keys CTRL-P CTRL-Q.
**--dns**=*IP-address*
Set custom DNS servers. This option can be used to override the DNS
@ -100,8 +103,8 @@ container can be started with the **--link**.
**-m**, **-memory**=*memory-limit*
Allows you to constrain the memory available to a container. If the host
supports swap memory, then the -m memory setting can be larger than physical
RAM. The memory limit format: <number><optional unit>, where unit = b, k, m or
g.
RAM. If a limit of 0 is specified, the container's memory is not limited. The
memory limit format: <number><optional unit>, where unit = b, k, m or g.
**-P**, **-publish-all**=*true*|*false*
When set to true publish all exposed ports to the host interfaces. The
@ -164,7 +167,7 @@ and foreground Docker containers.
Docker container. This is because by default a container is not allowed to
access any devices. A “privileged” container is given access to all devices.
When the operator executes **docker run -privileged**, Docker will enable access
When the operator executes **docker run --privileged**, Docker will enable access
to all devices on the host as well as set some configuration in AppArmor to
allow the container nearly all the same access to the host as processes running
outside of a container on the host.
@ -190,18 +193,28 @@ interactive shell. The default is value is false.
Set a username or UID for the container.
**-v**, **-volume**=*volume*
Bind mount a volume to the container. The **-v** option can be used one or
**-v**, **-volume**=*volume*[:ro|:rw]
Bind mount a volume to the container.
The **-v** option can be used one or
more times to add one or more mounts to a container. These mounts can then be
used in other containers using the **--volumes-from** option. See examples.
used in other containers using the **--volumes-from** option.
The volume may be optionally suffixed with :ro or :rw to mount the volumes in
read-only or read-write mode, respectively. By default, the volumes are mounted
read-write. See examples.
**--volumes-from**=*container-id*
**--volumes-from**=*container-id*[:ro|:rw]
Will mount volumes from the specified container identified by container-id.
Once a volume is mounted in a one container it can be shared with other
containers using the **--volumes-from** option when running those other
containers. The volumes can be shared even if the original container with the
mount is not running.
mount is not running.
The container ID may be optionally suffixed with :ro or
:rw to mount the volumes in read-only or read-write mode, respectively. By
default, the volumes are mounted in the same mode (read write or read only) as
the reference container.
**-w**, **-workdir**=*directory*
@ -227,7 +240,7 @@ can override the working directory by using the **-w** option.
## Exposing log messages from the container to the host's log
If you want messages that are logged in your container to show up in the host's
syslog/journal then you should bind mount the /var/log directory as follows.
syslog/journal then you should bind mount /dev/log as follows.
# docker run -v /dev/log:/dev/log -i -t fedora /bin/bash
@ -307,7 +320,7 @@ fedora-data image:
# docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true
# docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash
Multiple -volumes-from parameters will bring together multiple data volumes from
Multiple --volumes-from parameters will bring together multiple data volumes from
multiple containers. And it's possible to mount the volumes that came from the
DATA container in yet another container via the fedora-container1 intermediary
container, allowing you to abstract the actual data source from users of that data:

View file

@ -5,7 +5,7 @@
docker-search - Search the docker index for images
# SYNOPSIS
**docker search** **--no-trunc**[=*false*] **-t**|**--trusted**[=*false*]
**docker search** **--no-trunc**[=*false*] **--automated**[=*false*]
**-s**|**--stars**[=*0*] TERM
# DESCRIPTION
@ -13,7 +13,7 @@ docker-search - Search the docker index for images
Search an index for an image that matches the term TERM. The table
of images returned displays the name, description (truncated by default),
number of stars awarded, whether the image is official, and whether it
is trusted.
is automated.
# OPTIONS
**--no-trunc**=*true*|*false*
@ -23,8 +23,8 @@ is trusted.
Only displays images with at least NUM (integer) stars, i.e. only those images
ranked >=NUM.
**-t**, **--trusted**=*true*|*false*
When true only show trusted builds. The default is false.
**--automated**=*true*|*false*
When true only show automated builds. The default is false.
# EXAMPLE
@ -34,19 +34,19 @@ Search the registry for the term 'fedora' and only display those images
ranked 3 or higher:
$ sudo docker search -s 3 fedora
NAME DESCRIPTION STARS OFFICIAL TRUSTED
NAME DESCRIPTION STARS OFFICIAL AUTOMATED
mattdm/fedora A basic Fedora image corresponding roughly... 50
fedora (Semi) Official Fedora base image. 38
mattdm/fedora-small A small Fedora image on which to build. Co... 8
goldmann/wildfly A WildFly application server running on a ... 3 [OK]
## Search the registry for trusted images
## Search the registry for automated images
Search the registry for the term 'fedora' and only display trusted images
Search the registry for the term 'fedora' and only display automated images
ranked 1 or higher:
$ sudo docker search -s 1 --automated fedora
NAME DESCRIPTION STARS OFFICIAL TRUSTED
NAME DESCRIPTION STARS OFFICIAL AUTOMATED
goldmann/wildfly A WildFly application server running on a ... 3 [OK]
tutum/fedora-20 Fedora 20 image with SSH access. For the r... 1 [OK]

View file

@ -20,6 +20,10 @@ the process
**-i**, **--interactive**=*true*|*false*
When true attach to container's stdin
# NOTES
If run on a started container, start takes no action and succeeds
unconditionally.
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
based on docker.io source material and internal work.

View file

@ -9,11 +9,12 @@ docker-tag - Tag an image in the repository
IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]
# DESCRIPTION
This will tag an image in the repository.
This will give a new alias to an image in the repository. This refers to the
entire image name including the optional TAG after the ':'.
# "OPTIONS"
**-f**, **--force**=*true*|*false*
When set to true, force the tag name. The default is *false*.
When set to true, force the alias. The default is *false*.
**REGISTRYHOST**
The hostname of the registry if required. This may also include the port
@ -26,13 +27,16 @@ separated by a ':'
The image name.
**TAG**
The tag you are assigning to the image.
The tag you are assigning to the image. Though this is arbitrary, it is
recommended to use it for a version, to distinguish images with the same name.
Note that here TAG is a part of the overall name or "tag".
# EXAMPLES
## Tagging an image
## Giving an image a new alias
Here is an example of tagging an image with the tag version1.0 :
Here is an example of aliasing an image (e.g. 0e5574283393) as "httpd" and
tagging it into the "fedora" repository with "version1.0":
docker tag 0e5574283393 fedora/httpd:version1.0

View file

@ -26,10 +26,10 @@ To see the man page for a command run **man docker <command>**.
**-D**=*true*|*false*
Enable debug mode. Default is false.
**-H**, **--host**=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind or
**-H**, **--host**=[unix:///var/run/docker.sock]: tcp://[host:port] to bind or
unix://[/path/to/socket] to use.
Enable both the socket support and TCP on localhost. When host=[0.0.0.0],
port=[4243] or path =[/var/run/docker.sock] is omitted, default values are used.
The socket(s) to bind to in daemon mode specified using one or more
tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.
**--api-enable-cors**=*true*|*false*
Enable CORS headers in the remote API. Default is false.
@ -73,7 +73,7 @@ port=[4243] or path =[/var/run/docker.sock] is omitted, default values are used.
**-v**=*true*|*false*
Print version information and quit. Default is false.
**--selinux-enabled=*true*|*false*
**--selinux-enabled**=*true*|*false*
Enable selinux support. Default is false.
# COMMANDS
@ -81,7 +81,7 @@ port=[4243] or path =[/var/run/docker.sock] is omitted, default values are used.
Attach to a running container
**docker-build(1)**
Build a container from a Dockerfile
Build an image from a Dockerfile
**docker-commit(1)**
Create a new image from a container's changes

View file

@ -3,7 +3,7 @@
.\"
.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
.SH NAME
docker-build \- Build a container image from a Dockerfile source at PATH
docker-build \- Build an image from a Dockerfile source at PATH
.SH SYNOPSIS
.B docker build
[\fB--no-cache\fR[=\fIfalse\fR]

View file

@ -39,7 +39,7 @@ CPU shares in relative weight. You can increase the priority of a container with
.TP
.B -m, --memory=\fImemory-limit\fR:
Allows you to constrain the memory available to a container. If the host supports swap memory, then the -m memory setting can be larger than physical RAM. The memory limit format: <number><optional unit>, where unit = b, k, m or g.
Allows you to constrain the memory available to a container. If the host supports swap memory, then the -m memory setting can be larger than physical RAM. If a limit of 0 is specified, the container's memory is not limited. The memory limit format: <number><optional unit>, where unit = b, k, m or g.
.TP
.B --cidfile=\fIfile\fR:
@ -245,7 +245,7 @@ docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash
.RE
.sp
.TP
Multiple -volumes-from parameters will bring together multiple data volumes from multiple containers. And it's possible to mount the volumes that came from the DATA container in yet another container via the fedora-container1 intermidiery container, allowing to abstract the actual data source from users of that data:
Multiple --volumes-from parameters will bring together multiple data volumes from multiple containers. And it's possible to mount the volumes that came from the DATA container in yet another container via the fedora-container1 intermediary container, allowing you to abstract the actual data source from users of that data:
.sp
.RS
docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash

View file

@ -19,7 +19,7 @@ To see the man page for a command run \fBman docker <command>\fR.
Enable debug mode
.TP
.B\-H=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind or unix://[/path/to/socket] to use.
When host=[0.0.0.0], port=[4243] or path
When host=[0.0.0.0], port=[2375] or path
=[/var/run/docker.sock] is omitted, default values are used.
.TP
.B \-\-api-enable-cors=false
@ -69,7 +69,7 @@ Print version information and quit
Attach to a running container
.TP
.B build
Build a container from a Dockerfile
Build an image from a Dockerfile
.TP
.B commit
Create a new image from a container's changes

View file

@ -2,6 +2,10 @@
# Generate a very minimal filesystem based on busybox-static,
# and load it into the local docker under the name "busybox".
echo >&2
echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/busybox-static'
echo >&2
BUSYBOX=$(which busybox)
[ "$BUSYBOX" ] || {
echo "Sorry, I could not locate busybox."

View file

@ -1,6 +1,10 @@
#!/usr/bin/env bash
set -e
echo >&2
echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/debootstrap'
echo >&2
variant='minbase'
include='iproute,iputils-ping'
arch='amd64' # intentionally undocumented for now

View file

@ -8,6 +8,10 @@
set -e
echo >&2
echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/rinse'
echo >&2
repo="$1"
distro="$2"
mirror="$3"

105
contrib/mkimage.sh Executable file
View file

@ -0,0 +1,105 @@
#!/usr/bin/env bash
set -e
mkimg="$(basename "$0")"
usage() {
echo >&2 "usage: $mkimg [-d dir] [-t tag] script [script-args]"
echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie"
echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal trusty"
echo >&2 " $mkimg -t someuser/busybox busybox-static"
echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5"
exit 1
}
scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage"
optTemp=$(getopt --options '+d:t:h' --longoptions 'dir:,tag:,help' --name "$mkimg" -- "$@")
eval set -- "$optTemp"
unset optTemp
dir=
tag=
while true; do
case "$1" in
-d|--dir) dir="$2" ; shift 2 ;;
-t|--tag) tag="$2" ; shift 2 ;;
-h|--help) usage ;;
--) shift ; break ;;
esac
done
script="$1"
[ "$script" ] || usage
shift
if [ ! -x "$scriptDir/$script" ]; then
echo >&2 "error: $script does not exist or is not executable"
echo >&2 " see $scriptDir for possible scripts"
exit 1
fi
# don't mistake common scripts like .febootstrap-minimize as image-creators
if [[ "$script" == .* ]]; then
echo >&2 "error: $script is a script helper, not a script"
echo >&2 " see $scriptDir for possible scripts"
exit 1
fi
delDir=
if [ -z "$dir" ]; then
dir="$(mktemp -d ${TMPDIR:-/tmp}/docker-mkimage.XXXXXXXXXX)"
delDir=1
fi
rootfsDir="$dir/rootfs"
( set -x; mkdir -p "$rootfsDir" )
# pass all remaining arguments to $script
"$scriptDir/$script" "$rootfsDir" "$@"
# Docker mounts tmpfs at /dev and procfs at /proc so we can remove them
rm -rf "$rootfsDir/dev" "$rootfsDir/proc"
mkdir -p "$rootfsDir/dev" "$rootfsDir/proc"
# make sure /etc/resolv.conf has something useful in it
mkdir -p "$rootfsDir/etc"
cat > "$rootfsDir/etc/resolv.conf" <<'EOF'
nameserver 8.8.8.8
nameserver 8.8.4.4
EOF
tarFile="$dir/rootfs.tar.xz"
touch "$tarFile"
(
set -x
tar --numeric-owner -caf "$tarFile" -C "$rootfsDir" --transform='s,^./,,' .
)
echo >&2 "+ cat > '$dir/Dockerfile'"
cat > "$dir/Dockerfile" <<'EOF'
FROM scratch
ADD rootfs.tar.xz /
EOF
# if our generated image has a decent shell, let's set a default command
for shell in /bin/bash /usr/bin/fish /usr/bin/zsh /bin/sh; do
if [ -x "$rootfsDir/$shell" ]; then
( set -x; echo 'CMD ["'"$shell"'"]' >> "$dir/Dockerfile" )
break
fi
done
( set -x; rm -rf "$rootfsDir" )
if [ "$tag" ]; then
( set -x; docker build -t "$tag" "$dir" )
elif [ "$delDir" ]; then
# if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_
( set -x; docker build "$dir" )
fi
if [ "$delDir" ]; then
( set -x; rm -rf "$dir" )
fi

View file

@ -0,0 +1,28 @@
#!/usr/bin/env bash
set -e
rootfsDir="$1"
shift
(
cd "$rootfsDir"
# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target"
# locales
rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
# docs
rm -rf usr/share/{man,doc,info,gnome/help}
# cracklib
#rm -rf usr/share/cracklib
# i18n
rm -rf usr/share/i18n
# yum cache
rm -rf var/cache/yum
mkdir -p --mode=0755 var/cache/yum
# sln
rm -rf sbin/sln
# ldconfig
#rm -rf sbin/ldconfig
rm -rf etc/ld.so.cache var/cache/ldconfig
mkdir -p --mode=0755 var/cache/ldconfig
)

34
contrib/mkimage/busybox-static Executable file
View file

@ -0,0 +1,34 @@
#!/usr/bin/env bash
set -e
rootfsDir="$1"
shift
busybox="$(which busybox 2>/dev/null || true)"
if [ -z "$busybox" ]; then
echo >&2 'error: busybox: not found'
echo >&2 ' install it with your distribution "busybox-static" package'
exit 1
fi
if ! ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then
echo >&2 "error: '$busybox' appears to be a dynamic executable"
echo >&2 ' you should install your distribution "busybox-static" package instead'
exit 1
fi
mkdir -p "$rootfsDir/bin"
rm -f "$rootfsDir/bin/busybox" # just in case
cp "$busybox" "$rootfsDir/bin/busybox"
(
cd "$rootfsDir"
IFS=$'\n'
modules=( $(bin/busybox --list-modules) )
unset IFS
for module in "${modules[@]}"; do
mkdir -p "$(dirname "$module")"
ln -sf /bin/busybox "$module"
done
)

125
contrib/mkimage/debootstrap Executable file
View file

@ -0,0 +1,125 @@
#!/usr/bin/env bash
set -e
rootfsDir="$1"
shift
# we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap
before=()
while [ $# -gt 0 ] && [[ "$1" == -* ]]; do
before+=( "$1" )
shift
done
suite="$1"
shift
(
set -x
debootstrap "${before[@]}" "$suite" "$rootfsDir" "$@"
)
# now for some Docker-specific tweaks
# prevent init scripts from running during install/update
echo >&2 "+ cat > '$rootfsDir/usr/sbin/policy-rc.d'"
cat > "$rootfsDir/usr/sbin/policy-rc.d" <<'EOF'
#!/bin/sh
exit 101
EOF
chmod +x "$rootfsDir/usr/sbin/policy-rc.d"
# prevent upstart scripts from running during install/update
(
set -x
chroot "$rootfsDir" dpkg-divert --local --rename --add /sbin/initctl
ln -sf /bin/true "$rootfsDir/sbin/initctl"
)
# shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB)
( set -x; chroot "$rootfsDir" apt-get clean )
# Ubuntu 10.04 sucks... :)
if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then
# force dpkg not to call sync() after package extraction (speeding up installs)
echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'"
echo 'force-unsafe-io' > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup"
fi
if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then
# _keep_ us lean by effectively running "apt-get clean" after every install
aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";'
echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'"
cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF
DPkg::Post-Invoke { ${aptGetClean} };
APT::Update::Post-Invoke { ${aptGetClean} };
Dir::Cache::pkgcache "";
Dir::Cache::srcpkgcache "";
EOF
# remove apt-cache translations for fast "apt-get update"
echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'"
echo 'Acquire::Languages "none";' > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages"
fi
if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then
# tweak sources.list, where appropriate
lsbDist=
if [ -z "$lsbDist" -a -r "$rootfsDir/etc/os-release" ]; then
lsbDist="$(. "$rootfsDir/etc/os-release" && echo "$ID")"
fi
if [ -z "$lsbDist" -a -r "$rootfsDir/etc/lsb-release" ]; then
lsbDist="$(. "$rootfsDir/etc/lsb-release" && echo "$DISTRIB_ID")"
fi
if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then
lsbDist='Debian'
fi
case "$lsbDist" in
debian|Debian)
# updates and security!
if [ "$suite" != 'sid' -a "$suite" != 'unstable' ]; then
(
set -x
sed -i "p; s/ $suite main$/ ${suite}-updates main/" "$rootfsDir/etc/apt/sources.list"
echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list"
)
fi
;;
ubuntu|Ubuntu)
# add the universe, updates, and security repositories
(
set -x
sed -i "
s/ $suite main$/ $suite main universe/; p;
s/ $suite main/ ${suite}-updates main/; p;
s/ $suite-updates main/ ${suite}-security main/
" "$rootfsDir/etc/apt/sources.list"
)
;;
tanglu|Tanglu)
# add the updates repository
if [ "$suite" != 'devel' ]; then
(
set -x
sed -i "p; s/ $suite main$/ ${suite}-updates main/" "$rootfsDir/etc/apt/sources.list"
)
fi
;;
steamos|SteamOS)
# add contrib and non-free
(
set -x
sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list"
)
;;
esac
fi
# make sure we're fully up-to-date, too
(
set -x
chroot "$rootfsDir" apt-get update
chroot "$rootfsDir" apt-get dist-upgrade -y
)

25
contrib/mkimage/rinse Executable file
View file

@ -0,0 +1,25 @@
#!/usr/bin/env bash
set -e
rootfsDir="$1"
shift
# specifying --arch below is safe because "$@" can override it and the "latest" one wins :)
(
set -x
rinse --directory "$rootfsDir" --arch amd64 "$@"
)
"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir"
if [ -d "$rootfsDir/etc/sysconfig" ]; then
# allow networking init scripts inside the container to work without extra steps
echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network"
fi
# make sure we're fully up-to-date, too
(
set -x
chroot "$rootfsDir" yum update -y
)

View file

@ -31,20 +31,20 @@ stop on runlevel [!2345]
respawn
script
/usr/bin/docker -d -H=tcp://0.0.0.0:4243
/usr/bin/docker -d -H=tcp://0.0.0.0:2375
end script
```
Once that's done, you need to set up a SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal:
```
ssh -L 4243:localhost:4243 -p 2222 vagrant@localhost
ssh -L 2375:localhost:2375 -p 2222 vagrant@localhost
```
(The first 4243 is what your host can connect to, the second 4243 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.)
(The first 2375 is what your host can connect to, the second 2375 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.)
Note that because the port has been changed, to run docker commands from within the command line you must run them like this:
```
sudo docker -H 0.0.0.0:4243 < commands for docker >
sudo docker -H 0.0.0.0:2375 < commands for docker >
```

10
daemon/README.md Normal file
View file

@ -0,0 +1,10 @@
This directory contains code pertaining to running containers and storing images.
Code pertaining to running containers:
- execdriver
- networkdriver
Code pertaining to storing images:
- graphdriver

View file

@ -9,6 +9,7 @@ import (
"log"
"os"
"path"
"path/filepath"
"strings"
"sync"
"syscall"
@ -22,8 +23,10 @@ import (
"github.com/dotcloud/docker/links"
"github.com/dotcloud/docker/nat"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/libcontainer/devices"
"github.com/dotcloud/docker/pkg/networkfs/etchosts"
"github.com/dotcloud/docker/pkg/networkfs/resolvconf"
"github.com/dotcloud/docker/pkg/symlink"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/utils"
)
@ -81,42 +84,6 @@ type Container struct {
activeLinks map[string]*links.Link
}
// Inject the io.Reader at the given path. Note: do not close the reader
func (container *Container) Inject(file io.Reader, pth string) error {
if err := container.Mount(); err != nil {
return fmt.Errorf("inject: error mounting container %s: %s", container.ID, err)
}
defer container.Unmount()
// Return error if path exists
destPath := path.Join(container.basefs, pth)
if _, err := os.Stat(destPath); err == nil {
// Since err is nil, the path could be stat'd and it exists
return fmt.Errorf("%s exists", pth)
} else if !os.IsNotExist(err) {
// Expect err might be that the file doesn't exist, so
// if it's some other error, return that.
return err
}
// Make sure the directory exists
if err := os.MkdirAll(path.Join(container.basefs, path.Dir(pth)), 0755); err != nil {
return err
}
dest, err := os.Create(destPath)
if err != nil {
return err
}
defer dest.Close()
if _, err := io.Copy(dest, file); err != nil {
return err
}
return nil
}
func (container *Container) FromDisk() error {
data, err := ioutil.ReadFile(container.jsonPath())
if err != nil {
@ -170,6 +137,16 @@ func (container *Container) WriteHostConfig() (err error) {
return ioutil.WriteFile(container.hostConfigPath(), data, 0666)
}
func (container *Container) getResourcePath(path string) string {
cleanPath := filepath.Join("/", path)
return filepath.Join(container.basefs, cleanPath)
}
func (container *Container) getRootResourcePath(path string) string {
cleanPath := filepath.Join("/", path)
return filepath.Join(container.root, cleanPath)
}
func populateCommand(c *Container, env []string) error {
var (
en *execdriver.Network
@ -215,20 +192,23 @@ func populateCommand(c *Container, env []string) error {
Memory: c.Config.Memory,
MemorySwap: c.Config.MemorySwap,
CpuShares: c.Config.CpuShares,
Cpuset: c.Config.Cpuset,
}
c.command = &execdriver.Command{
ID: c.ID,
Privileged: c.hostConfig.Privileged,
Rootfs: c.RootfsPath(),
InitPath: "/.dockerinit",
Entrypoint: c.Path,
Arguments: c.Args,
WorkingDir: c.Config.WorkingDir,
Network: en,
Tty: c.Config.Tty,
User: c.Config.User,
Config: context,
Resources: resources,
ID: c.ID,
Privileged: c.hostConfig.Privileged,
Rootfs: c.RootfsPath(),
InitPath: "/.dockerinit",
Entrypoint: c.Path,
Arguments: c.Args,
WorkingDir: c.Config.WorkingDir,
Network: en,
Tty: c.Config.Tty,
User: c.Config.User,
Config: context,
Resources: resources,
AllowedDevices: devices.DefaultAllowedDevices,
AutoCreatedDevices: devices.DefaultAutoCreatedDevices,
}
c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
c.command.Env = env
@ -344,7 +324,7 @@ func (container *Container) StderrLogPipe() io.ReadCloser {
}
func (container *Container) buildHostnameFile() error {
container.HostnamePath = path.Join(container.root, "hostname")
container.HostnamePath = container.getRootResourcePath("hostname")
if container.Config.Domainname != "" {
return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644)
}
@ -356,7 +336,7 @@ func (container *Container) buildHostnameAndHostsFiles(IP string) error {
return err
}
container.HostsPath = path.Join(container.root, "hosts")
container.HostsPath = container.getRootResourcePath("hosts")
extraContent := make(map[string]string)
@ -455,6 +435,20 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
utils.Errorf("Error running container: %s", err)
}
// Cleanup
container.cleanup()
// Re-create a brand new stdin pipe once the container exited
if container.Config.OpenStdin {
container.stdin, container.stdinPipe = io.Pipe()
}
if container.daemon != nil && container.daemon.srv != nil {
container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image))
}
close(container.waitLock)
if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() {
container.State.SetStopped(exitCode)
@ -470,20 +464,6 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
}
}
// Cleanup
container.cleanup()
// Re-create a brand new stdin pipe once the container exited
if container.Config.OpenStdin {
container.stdin, container.stdinPipe = io.Pipe()
}
if container.daemon != nil && container.daemon.srv != nil {
container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image))
}
close(container.waitLock)
return err
}
@ -522,12 +502,37 @@ func (container *Container) KillSig(sig int) error {
container.Lock()
defer container.Unlock()
// We could unpause the container for them rather than returning this error
if container.State.IsPaused() {
return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID)
}
if !container.State.IsRunning() {
return nil
}
return container.daemon.Kill(container, sig)
}
func (container *Container) Pause() error {
if container.State.IsPaused() {
return fmt.Errorf("Container %s is already paused", container.ID)
}
if !container.State.IsRunning() {
return fmt.Errorf("Container %s is not running", container.ID)
}
return container.daemon.Pause(container)
}
func (container *Container) Unpause() error {
if !container.State.IsPaused() {
return fmt.Errorf("Container %s is not paused", container.ID)
}
if !container.State.IsRunning() {
return fmt.Errorf("Container %s is not running", container.ID)
}
return container.daemon.Unpause(container)
}
func (container *Container) Kill() error {
if !container.State.IsRunning() {
return nil
@ -571,6 +576,7 @@ func (container *Container) Stop(seconds int) error {
log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
// 3. If it doesn't, then send SIGKILL
if err := container.Kill(); err != nil {
container.Wait()
return err
}
}
@ -640,7 +646,7 @@ func (container *Container) Export() (archive.Archive, error) {
}
func (container *Container) WaitTimeout(timeout time.Duration) error {
done := make(chan bool)
done := make(chan bool, 1)
go func() {
container.Wait()
done <- true
@ -659,6 +665,8 @@ func (container *Container) Mount() error {
}
func (container *Container) Changes() ([]archive.Change, error) {
container.Lock()
defer container.Unlock()
return container.daemon.Changes(container)
}
@ -674,7 +682,7 @@ func (container *Container) Unmount() error {
}
func (container *Container) logPath(name string) string {
return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.ID, name))
return container.getRootResourcePath(fmt.Sprintf("%s-%s.log", container.ID, name))
}
func (container *Container) ReadLog(name string) (io.Reader, error) {
@ -682,11 +690,11 @@ func (container *Container) ReadLog(name string) (io.Reader, error) {
}
func (container *Container) hostConfigPath() string {
return path.Join(container.root, "hostconfig.json")
return container.getRootResourcePath("hostconfig.json")
}
func (container *Container) jsonPath() string {
return path.Join(container.root, "config.json")
return container.getRootResourcePath("config.json")
}
// This method must be exported to be used from the lxc template
@ -745,8 +753,16 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
if err := container.Mount(); err != nil {
return nil, err
}
var filter []string
basePath := path.Join(container.basefs, resource)
resPath := container.getResourcePath(resource)
basePath, err := symlink.FollowSymlinkInScope(resPath, container.basefs)
if err != nil {
container.Unmount()
return nil, err
}
stat, err := os.Stat(basePath)
if err != nil {
container.Unmount()
@ -766,6 +782,7 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
Includes: filter,
})
if err != nil {
container.Unmount()
return nil, err
}
return utils.NewReadCloserWrapper(archive, func() error {
@ -844,7 +861,7 @@ func (container *Container) setupContainerDns() error {
} else if len(daemon.config.DnsSearch) > 0 {
dnsSearch = daemon.config.DnsSearch
}
container.ResolvConfPath = path.Join(container.root, "resolv.conf")
container.ResolvConfPath = container.getRootResourcePath("resolv.conf")
return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch)
} else {
container.ResolvConfPath = "/etc/resolv.conf"
@ -865,9 +882,17 @@ func (container *Container) initializeNetworking() error {
container.Config.Hostname = parts[0]
container.Config.Domainname = parts[1]
}
container.HostsPath = "/etc/hosts"
return container.buildHostnameFile()
content, err := ioutil.ReadFile("/etc/hosts")
if os.IsNotExist(err) {
return container.buildHostnameAndHostsFiles("")
}
if err != nil {
return err
}
container.HostsPath = container.getRootResourcePath("hosts")
return ioutil.WriteFile(container.HostsPath, content, 0644)
} else if container.hostConfig.NetworkMode.IsContainer() {
// we need to get the hosts files from the container to join
nc, err := container.getNetworkedContainer()
@ -982,12 +1007,12 @@ func (container *Container) setupWorkingDirectory() error {
if container.Config.WorkingDir != "" {
container.Config.WorkingDir = path.Clean(container.Config.WorkingDir)
pthInfo, err := os.Stat(path.Join(container.basefs, container.Config.WorkingDir))
pthInfo, err := os.Stat(container.getResourcePath(container.Config.WorkingDir))
if err != nil {
if !os.IsNotExist(err) {
return err
}
if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil {
if err := os.MkdirAll(container.getResourcePath(container.Config.WorkingDir), 0755); err != nil {
return err
}
}
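The new `getResourcePath`/`getRootResourcePath` helpers above scope every user-supplied path under the container's filesystem by first joining it onto `/`, which neutralises `..` components; `Copy` additionally resolves symlinks inside that scope via `symlink.FollowSymlinkInScope`. A stand-alone sketch of the scoping idea (illustrative only, not the committed helper):
```
package main

import (
	"fmt"
	"path/filepath"
)

// scopedPath mirrors the idea of getResourcePath: clean the path against
// "/" first so "../" cannot climb out, then anchor it under the root.
func scopedPath(root, userPath string) string {
	clean := filepath.Join("/", userPath) // "../../etc/passwd" -> "/etc/passwd"
	return filepath.Join(root, clean)
}

func main() {
	root := "/var/lib/docker/containers/abc/rootfs"
	fmt.Println(scopedPath(root, "../../etc/passwd"))
	// -> /var/lib/docker/containers/abc/rootfs/etc/passwd
}
```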

View file

@ -1,7 +1,6 @@
package daemon
import (
"container/list"
"fmt"
"io"
"io/ioutil"
@ -28,7 +27,7 @@ import (
"github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/pkg/graphdb"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/mount"
"github.com/dotcloud/docker/pkg/namesgenerator"
"github.com/dotcloud/docker/pkg/networkfs/resolvconf"
"github.com/dotcloud/docker/pkg/selinux"
"github.com/dotcloud/docker/pkg/sysinfo"
@ -47,10 +46,43 @@ var (
validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
)
type contStore struct {
s map[string]*Container
sync.Mutex
}
func (c *contStore) Add(id string, cont *Container) {
c.Lock()
c.s[id] = cont
c.Unlock()
}
func (c *contStore) Get(id string) *Container {
c.Lock()
res := c.s[id]
c.Unlock()
return res
}
func (c *contStore) Delete(id string) {
c.Lock()
delete(c.s, id)
c.Unlock()
}
func (c *contStore) List() []*Container {
containers := new(History)
for _, cont := range c.s {
containers.Add(cont)
}
containers.Sort()
return *containers
}
type Daemon struct {
repository string
sysInitPath string
containers *list.List
containers *contStore
graph *graph.Graph
repositories *graph.TagStore
idIndex *utils.TruncIndex
@ -64,38 +96,14 @@ type Daemon struct {
execDriver execdriver.Driver
}
// Mountpoints should be private to the container
func remountPrivate(mountPoint string) error {
mounted, err := mount.Mounted(mountPoint)
if err != nil {
return err
}
if !mounted {
if err := mount.Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
return err
}
}
return mount.ForceMount("", mountPoint, "none", "private")
// Install installs daemon capabilities to eng.
func (daemon *Daemon) Install(eng *engine.Engine) error {
return eng.Register("container_inspect", daemon.ContainerInspect)
}
// List returns an array of all containers registered in the daemon.
func (daemon *Daemon) List() []*Container {
containers := new(History)
for e := daemon.containers.Front(); e != nil; e = e.Next() {
containers.Add(e.Value.(*Container))
}
return *containers
}
func (daemon *Daemon) getContainerElement(id string) *list.Element {
for e := daemon.containers.Front(); e != nil; e = e.Next() {
container := e.Value.(*Container)
if container.ID == id {
return e
}
}
return nil
return daemon.containers.List()
}
// Get looks for a container by the specified ID or name, and returns it.
@ -110,11 +118,7 @@ func (daemon *Daemon) Get(name string) *Container {
return nil
}
e := daemon.getContainerElement(id)
if e == nil {
return nil
}
return e.Value.(*Container)
return daemon.containers.Get(id)
}
// Exists returns a true if a container of the specified ID or name exists,
@ -141,7 +145,13 @@ func (daemon *Daemon) load(id string) (*Container, error) {
}
// Register makes a container object usable by the daemon as <container.ID>
// This is a wrapper for register
func (daemon *Daemon) Register(container *Container) error {
return daemon.register(container, true, nil)
}
// register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) register(container *Container, updateSuffixarray bool, containersToStart *[]*Container) error {
if container.daemon != nil || daemon.Exists(container.ID) {
return fmt.Errorf("Container is already loaded")
}
@ -164,8 +174,15 @@ func (daemon *Daemon) Register(container *Container) error {
container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
}
// done
daemon.containers.PushBack(container)
daemon.idIndex.Add(container.ID)
daemon.containers.Add(container.ID, container)
// don't update the Suffixarray if we're starting up
// we'll waste time if we update it for every container
if updateSuffixarray {
daemon.idIndex.Add(container.ID)
} else {
daemon.idIndex.AddWithoutSuffixarrayUpdate(container.ID)
}
// FIXME: if the container is supposed to be running but is not, auto restart it?
// if so, then we need to restart monitor and init a new lock
@ -203,13 +220,13 @@ func (daemon *Daemon) Register(container *Container) error {
if !info.IsRunning() {
utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
if daemon.config.AutoRestart {
utils.Debugf("Restarting")
utils.Debugf("Marking as restarting")
if err := container.Unmount(); err != nil {
utils.Debugf("restart unmount error %s", err)
}
if err := container.Start(); err != nil {
return err
if containersToStart != nil {
*containersToStart = append(*containersToStart, container)
}
} else {
utils.Debugf("Marking as stopped")
@ -231,20 +248,15 @@ func (daemon *Daemon) Register(container *Container) error {
func (daemon *Daemon) ensureName(container *Container) error {
if container.Name == "" {
name, err := generateRandomName(daemon)
name, err := daemon.generateNewName(container.ID)
if err != nil {
name = utils.TruncateID(container.ID)
return err
}
container.Name = name
if err := container.ToDisk(); err != nil {
utils.Debugf("Error saving container name %s", err)
}
if !daemon.containerGraph.Exists(name) {
if _, err := daemon.containerGraph.Set(name, container.ID); err != nil {
utils.Debugf("Setting default id - %s", err)
}
}
}
return nil
}
@ -264,7 +276,7 @@ func (daemon *Daemon) Destroy(container *Container) error {
return fmt.Errorf("The given container is <nil>")
}
element := daemon.getContainerElement(container.ID)
element := daemon.containers.Get(container.ID)
if element == nil {
return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID)
}
@ -275,7 +287,11 @@ func (daemon *Daemon) Destroy(container *Container) error {
// Deregister the container before removing its directory, to avoid race conditions
daemon.idIndex.Delete(container.ID)
daemon.containers.Remove(element)
daemon.containers.Delete(container.ID)
if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
utils.Debugf("Unable to remove container from link graph: %s", err)
}
if err := daemon.driver.Remove(container.ID); err != nil {
return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err)
@ -286,10 +302,6 @@ func (daemon *Daemon) Destroy(container *Container) error {
return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err)
}
if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
utils.Debugf("Unable to remove container from link graph: %s", err)
}
if err := os.RemoveAll(container.root); err != nil {
return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
}
@ -299,20 +311,25 @@ func (daemon *Daemon) Destroy(container *Container) error {
}
func (daemon *Daemon) restore() error {
if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
var (
debug = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "")
containers = make(map[string]*Container)
currentDriver = daemon.driver.String()
containersToStart = []*Container{}
)
if !debug {
fmt.Printf("Loading containers: ")
}
dir, err := ioutil.ReadDir(daemon.repository)
if err != nil {
return err
}
containers := make(map[string]*Container)
currentDriver := daemon.driver.String()
for _, v := range dir {
id := v.Name()
container, err := daemon.load(id)
if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
if !debug {
fmt.Print(".")
}
if err != nil {
@ -329,20 +346,16 @@ func (daemon *Daemon) restore() error {
}
}
register := func(container *Container) {
if err := daemon.Register(container); err != nil {
utils.Debugf("Failed to register container %s: %s", container.ID, err)
}
}
if entities := daemon.containerGraph.List("/", -1); entities != nil {
for _, p := range entities.Paths() {
if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
if !debug {
fmt.Print(".")
}
e := entities[p]
if container, ok := containers[e.ID()]; ok {
register(container)
if err := daemon.register(container, false, &containersToStart); err != nil {
utils.Debugf("Failed to register container %s: %s", container.ID, err)
}
delete(containers, e.ID())
}
}
@ -351,18 +364,25 @@ func (daemon *Daemon) restore() error {
// Any containers that are left over do not exist in the graph
for _, container := range containers {
// Try to set the default name for a container if it exists prior to links
container.Name, err = generateRandomName(daemon)
container.Name, err = daemon.generateNewName(container.ID)
if err != nil {
container.Name = utils.TruncateID(container.ID)
}
if _, err := daemon.containerGraph.Set(container.Name, container.ID); err != nil {
utils.Debugf("Setting default id - %s", err)
}
register(container)
if err := daemon.register(container, false, &containersToStart); err != nil {
utils.Debugf("Failed to register container %s: %s", container.ID, err)
}
}
if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
daemon.idIndex.UpdateSuffixarray()
for _, container := range containersToStart {
utils.Debugf("Starting container %d", container.ID)
if err := container.Start(); err != nil {
utils.Debugf("Failed to start container %s: %s", container.ID, err)
}
}
if !debug {
fmt.Printf(": done.\n")
}
@ -450,42 +470,75 @@ func (daemon *Daemon) generateIdAndName(name string) (string, string, error) {
)
if name == "" {
name, err = generateRandomName(daemon)
if err != nil {
name = utils.TruncateID(id)
}
} else {
if !validContainerNamePattern.MatchString(name) {
return "", "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)
if name, err = daemon.generateNewName(id); err != nil {
return "", "", err
}
return id, name, nil
}
if name, err = daemon.reserveName(id, name); err != nil {
return "", "", err
}
return id, name, nil
}
func (daemon *Daemon) reserveName(id, name string) (string, error) {
if !validContainerNamePattern.MatchString(name) {
return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)
}
if name[0] != '/' {
name = "/" + name
}
// Set the entity in the graph using the default name specified
if _, err := daemon.containerGraph.Set(name, id); err != nil {
if !graphdb.IsNonUniqueNameError(err) {
return "", "", err
return "", err
}
conflictingContainer, err := daemon.GetByName(name)
if err != nil {
if strings.Contains(err.Error(), "Could not find entity") {
return "", "", err
return "", err
}
// Remove name and continue starting the container
if err := daemon.containerGraph.Delete(name); err != nil {
return "", "", err
return "", err
}
} else {
nameAsKnownByUser := strings.TrimPrefix(name, "/")
return "", "", fmt.Errorf(
return "", fmt.Errorf(
"Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", nameAsKnownByUser,
utils.TruncateID(conflictingContainer.ID), nameAsKnownByUser)
}
}
return id, name, nil
return name, nil
}
func (daemon *Daemon) generateNewName(id string) (string, error) {
var name string
for i := 0; i < 6; i++ {
name = namesgenerator.GetRandomName(i)
if name[0] != '/' {
name = "/" + name
}
if _, err := daemon.containerGraph.Set(name, id); err != nil {
if !graphdb.IsNonUniqueNameError(err) {
return "", err
}
continue
}
return name, nil
}
name = "/" + utils.TruncateID(id)
if _, err := daemon.containerGraph.Set(name, id); err != nil {
return "", err
}
return name, nil
}
func (daemon *Daemon) generateHostname(id string, config *runconfig.Config) {
@ -592,15 +645,18 @@ func (daemon *Daemon) Commit(container *Container, repository, tag, comment, aut
containerID, containerImage string
containerConfig *runconfig.Config
)
if container != nil {
containerID = container.ID
containerImage = container.Image
containerConfig = container.Config
}
img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
if err != nil {
return nil, err
}
// Register the image if needed
if repository != "" {
if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil {
@ -629,11 +685,11 @@ func (daemon *Daemon) GetByName(name string) (*Container, error) {
if entity == nil {
return nil, fmt.Errorf("Could not find entity for %s", name)
}
e := daemon.getContainerElement(entity.ID())
e := daemon.containers.Get(entity.ID())
if e == nil {
return nil, fmt.Errorf("Could not find container for entity id %s", entity.ID())
}
return e.Value.(*Container), nil
return e, nil
}
func (daemon *Daemon) Children(name string) (map[string]*Container, error) {
@ -667,6 +723,35 @@ func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error
return nil
}
func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error {
if hostConfig != nil && hostConfig.Links != nil {
for _, l := range hostConfig.Links {
parts, err := utils.PartParser("name:alias", l)
if err != nil {
return err
}
child, err := daemon.GetByName(parts["name"])
if err != nil {
return err
}
if child == nil {
return fmt.Errorf("Could not get container for %s", parts["name"])
}
if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil {
return err
}
}
// After we load all the links into the daemon
// set them to nil on the hostconfig
hostConfig.Links = nil
if err := container.WriteHostConfig(); err != nil {
return err
}
}
return nil
}
// FIXME: harmonize with NewGraph()
func NewDaemon(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) {
daemon, err := NewDaemonFromDirectory(config, eng)
@ -680,20 +765,22 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D
if !config.EnableSelinuxSupport {
selinux.SetDisabled()
}
// Create the root directory if it doesn't exists
if err := os.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
// Set the default driver
graphdriver.DefaultDriver = config.GraphDriver
// Load storage driver
driver, err := graphdriver.New(config.Root)
driver, err := graphdriver.New(config.Root, config.GraphOptions)
if err != nil {
return nil, err
}
utils.Debugf("Using graph driver %s", driver)
if err := remountPrivate(config.Root); err != nil {
return nil, err
}
daemonRepo := path.Join(config.Root, "containers")
if err := os.MkdirAll(daemonRepo, 0700); err != nil && !os.IsExist(err) {
@ -713,7 +800,7 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D
// We don't want to use a complex driver like aufs or devmapper
// for volumes, just a plain filesystem
volumesDriver, err := graphdriver.GetDriver("vfs", config.Root)
volumesDriver, err := graphdriver.GetDriver("vfs", config.Root, config.GraphOptions)
if err != nil {
return nil, err
}
@ -777,7 +864,7 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D
daemon := &Daemon{
repository: daemonRepo,
containers: list.New(),
containers: &contStore{s: make(map[string]*Container)},
graph: g,
repositories: repositories,
idIndex: utils.NewTruncIndex([]string{}),
@ -914,6 +1001,22 @@ func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback e
return daemon.execDriver.Run(c.command, pipes, startCallback)
}
func (daemon *Daemon) Pause(c *Container) error {
if err := daemon.execDriver.Pause(c.command); err != nil {
return err
}
c.State.SetPaused()
return nil
}
func (daemon *Daemon) Unpause(c *Container) error {
if err := daemon.execDriver.Unpause(c.command); err != nil {
return err
}
c.State.SetUnpaused()
return nil
}
func (daemon *Daemon) Kill(c *Container, sig int) error {
return daemon.execDriver.Kill(c.command, sig)
}
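The `generateNewName` helper added above retries a handful of generated names before falling back to the truncated container ID. A small sketch of that retry strategy, with a hypothetical `reserve` callback standing in for `daemon.containerGraph.Set`:
```
package main

import "fmt"

// pickName tries a few candidate names and falls back to the truncated ID
// when every candidate is already reserved, mirroring generateNewName.
func pickName(id string, reserve func(string) error) string {
	for i := 0; i < 6; i++ {
		// stand-in for namesgenerator.GetRandomName(i)
		name := fmt.Sprintf("/candidate_%d", i)
		if reserve(name) == nil {
			return name
		}
	}
	return "/" + id[:12]
}

func main() {
	taken := map[string]bool{"/candidate_0": true, "/candidate_1": true}
	reserve := func(name string) error {
		if taken[name] {
			return fmt.Errorf("name %s is already in use", name)
		}
		taken[name] = true
		return nil
	}
	fmt.Println(pickName("4c01db0b339c4fef56bd", reserve)) // /candidate_2
}
```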

View file

@ -5,6 +5,8 @@ import (
"io"
"os"
"os/exec"
"github.com/dotcloud/docker/pkg/libcontainer/devices"
)
// Context is a generic key value pair that allows
@ -81,6 +83,8 @@ type TtyTerminal interface {
type Driver interface {
Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code
Kill(c *Command, sig int) error
Pause(c *Command) error
Unpause(c *Command) error
Name() string // Driver name
Info(id string) Info // "temporary" hack (until we move state from core to plugins)
GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container.
@ -103,9 +107,10 @@ type NetworkInterface struct {
}
type Resources struct {
Memory int64 `json:"memory"`
MemorySwap int64 `json:"memory_swap"`
CpuShares int64 `json:"cpu_shares"`
Memory int64 `json:"memory"`
MemorySwap int64 `json:"memory_swap"`
CpuShares int64 `json:"cpu_shares"`
Cpuset string `json:"cpuset"`
}
type Mount struct {
@ -119,20 +124,22 @@ type Mount struct {
type Command struct {
exec.Cmd `json:"-"`
ID string `json:"id"`
Privileged bool `json:"privileged"`
User string `json:"user"`
Rootfs string `json:"rootfs"` // root fs of the container
InitPath string `json:"initpath"` // dockerinit
Entrypoint string `json:"entrypoint"`
Arguments []string `json:"arguments"`
WorkingDir string `json:"working_dir"`
ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver
Tty bool `json:"tty"`
Network *Network `json:"network"`
Config map[string][]string `json:"config"` // generic values that specific drivers can consume
Resources *Resources `json:"resources"`
Mounts []Mount `json:"mounts"`
ID string `json:"id"`
Privileged bool `json:"privileged"`
User string `json:"user"`
Rootfs string `json:"rootfs"` // root fs of the container
InitPath string `json:"initpath"` // dockerinit
Entrypoint string `json:"entrypoint"`
Arguments []string `json:"arguments"`
WorkingDir string `json:"working_dir"`
ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver
Tty bool `json:"tty"`
Network *Network `json:"network"`
Config map[string][]string `json:"config"` // generic values that specific drivers can consume
Resources *Resources `json:"resources"`
Mounts []Mount `json:"mounts"`
AllowedDevices []*devices.Device `json:"allowed_devices"`
AutoCreatedDevices []*devices.Device `json:"autocreated_devices"`
Terminal Terminal `json:"-"` // standard or tty terminal
Console string `json:"-"` // dev/console path

View file

@ -12,7 +12,7 @@ import (
func NewDriver(name, root, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) {
switch name {
case "lxc":
// we want to five the lxc driver the full docker root because it needs
// we want to give the lxc driver the full docker root because it needs
// to access and write config and template files in /var/lib/docker/containers/*
// to be backwards compatible
return lxc.NewDriver(root, sysInfo.AppArmor)

View file

@ -9,14 +9,16 @@ import (
"os/exec"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"time"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/pkg/cgroups"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/libcontainer/cgroups"
"github.com/dotcloud/docker/pkg/libcontainer/mount/nodes"
"github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/utils"
)
@ -25,6 +27,7 @@ const DriverName = "lxc"
func init() {
execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error {
runtime.LockOSThread()
if err := setupEnv(args); err != nil {
return err
}
@ -159,6 +162,10 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
c.Path = aname
c.Args = append([]string{name}, arg...)
if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
return -1, err
}
if err := c.Start(); err != nil {
return -1, err
}
@ -167,6 +174,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
waitErr error
waitLock = make(chan struct{})
)
go func() {
if err := c.Wait(); err != nil {
if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0
@ -181,9 +189,11 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
if err != nil {
if c.Process != nil {
c.Process.Kill()
c.Wait()
}
return -1, err
}
c.ContainerPid = pid
if startCallback != nil {
@ -208,6 +218,30 @@ func (d *driver) Kill(c *execdriver.Command, sig int) error {
return KillLxc(c.ID, sig)
}
func (d *driver) Pause(c *execdriver.Command) error {
_, err := exec.LookPath("lxc-freeze")
if err == nil {
output, errExec := exec.Command("lxc-freeze", "-n", c.ID).CombinedOutput()
if errExec != nil {
return fmt.Errorf("Err: %s Output: %s", errExec, output)
}
}
return err
}
func (d *driver) Unpause(c *execdriver.Command) error {
_, err := exec.LookPath("lxc-unfreeze")
if err == nil {
output, errExec := exec.Command("lxc-unfreeze", "-n", c.ID).CombinedOutput()
if errExec != nil {
return fmt.Errorf("Err: %s Output: %s", errExec, output)
}
}
return err
}
func (d *driver) Terminate(c *execdriver.Command) error {
return KillLxc(c.ID, 9)
}
@ -268,18 +302,14 @@ func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) (in
}
output, err = d.getInfo(c.ID)
if err != nil {
output, err = d.getInfo(c.ID)
if err == nil {
info, err := parseLxcInfo(string(output))
if err != nil {
return -1, err
}
}
info, err := parseLxcInfo(string(output))
if err != nil {
return -1, err
}
if info.Running {
return info.Pid, nil
if info.Running {
return info.Pid, nil
}
}
time.Sleep(50 * time.Millisecond)
}

View file

@ -88,7 +88,7 @@ func setupNetworking(args *execdriver.InitArgs) error {
return fmt.Errorf("Unable to set up networking, %s is not a valid gateway IP", args.Gateway)
}
if err := netlink.AddDefaultGw(gw); err != nil {
if err := netlink.AddDefaultGw(gw.String(), "eth0"); err != nil {
return fmt.Errorf("Unable to set up networking: %v", err)
}
}

View file

@ -15,7 +15,9 @@ lxc.network.type = veth
lxc.network.link = {{.Network.Interface.Bridge}}
lxc.network.name = eth0
lxc.network.mtu = {{.Network.Mtu}}
{{else if not .Network.HostNetworking}}
{{else if .Network.HostNetworking}}
lxc.network.type = none
{{else}}
# network is disabled (-n=false)
lxc.network.type = empty
lxc.network.flags = up
@ -45,37 +47,10 @@ lxc.cgroup.devices.allow = a
{{else}}
# no implicit access to devices
lxc.cgroup.devices.deny = a
# but allow mknod for any device
lxc.cgroup.devices.allow = c *:* m
lxc.cgroup.devices.allow = b *:* m
# /dev/null and zero
lxc.cgroup.devices.allow = c 1:3 rwm
lxc.cgroup.devices.allow = c 1:5 rwm
# consoles
lxc.cgroup.devices.allow = c 5:1 rwm
lxc.cgroup.devices.allow = c 5:0 rwm
lxc.cgroup.devices.allow = c 4:0 rwm
lxc.cgroup.devices.allow = c 4:1 rwm
# /dev/urandom,/dev/random
lxc.cgroup.devices.allow = c 1:9 rwm
lxc.cgroup.devices.allow = c 1:8 rwm
# /dev/pts/ - pts namespaces are "coming soon"
lxc.cgroup.devices.allow = c 136:* rwm
lxc.cgroup.devices.allow = c 5:2 rwm
# tuntap
lxc.cgroup.devices.allow = c 10:200 rwm
# fuse
#lxc.cgroup.devices.allow = c 10:229 rwm
# rtc
#lxc.cgroup.devices.allow = c 254:0 rwm
#Allow the devices passed to us in the AllowedDevices list.
{{range $allowedDevice := .AllowedDevices}}
lxc.cgroup.devices.allow = {{$allowedDevice.GetCgroupAllowString}}
{{end}}
{{end}}
# standard mount point
@ -126,6 +101,9 @@ lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}}
{{if .Resources.CpuShares}}
lxc.cgroup.cpu.shares = {{.Resources.CpuShares}}
{{end}}
{{if .Resources.Cpuset}}
lxc.cgroup.cpuset.cpus = {{.Resources.Cpuset}}
{{end}}
{{end}}
{{if .Config.lxc}}

View file

@ -3,7 +3,6 @@ package lxc
import (
"bufio"
"fmt"
"github.com/dotcloud/docker/daemon/execdriver"
"io/ioutil"
"math/rand"
"os"
@ -11,6 +10,9 @@ import (
"strings"
"testing"
"time"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/pkg/libcontainer/devices"
)
func TestLXCConfig(t *testing.T) {
@ -47,6 +49,7 @@ func TestLXCConfig(t *testing.T) {
Mtu: 1500,
Interface: nil,
},
AllowedDevices: make([]*devices.Device, 0),
}
p, err := driver.generateLXCConfig(command)
if err != nil {

View file

@ -8,7 +8,7 @@ import (
"strings"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/utils"
"github.com/dotcloud/docker/pkg/units"
)
type Action func(*libcontainer.Container, interface{}, string) error
@ -75,7 +75,7 @@ func memory(container *libcontainer.Container, context interface{}, value string
return fmt.Errorf("cannot set cgroups when they are disabled")
}
v, err := utils.RAMInBytes(value)
v, err := units.RAMInBytes(value)
if err != nil {
return err
}
@ -88,7 +88,7 @@ func memoryReservation(container *libcontainer.Container, context interface{}, v
return fmt.Errorf("cannot set cgroups when they are disabled")
}
v, err := utils.RAMInBytes(value)
v, err := units.RAMInBytes(value)
if err != nil {
return err
}
@ -109,12 +109,19 @@ func memorySwap(container *libcontainer.Container, context interface{}, value st
}
func addCap(container *libcontainer.Container, context interface{}, value string) error {
container.CapabilitiesMask[value] = true
container.Capabilities = append(container.Capabilities, value)
return nil
}
func dropCap(container *libcontainer.Container, context interface{}, value string) error {
container.CapabilitiesMask[value] = false
// If the capability is specified multiple times, remove all instances.
for i, capability := range container.Capabilities {
if capability == value {
container.Capabilities = append(container.Capabilities[:i], container.Capabilities[i+1:]...)
}
}
// The capability wasn't found so we will drop it anyways.
return nil
}
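`dropCap` above removes every occurrence of a capability from the container's slice in place. For readers following along, the same filtering written as a stand-alone helper that builds a fresh slice (an illustration, not the committed code):
```
package main

import "fmt"

// dropAll returns caps with every instance of unwanted removed,
// building a new slice instead of editing the input in place.
func dropAll(caps []string, unwanted string) []string {
	var out []string
	for _, c := range caps {
		if c != unwanted {
			out = append(out, c)
		}
	}
	return out
}

func main() {
	caps := []string{"CHOWN", "MKNOD", "NET_RAW", "MKNOD"}
	fmt.Println(dropAll(caps, "MKNOD")) // [CHOWN NET_RAW]
}
```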

View file

@ -4,8 +4,19 @@ import (
"testing"
"github.com/dotcloud/docker/daemon/execdriver/native/template"
"github.com/dotcloud/docker/pkg/libcontainer"
)
// Checks whether the expected capability is specified in the capabilities.
func hasCapability(expected string, capabilities []string) bool {
for _, capability := range capabilities {
if capability == expected {
return true
}
}
return false
}
func TestSetReadonlyRootFs(t *testing.T) {
var (
container = template.New()
@ -39,10 +50,10 @@ func TestConfigurationsDoNotConflict(t *testing.T) {
t.Fatal(err)
}
if !container1.CapabilitiesMask["NET_ADMIN"] {
if !hasCapability("NET_ADMIN", container1.Capabilities) {
t.Fatal("container one should have NET_ADMIN enabled")
}
if container2.CapabilitiesMask["NET_ADMIN"] {
if hasCapability("NET_ADMIN", container2.Capabilities) {
t.Fatal("container two should not have NET_ADMIN enabled")
}
}
@ -138,10 +149,10 @@ func TestAddCap(t *testing.T) {
t.Fatal(err)
}
if !container.CapabilitiesMask["MKNOD"] {
if !hasCapability("MKNOD", container.Capabilities) {
t.Fatal("container should have MKNOD enabled")
}
if !container.CapabilitiesMask["SYS_ADMIN"] {
if !hasCapability("SYS_ADMIN", container.Capabilities) {
t.Fatal("container should have SYS_ADMIN enabled")
}
}
@ -154,14 +165,12 @@ func TestDropCap(t *testing.T) {
}
)
// enabled all caps like in privileged mode
for key := range container.CapabilitiesMask {
container.CapabilitiesMask[key] = true
}
container.Capabilities = libcontainer.GetAllCapabilities()
if err := ParseConfiguration(container, nil, opts); err != nil {
t.Fatal(err)
}
if container.CapabilitiesMask["MKNOD"] {
if hasCapability("MKNOD", container.Capabilities) {
t.Fatal("container should not have MKNOD enabled")
}
}

View file

@ -3,6 +3,7 @@ package native
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"github.com/dotcloud/docker/daemon/execdriver"
@ -10,6 +11,7 @@ import (
"github.com/dotcloud/docker/daemon/execdriver/native/template"
"github.com/dotcloud/docker/pkg/apparmor"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/devices"
)
// createContainer populates and configures the container type with the
@ -23,6 +25,8 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container
container.WorkingDir = c.WorkingDir
container.Env = c.Env
container.Cgroups.Name = c.ID
container.Cgroups.AllowedDevices = c.AllowedDevices
container.DeviceNodes = c.AutoCreatedDevices
// check to see if we are running in ramdisk to disable pivot root
container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
container.Context["restrictions"] = "true"
@ -34,8 +38,6 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container
if err := d.setPrivileged(container); err != nil {
return nil, err
}
} else {
container.Mounts = append(container.Mounts, libcontainer.Mount{Type: "devtmpfs"})
}
if err := d.setupCgroups(container, c); err != nil {
return nil, err
@ -46,7 +48,13 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container
if err := d.setupLabels(container, c); err != nil {
return nil, err
}
if err := configuration.ParseConfiguration(container, d.activeContainers, c.Config["native"]); err != nil {
cmds := make(map[string]*exec.Cmd)
d.Lock()
for k, v := range d.activeContainers {
cmds[k] = v.cmd
}
d.Unlock()
if err := configuration.ParseConfiguration(container, cmds, c.Config["native"]); err != nil {
return nil, err
}
return container, nil
@ -82,10 +90,14 @@ func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.
}
if c.Network.ContainerID != "" {
cmd := d.activeContainers[c.Network.ContainerID]
if cmd == nil || cmd.Process == nil {
d.Lock()
active := d.activeContainers[c.Network.ContainerID]
d.Unlock()
if active == nil || active.cmd.Process == nil {
return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID)
}
cmd := active.cmd
nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net")
container.Networks = append(container.Networks, &libcontainer.Network{
Type: "netns",
@ -97,11 +109,15 @@ func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.
return nil
}
func (d *driver) setPrivileged(container *libcontainer.Container) error {
for key := range container.CapabilitiesMask {
container.CapabilitiesMask[key] = true
func (d *driver) setPrivileged(container *libcontainer.Container) (err error) {
container.Capabilities = libcontainer.GetAllCapabilities()
container.Cgroups.AllowAllDevices = true
hostDeviceNodes, err := devices.GetHostDeviceNodes()
if err != nil {
return err
}
container.Cgroups.DeviceAccess = true
container.DeviceNodes = hostDeviceNodes
delete(container.Context, "restrictions")
@ -117,6 +133,7 @@ func (d *driver) setupCgroups(container *libcontainer.Container, c *execdriver.C
container.Cgroups.Memory = c.Resources.Memory
container.Cgroups.MemoryReservation = c.Resources.Memory
container.Cgroups.MemorySwap = c.Resources.MemorySwap
container.Cgroups.CpusetCpus = c.Resources.Cpuset
}
return nil
}

View file

@ -7,22 +7,22 @@ import (
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/pkg/apparmor"
"github.com/dotcloud/docker/pkg/cgroups"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/nsinit"
"github.com/dotcloud/docker/pkg/libcontainer/cgroups/fs"
"github.com/dotcloud/docker/pkg/libcontainer/cgroups/systemd"
"github.com/dotcloud/docker/pkg/libcontainer/namespaces"
"github.com/dotcloud/docker/pkg/system"
)
const (
DriverName = "native"
Version = "0.2"
BackupApparmorProfilePath = "apparmor/docker.back" // relative to docker root
DriverName = "native"
Version = "0.2"
)
func init() {
@ -42,35 +42,43 @@ func init() {
if err != nil {
return err
}
syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(args.Pipe))
syncPipe, err := namespaces.NewSyncPipeFromFd(0, uintptr(args.Pipe))
if err != nil {
return err
}
if err := nsinit.Init(container, rootfs, args.Console, syncPipe, args.Args); err != nil {
if err := namespaces.Init(container, rootfs, args.Console, syncPipe, args.Args); err != nil {
return err
}
return nil
})
}
type activeContainer struct {
container *libcontainer.Container
cmd *exec.Cmd
}
type driver struct {
root string
initPath string
activeContainers map[string]*exec.Cmd
activeContainers map[string]*activeContainer
sync.Mutex
}
func NewDriver(root, initPath string) (*driver, error) {
if err := os.MkdirAll(root, 0700); err != nil {
return nil, err
}
// native driver root is at docker_root/execdriver/native. Put apparmor at docker_root
if err := apparmor.InstallDefaultProfile(filepath.Join(root, "../..", BackupApparmorProfilePath)); err != nil {
if err := apparmor.InstallDefaultProfile(); err != nil {
return nil, err
}
return &driver{
root: root,
initPath: initPath,
activeContainers: make(map[string]*exec.Cmd),
activeContainers: make(map[string]*activeContainer),
}, nil
}
@ -80,7 +88,12 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
if err != nil {
return -1, err
}
d.activeContainers[c.ID] = &c.Cmd
d.Lock()
d.activeContainers[c.ID] = &activeContainer{
container: container,
cmd: &c.Cmd,
}
d.Unlock()
var (
dataPath = filepath.Join(d.root, c.ID)
@ -97,8 +110,8 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
term := getTerminal(c, pipes)
return nsinit.Exec(container, term, c.Rootfs, dataPath, args, func(container *libcontainer.Container, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
// we need to join the rootfs because nsinit will setup the rootfs and chroot
return namespaces.Exec(container, term, c.Rootfs, dataPath, args, func(container *libcontainer.Container, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
// we need to join the rootfs because namespaces will setup the rootfs and chroot
initPath := filepath.Join(c.Rootfs, c.InitPath)
c.Path = d.initPath
@ -113,7 +126,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
// set this to nil so that when we set the clone flags anything else is reset
c.SysProcAttr = nil
system.SetCloneFlags(&c.Cmd, uintptr(nsinit.GetNamespaceFlags(container.Namespaces)))
system.SetCloneFlags(&c.Cmd, uintptr(namespaces.GetNamespaceFlags(container.Namespaces)))
c.ExtraFiles = []*os.File{child}
c.Env = container.Env
@ -132,6 +145,30 @@ func (d *driver) Kill(p *execdriver.Command, sig int) error {
return syscall.Kill(p.Process.Pid, syscall.Signal(sig))
}
func (d *driver) Pause(c *execdriver.Command) error {
active := d.activeContainers[c.ID]
if active == nil {
return fmt.Errorf("active container for %s does not exist", c.ID)
}
active.container.Cgroups.Freezer = "FROZEN"
if systemd.UseSystemd() {
return systemd.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer)
}
return fs.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer)
}
func (d *driver) Unpause(c *execdriver.Command) error {
active := d.activeContainers[c.ID]
if active == nil {
return fmt.Errorf("active container for %s does not exist", c.ID)
}
active.container.Cgroups.Freezer = "THAWED"
if systemd.UseSystemd() {
return systemd.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer)
}
return fs.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer)
}
func (d *driver) Terminate(p *execdriver.Command) error {
// lets check the start time for the process
started, err := d.readStartTime(p)
@ -150,6 +187,7 @@ func (d *driver) Terminate(p *execdriver.Command) error {
}
if started == currentStartTime {
err = syscall.Kill(p.Process.Pid, 9)
syscall.Wait4(p.Process.Pid, nil, 0, nil)
}
d.removeContainerRoot(p.ID)
return err
@ -175,41 +213,20 @@ func (d *driver) Name() string {
return fmt.Sprintf("%s-%s", DriverName, Version)
}
// TODO: this can be improved with our driver
// there has to be a better way to do this
func (d *driver) GetPidsForContainer(id string) ([]int, error) {
pids := []int{}
d.Lock()
active := d.activeContainers[id]
d.Unlock()
subsystem := "devices"
cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem)
if err != nil {
return pids, err
}
cgroupDir, err := cgroups.GetThisCgroupDir(subsystem)
if err != nil {
return pids, err
if active == nil {
return nil, fmt.Errorf("active container for %s does not exist", id)
}
c := active.container.Cgroups
filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks")
if _, err := os.Stat(filename); os.IsNotExist(err) {
filename = filepath.Join(cgroupRoot, cgroupDir, "docker", id, "tasks")
if systemd.UseSystemd() {
return systemd.GetPids(c)
}
output, err := ioutil.ReadFile(filename)
if err != nil {
return pids, err
}
for _, p := range strings.Split(string(output), "\n") {
if len(p) == 0 {
continue
}
pid, err := strconv.Atoi(p)
if err != nil {
return pids, fmt.Errorf("Invalid pid '%s': %s", p, err)
}
pids = append(pids, pid)
}
return pids, nil
return fs.GetPids(c)
}
func (d *driver) writeContainerFile(container *libcontainer.Container, id string) error {
@ -225,6 +242,10 @@ func (d *driver) createContainerRoot(id string) error {
}
func (d *driver) removeContainerRoot(id string) error {
d.Lock()
delete(d.activeContainers, id)
d.Unlock()
return os.RemoveAll(filepath.Join(d.root, id))
}
@ -238,8 +259,8 @@ func getEnv(key string, env []string) string {
return ""
}
func getTerminal(c *execdriver.Command, pipes *execdriver.Pipes) nsinit.Terminal {
var term nsinit.Terminal
func getTerminal(c *execdriver.Command, pipes *execdriver.Pipes) namespaces.Terminal {
var term namespaces.Terminal
if c.Tty {
term = &dockerTtyTerm{
pipes: pipes,
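The native driver's `Pause`/`Unpause` set the container cgroup's freezer state to `FROZEN` or `THAWED` and let the libcontainer cgroups packages (fs or systemd) apply it. Stripped of that plumbing, the operation amounts to writing the state into the freezer cgroup; a Linux-only sketch, assuming a cgroup v1 freezer hierarchy mounted at `/sys/fs/cgroup/freezer` with a `docker` parent cgroup:
```
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
)

// setFreezerState writes "FROZEN" or "THAWED" into the container's
// freezer cgroup, which is what Pause/Unpause ultimately boil down to.
func setFreezerState(containerID, state string) error {
	p := filepath.Join("/sys/fs/cgroup/freezer/docker", containerID, "freezer.state")
	return ioutil.WriteFile(p, []byte(state), 0644)
}

func main() {
	if err := setFreezerState("4c01db0b339c", "FROZEN"); err != nil {
		fmt.Println("freeze failed:", err)
	}
}
```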

View file

@ -2,30 +2,25 @@ package template
import (
"github.com/dotcloud/docker/pkg/apparmor"
"github.com/dotcloud/docker/pkg/cgroups"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/cgroups"
)
// New returns the docker default configuration for libcontainer
func New() *libcontainer.Container {
container := &libcontainer.Container{
CapabilitiesMask: map[string]bool{
"SETPCAP": false,
"SYS_MODULE": false,
"SYS_RAWIO": false,
"SYS_PACCT": false,
"SYS_ADMIN": false,
"SYS_NICE": false,
"SYS_RESOURCE": false,
"SYS_TIME": false,
"SYS_TTY_CONFIG": false,
"AUDIT_WRITE": false,
"AUDIT_CONTROL": false,
"MAC_OVERRIDE": false,
"MAC_ADMIN": false,
"NET_ADMIN": false,
"MKNOD": true,
"SYSLOG": false,
Capabilities: []string{
"CHOWN",
"DAC_OVERRIDE",
"FOWNER",
"MKNOD",
"NET_RAW",
"SETGID",
"SETUID",
"SETFCAP",
"SETPCAP",
"NET_BIND_SERVICE",
"SYS_CHROOT",
},
Namespaces: map[string]bool{
"NEWNS": true,
@ -35,8 +30,8 @@ func New() *libcontainer.Container {
"NEWNET": true,
},
Cgroups: &cgroups.Cgroup{
Parent: "docker",
DeviceAccess: false,
Parent: "docker",
AllowAllDevices: false,
},
Context: libcontainer.Context{},
}

View file

@ -7,7 +7,7 @@ aufs driver directory structure
   1
   2
   3
diffs // Content of the layer
diff // Content of the layer
   1 // Contains layers that need to be mounted for the id
   2
   3
@ -23,20 +23,26 @@ package aufs
import (
"bufio"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemon/graphdriver"
"github.com/dotcloud/docker/pkg/label"
mountpk "github.com/dotcloud/docker/pkg/mount"
"github.com/dotcloud/docker/utils"
"os"
"os/exec"
"path"
"strings"
"sync"
"syscall"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemon/graphdriver"
"github.com/dotcloud/docker/pkg/label"
mountpk "github.com/dotcloud/docker/pkg/mount"
"github.com/dotcloud/docker/utils"
)
var (
ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems")
incompatibleFsMagic = []graphdriver.FsMagic{
graphdriver.FsMagicBtrfs,
graphdriver.FsMagicAufs,
}
)
func init() {
@ -51,11 +57,25 @@ type Driver struct {
// New returns a new AUFS driver.
// An error is returned if AUFS is not supported.
func Init(root string) (graphdriver.Driver, error) {
func Init(root string, options []string) (graphdriver.Driver, error) {
// Try to load the aufs kernel module
if err := supportsAufs(); err != nil {
return nil, err
return nil, graphdriver.ErrNotSupported
}
rootdir := path.Dir(root)
var buf syscall.Statfs_t
if err := syscall.Statfs(rootdir, &buf); err != nil {
return nil, fmt.Errorf("Couldn't stat the root directory: %s", err)
}
for _, magic := range incompatibleFsMagic {
if graphdriver.FsMagic(buf.Type) == magic {
return nil, graphdriver.ErrIncompatibleFS
}
}
paths := []string{
"mnt",
"diff",
@ -77,6 +97,10 @@ func Init(root string) (graphdriver.Driver, error) {
return nil, err
}
if err := graphdriver.MakePrivate(root); err != nil {
return nil, err
}
for _, p := range paths {
if err := os.MkdirAll(path.Join(root, p), 0755); err != nil {
return nil, err
@ -351,12 +375,14 @@ func (a *Driver) Cleanup() error {
if err != nil {
return err
}
for _, id := range ids {
if err := a.unmount(id); err != nil {
utils.Errorf("Unmounting %s: %s", utils.TruncateID(id), err)
}
}
return nil
return mountpk.Unmount(a.root)
}
func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) {

View file

@ -17,9 +17,9 @@ var (
)
func testInit(dir string, t *testing.T) graphdriver.Driver {
d, err := Init(dir)
d, err := Init(dir, nil)
if err != nil {
if err == ErrAufsNotSupported {
if err == graphdriver.ErrNotSupported {
t.Skip(err)
} else {
t.Fatal(err)

View file

@ -11,18 +11,20 @@ import "C"
import (
"fmt"
"github.com/dotcloud/docker/daemon/graphdriver"
"os"
"path"
"syscall"
"unsafe"
"github.com/dotcloud/docker/daemon/graphdriver"
"github.com/dotcloud/docker/pkg/mount"
)
func init() {
graphdriver.Register("btrfs", Init)
}
func Init(home string) (graphdriver.Driver, error) {
func Init(home string, options []string) (graphdriver.Driver, error) {
rootdir := path.Dir(home)
var buf syscall.Statfs_t
@ -30,8 +32,16 @@ func Init(home string) (graphdriver.Driver, error) {
return nil, err
}
if buf.Type != 0x9123683E {
return nil, fmt.Errorf("%s is not a btrfs filesystem", rootdir)
if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicBtrfs {
return nil, graphdriver.ErrPrerequisites
}
if err := os.MkdirAll(home, 0700); err != nil {
return nil, err
}
if err := graphdriver.MakePrivate(home); err != nil {
return nil, err
}
return &Driver{
@ -52,7 +62,7 @@ func (d *Driver) Status() [][2]string {
}
func (d *Driver) Cleanup() error {
return nil
return mount.Unmount(d.home)
}
func free(p *C.char) {

View file

@ -0,0 +1,28 @@
package btrfs
import (
"github.com/dotcloud/docker/daemon/graphdriver/graphtest"
"testing"
)
// This avoids creating a new driver for each test if all tests are run
// Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown
func TestBtrfsSetup(t *testing.T) {
graphtest.GetDriver(t, "btrfs")
}
func TestBtrfsCreateEmpty(t *testing.T) {
graphtest.DriverTestCreateEmpty(t, "btrfs")
}
func TestBtrfsCreateBase(t *testing.T) {
graphtest.DriverTestCreateBase(t, "btrfs")
}
func TestBtrfsCreateSnap(t *testing.T) {
graphtest.DriverTestCreateSnap(t, "btrfs")
}
func TestBtrfsTeardown(t *testing.T) {
graphtest.PutDriver(t)
}

View file

@ -0,0 +1,143 @@
## devicemapper - a storage backend based on Device Mapper
### Theory of operation
The device mapper graphdriver uses the device mapper thin provisioning
module (dm-thinp) to implement CoW snapshots. For each devicemapper
graph location (typically `/var/lib/docker/devicemapper`, $graph below)
a thin pool is created based on two block devices, one for data and
one for metadata. By default these block devices are created
automatically by using loopback mounts of automatically created sparse
files.
The default loopback files used are `$graph/devicemapper/data` and
`$graph/devicemapper/metadata`. Additional metadata required to map
from docker entities to the corresponding devicemapper volumes is
stored in the `$graph/devicemapper/json` file (encoded as JSON).
In order to support multiple devicemapper graphs on a system the thin
pool will be named something like: `docker-0:33-19478248-pool`, where
the `0:33` part is the major:minor device number and `19478248` is the
inode number of the $graph directory.
On the thin pool docker automatically creates a base thin device,
called something like `docker-0:33-19478248-base` of a fixed
size. This is automatically formatted on creation and contains just an
empty filesystem. This device is the base of all docker images and
containers. All base images are snapshots of this device and those
images are then in turn used as snapshots for other images and
eventually containers.
### Options
The devicemapper backend supports some options that you can specify
when starting the docker daemon using the --storage-opt flags.
This uses the `dm` prefix and would be used something like `docker -d --storage-opt dm.foo=bar`.
Here is the list of supported options:
* `dm.basesize`
Specifies the size to use when creating the base device, which
limits the size of images and containers. The default value is
10G. Note, thin devices are inherently "sparse", so a 10G device
which is mostly empty doesn't use 10 GB of space on the
pool. However, the larger the device is, the more space the filesystem
itself uses even when empty.
Example use:
``docker -d --storage-opt dm.basesize=20G``
* `dm.loopdatasize`
Specifies the size to use when creating the loopback file for the
"data" device which is used for the thin pool. The default size is
100G. Note that the file is sparse, so it will not initially take
up this much space.
Example use:
``docker -d --storage-opt dm.loopdatasize=200G``
* `dm.loopmetadatasize`
Specifies the size to use when creating the loopback file for the
"metadadata" device which is used for the thin pool. The default size is
2G. Note that the file is sparse, so it will not initially take
up this much space.
Example use:
``docker -d --storage-opt dm.loopmetadatasize=4G``
* `dm.fs`
Specifies the filesystem type to use for the base device. The supported
options are "ext4" and "xfs". The default is "ext4".
Example use:
``docker -d --storage-opt dm.fs=xfs``
* `dm.mkfsarg`
Specifies extra mkfs arguments to be used when creating the base device.
Example use:
``docker -d --storage-opt "dm.mkfsarg=-O ^has_journal"``
* `dm.mountopt`
Specifies extra mount options used when mounting the thin devices.
Example use:
``docker -d --storage-opt dm.mountopt=nodiscard``
* `dm.datadev`
Specifies a custom blockdevice to use for data for the thin pool.
If using a block device for device mapper storage, ideally both
datadev and metadatadev should be specified to completely avoid
using the loopback device.
Example use:
``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1``
* `dm.metadatadev`
Specifies a custom blockdevice to use for metadata for the thin
pool.
For best performance the metadata should be on a different spindle
than the data, or even better on an SSD.
If setting up a new metadata pool it is required to be valid. This
can be achieved by zeroing the first 4k to indicate empty
metadata, like this:
``dd if=/dev/zero of=$metadata_dev bs=4096 count=1``
Example use:
``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1``
* `dm.blkdiscard`
Enables or disables the use of blkdiscard when removing
devicemapper devices. This is enabled by default (only) if using
loopback devices and is required to re-sparsify the loopback file
on image/container removal.
Disabling this on loopback can lead to *much* faster container
removal times, but the space used in the /var/lib/docker directory
will not be returned to the system for other use when containers
are removed.
Example use:
``docker -d --storage-opt dm.blkdiscard=false``

View file

@ -4,6 +4,9 @@ package devmapper
import (
"fmt"
"os"
"syscall"
"github.com/dotcloud/docker/utils"
)
@ -14,7 +17,7 @@ func stringToLoopName(src string) [LoNameSize]uint8 {
}
func getNextFreeLoopbackIndex() (int, error) {
f, err := osOpenFile("/dev/loop-control", osORdOnly, 0644)
f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644)
if err != nil {
return 0, err
}
@ -27,27 +30,27 @@ func getNextFreeLoopbackIndex() (int, error) {
return index, err
}
func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile, err error) {
func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) {
// Start looking for a free /dev/loop
for {
target := fmt.Sprintf("/dev/loop%d", index)
index++
fi, err := osStat(target)
fi, err := os.Stat(target)
if err != nil {
if osIsNotExist(err) {
if os.IsNotExist(err) {
utils.Errorf("There are no more loopback devices available.")
}
return nil, ErrAttachLoopbackDevice
}
if fi.Mode()&osModeDevice != osModeDevice {
if fi.Mode()&os.ModeDevice != os.ModeDevice {
utils.Errorf("Loopback device %s is not a block device.", target)
continue
}
// OpenFile adds O_CLOEXEC
loopFile, err = osOpenFile(target, osORdWr, 0644)
loopFile, err = os.OpenFile(target, os.O_RDWR, 0644)
if err != nil {
utils.Errorf("Error opening loopback device: %s", err)
return nil, ErrAttachLoopbackDevice
@ -58,7 +61,7 @@ func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile,
loopFile.Close()
// If the error is EBUSY, then try the next loopback
if err != sysEBusy {
if err != syscall.EBUSY {
utils.Errorf("Cannot set up loopback device %s: %s", target, err)
return nil, ErrAttachLoopbackDevice
}
@ -80,8 +83,8 @@ func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile,
}
// attachLoopDevice attaches the given sparse file to the next
// available loopback device. It returns an opened *osFile.
func attachLoopDevice(sparseName string) (loop *osFile, err error) {
// available loopback device. It returns an opened *os.File.
func attachLoopDevice(sparseName string) (loop *os.File, err error) {
// Try to retrieve the next available loopback device via syscall.
// If it fails, we discard error and start loopking for a
@ -92,7 +95,7 @@ func attachLoopDevice(sparseName string) (loop *osFile, err error) {
}
// OpenFile adds O_CLOEXEC
sparseFile, err := osOpenFile(sparseName, osORdWr, 0644)
sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
if err != nil {
utils.Errorf("Error opening sparse file %s: %s", sparseName, err)
return nil, ErrAttachLoopbackDevice

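The loopback-attach pattern in the hunk above (walk /dev/loopN, skip non-device nodes, retry on EBUSY) can be condensed into a standalone, Linux-only sketch. It uses the standard LOOP_SET_FD ioctl but leaves out the driver's error types, logging and loop-status handling, and the backing-file path in main is only a placeholder, so treat this as an illustration rather than the shipped code:

package main

import (
	"fmt"
	"os"
	"syscall"
)

// LOOP_SET_FD ioctl request number from <linux/loop.h>.
const loopSetFd = 0x4C00

// attachSparse walks /dev/loop0, /dev/loop1, ... until one of the loop
// devices accepts the backing file. EBUSY means another process grabbed
// the device first, so the next index is tried.
func attachSparse(backing *os.File) (*os.File, error) {
	for index := 0; ; index++ {
		target := fmt.Sprintf("/dev/loop%d", index)
		fi, err := os.Stat(target)
		if err != nil {
			if os.IsNotExist(err) {
				return nil, fmt.Errorf("no free loopback device found")
			}
			return nil, err
		}
		if fi.Mode()&os.ModeDevice != os.ModeDevice {
			continue // not a device node, skip it
		}
		loop, err := os.OpenFile(target, os.O_RDWR, 0644)
		if err != nil {
			return nil, err
		}
		_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, loop.Fd(), loopSetFd, backing.Fd())
		if errno == 0 {
			return loop, nil // attached
		}
		loop.Close()
		if errno != syscall.EBUSY {
			return nil, errno // unexpected failure, give up
		}
		// EBUSY: the device was taken in the meantime, try the next one.
	}
}

func main() {
	// The path is a placeholder; any regular file can back a loop device.
	backing, err := os.OpenFile("/tmp/example-sparse-file", os.O_RDWR, 0600)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer backing.Close()
	loop, err := attachSparse(backing)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("attached to", loop.Name())
	loop.Close()
}
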
View file

@ -8,6 +8,8 @@ import (
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
@ -16,7 +18,9 @@ import (
"syscall"
"time"
"github.com/dotcloud/docker/daemon/graphdriver"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/units"
"github.com/dotcloud/docker/utils"
)
@ -62,8 +66,18 @@ type DeviceSet struct {
devicePrefix string
TransactionId uint64
NewTransactionId uint64
nextFreeDevice int
sawBusy bool
nextDeviceId int
// Options
dataLoopbackSize int64
metaDataLoopbackSize int64
baseFsSize uint64
filesystem string
mountOptions string
mkfsArgs []string
dataDevice string
metadataDevice string
doBlkDiscard bool
}
type DiskUsage struct {
@ -109,7 +123,19 @@ func (devices *DeviceSet) loopbackDir() string {
return path.Join(devices.root, "devicemapper")
}
func (devices *DeviceSet) jsonFile() string {
func (devices *DeviceSet) metadataDir() string {
return path.Join(devices.root, "metadata")
}
func (devices *DeviceSet) metadataFile(info *DevInfo) string {
file := info.Hash
if file == "" {
file = "base"
}
return path.Join(devices.metadataDir(), file)
}
func (devices *DeviceSet) oldMetadataFile() string {
return path.Join(devices.loopbackDir(), "json")
}
@ -125,7 +151,7 @@ func (devices *DeviceSet) hasImage(name string) bool {
dirname := devices.loopbackDir()
filename := path.Join(dirname, name)
_, err := osStat(filename)
_, err := os.Stat(filename)
return err == nil
}
@ -137,16 +163,16 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
dirname := devices.loopbackDir()
filename := path.Join(dirname, name)
if err := osMkdirAll(dirname, 0700); err != nil && !osIsExist(err) {
if err := os.MkdirAll(dirname, 0700); err != nil && !os.IsExist(err) {
return "", err
}
if _, err := osStat(filename); err != nil {
if !osIsNotExist(err) {
if _, err := os.Stat(filename); err != nil {
if !os.IsNotExist(err) {
return "", err
}
utils.Debugf("Creating loopback file %s for device-manage use", filename)
file, err := osOpenFile(filename, osORdWr|osOCreate, 0600)
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return "", err
}
@ -159,26 +185,24 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
return filename, nil
}
func (devices *DeviceSet) allocateDeviceId() int {
// TODO: Add smarter reuse of deleted devices
id := devices.nextFreeDevice
devices.nextFreeDevice = devices.nextFreeDevice + 1
return id
}
func (devices *DeviceSet) allocateTransactionId() uint64 {
devices.NewTransactionId = devices.NewTransactionId + 1
return devices.NewTransactionId
}
func (devices *DeviceSet) saveMetadata() error {
devices.devicesLock.Lock()
jsonData, err := json.Marshal(devices.MetaData)
devices.devicesLock.Unlock()
func (devices *DeviceSet) removeMetadata(info *DevInfo) error {
if err := os.RemoveAll(devices.metadataFile(info)); err != nil {
return fmt.Errorf("Error removing metadata file %s: %s", devices.metadataFile(info), err)
}
return nil
}
func (devices *DeviceSet) saveMetadata(info *DevInfo) error {
jsonData, err := json.Marshal(info)
if err != nil {
return fmt.Errorf("Error encoding metadata to json: %s", err)
}
tmpFile, err := ioutil.TempFile(filepath.Dir(devices.jsonFile()), ".json")
tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp")
if err != nil {
return fmt.Errorf("Error creating metadata file: %s", err)
}
@ -196,7 +220,7 @@ func (devices *DeviceSet) saveMetadata() error {
if err := tmpFile.Close(); err != nil {
return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err)
}
if err := osRename(tmpFile.Name(), devices.jsonFile()); err != nil {
if err := os.Rename(tmpFile.Name(), devices.metadataFile(info)); err != nil {
return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err)
}
@ -214,7 +238,12 @@ func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) {
defer devices.devicesLock.Unlock()
info := devices.Devices[hash]
if info == nil {
return nil, fmt.Errorf("Unknown device %s", hash)
info = devices.loadMetadata(hash)
if info == nil {
return nil, fmt.Errorf("Unknown device %s", hash)
}
devices.Devices[hash] = info
}
return info, nil
}
@ -234,7 +263,7 @@ func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*Dev
devices.Devices[hash] = info
devices.devicesLock.Unlock()
if err := devices.saveMetadata(); err != nil {
if err := devices.saveMetadata(info); err != nil {
// Try to remove unused device
devices.devicesLock.Lock()
delete(devices.Devices, hash)
@ -258,63 +287,94 @@ func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error {
func (devices *DeviceSet) createFilesystem(info *DevInfo) error {
devname := info.DevName()
err := execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0,lazy_journal_init=0", devname)
if err != nil {
err = execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0", devname)
args := []string{}
for _, arg := range devices.mkfsArgs {
args = append(args, arg)
}
args = append(args, devname)
var err error
switch devices.filesystem {
case "xfs":
err = exec.Command("mkfs.xfs", args...).Run()
case "ext4":
err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run()
if err != nil {
err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run()
}
default:
err = fmt.Errorf("Unsupported filesystem type %s", devices.filesystem)
}
if err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
}
return nil
}
func (devices *DeviceSet) loadMetaData() error {
utils.Debugf("loadMetadata()")
defer utils.Debugf("loadMetadata END")
func (devices *DeviceSet) initMetaData() error {
_, _, _, params, err := getStatus(devices.getPoolName())
if err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
}
if _, err := fmt.Sscanf(params, "%d", &devices.TransactionId); err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
}
devices.NewTransactionId = devices.TransactionId
jsonData, err := ioutil.ReadFile(devices.jsonFile())
if err != nil && !osIsNotExist(err) {
utils.Debugf("\n--->Err: %s\n", err)
// Migrate old metadatafile
jsonData, err := ioutil.ReadFile(devices.oldMetadataFile())
if err != nil && !os.IsNotExist(err) {
return err
}
devices.MetaData.Devices = make(map[string]*DevInfo)
if jsonData != nil {
if err := json.Unmarshal(jsonData, &devices.MetaData); err != nil {
utils.Debugf("\n--->Err: %s\n", err)
m := MetaData{Devices: make(map[string]*DevInfo)}
if err := json.Unmarshal(jsonData, &m); err != nil {
return err
}
}
for hash, d := range devices.Devices {
d.Hash = hash
d.devices = devices
for hash, info := range m.Devices {
info.Hash = hash
if d.DeviceId >= devices.nextFreeDevice {
devices.nextFreeDevice = d.DeviceId + 1
// If the transaction id is larger than the actual one we lost the device due to some crash
if info.TransactionId <= devices.TransactionId {
devices.saveMetadata(info)
}
}
if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil {
return err
}
// If the transaction id is larger than the actual one we lost the device due to some crash
if d.TransactionId > devices.TransactionId {
utils.Debugf("Removing lost device %s with id %d", hash, d.TransactionId)
delete(devices.Devices, hash)
}
}
return nil
}
func (devices *DeviceSet) loadMetadata(hash string) *DevInfo {
info := &DevInfo{Hash: hash, devices: devices}
jsonData, err := ioutil.ReadFile(devices.metadataFile(info))
if err != nil {
return nil
}
if err := json.Unmarshal(jsonData, &info); err != nil {
return nil
}
// If the transaction id is larger than the actual one we lost the device due to some crash
if info.TransactionId > devices.TransactionId {
return nil
}
return info
}
func (devices *DeviceSet) setupBaseImage() error {
oldInfo, _ := devices.lookupDevice("")
if oldInfo != nil && oldInfo.Initialized {
@ -324,45 +384,42 @@ func (devices *DeviceSet) setupBaseImage() error {
if oldInfo != nil && !oldInfo.Initialized {
utils.Debugf("Removing uninitialized base image")
if err := devices.deleteDevice(oldInfo); err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
}
}
utils.Debugf("Initializing base device-manager snapshot")
id := devices.allocateDeviceId()
id := devices.nextDeviceId
// Create initial device
if err := createDevice(devices.getPoolDevName(), id); err != nil {
utils.Debugf("\n--->Err: %s\n", err)
if err := createDevice(devices.getPoolDevName(), &id); err != nil {
return err
}
utils.Debugf("Registering base device (id %v) with FS size %v", id, DefaultBaseFsSize)
info, err := devices.registerDevice(id, "", DefaultBaseFsSize)
// Ids are 24bit, so wrap around
devices.nextDeviceId = (id + 1) & 0xffffff
utils.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize)
info, err := devices.registerDevice(id, "", devices.baseFsSize)
if err != nil {
_ = deleteDevice(devices.getPoolDevName(), id)
utils.Debugf("\n--->Err: %s\n", err)
return err
}
utils.Debugf("Creating filesystem on base device-manager snapshot")
if err = devices.activateDeviceIfNeeded(info); err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
}
if err := devices.createFilesystem(info); err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
}
info.Initialized = true
if err = devices.saveMetadata(); err != nil {
if err = devices.saveMetadata(info); err != nil {
info.Initialized = false
utils.Debugf("\n--->Err: %s\n", err)
return err
}
@ -372,11 +429,11 @@ func (devices *DeviceSet) setupBaseImage() error {
func setCloseOnExec(name string) {
if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil {
for _, i := range fileInfos {
link, _ := osReadlink(filepath.Join("/proc/self/fd", i.Name()))
link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
if link == name {
fd, err := strconv.Atoi(i.Name())
if err == nil {
sysCloseOnExec(fd)
syscall.CloseOnExec(fd)
}
}
}
@ -388,10 +445,6 @@ func (devices *DeviceSet) log(level int, file string, line int, dmError int, mes
return // Ignore _LOG_DEBUG
}
if strings.Contains(message, "busy") {
devices.sawBusy = true
}
utils.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
}
@ -408,7 +461,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
datafilename := path.Join(dirname, "data")
metadatafilename := path.Join(dirname, "metadata")
datafile, err := osOpenFile(datafilename, osORdWr, 0)
datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0)
if datafile == nil {
return err
}
@ -429,7 +482,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
}
defer dataloopback.Close()
metadatafile, err := osOpenFile(metadatafilename, osORdWr, 0)
metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0)
if metadatafile == nil {
return err
}
@ -472,39 +525,23 @@ func (devices *DeviceSet) ResizePool(size int64) error {
func (devices *DeviceSet) initDevmapper(doInit bool) error {
logInit(devices)
// Make sure the sparse images exist in <root>/devicemapper/data and
// <root>/devicemapper/metadata
hasData := devices.hasImage("data")
hasMetadata := devices.hasImage("metadata")
if !doInit && !hasData {
return errors.New("Loopback data file not found")
}
if !doInit && !hasMetadata {
return errors.New("Loopback metadata file not found")
}
createdLoopback := !hasData || !hasMetadata
data, err := devices.ensureImage("data", DefaultDataLoopbackSize)
_, err := getDriverVersion()
if err != nil {
utils.Debugf("Error device ensureImage (data): %s\n", err)
return err
// Can't even get driver version, assume not supported
return graphdriver.ErrNotSupported
}
metadata, err := devices.ensureImage("metadata", DefaultMetaDataLoopbackSize)
if err != nil {
utils.Debugf("Error device ensureImage (metadata): %s\n", err)
if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) {
return err
}
// Set the device prefix from the device id and inode of the docker root dir
st, err := osStat(devices.root)
st, err := os.Stat(devices.root)
if err != nil {
return fmt.Errorf("Error looking up dir %s: %s", devices.root, err)
}
sysSt := toSysStatT(st.Sys())
sysSt := st.Sys().(*syscall.Stat_t)
// "reg-" stands for "regular file".
// In the future we might use "dev-" for "device file", etc.
// docker-maj,min[-inode] stands for:
@ -527,35 +564,91 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
// so we add this badhack to make sure it closes itself
setCloseOnExec("/dev/mapper/control")
// Make sure the sparse images exist in <root>/devicemapper/data and
// <root>/devicemapper/metadata
createdLoopback := false
// If the pool doesn't exist, create it
if info.Exists == 0 {
utils.Debugf("Pool doesn't exist. Creating it.")
dataFile, err := attachLoopDevice(data)
if err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
var (
dataFile *os.File
metadataFile *os.File
)
if devices.dataDevice == "" {
// Make sure the sparse images exist in <root>/devicemapper/data
hasData := devices.hasImage("data")
if !doInit && !hasData {
return errors.New("Loopback data file not found")
}
if !hasData {
createdLoopback = true
}
data, err := devices.ensureImage("data", devices.dataLoopbackSize)
if err != nil {
utils.Debugf("Error device ensureImage (data): %s\n", err)
return err
}
dataFile, err = attachLoopDevice(data)
if err != nil {
return err
}
} else {
dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600)
if err != nil {
return err
}
}
defer dataFile.Close()
metadataFile, err := attachLoopDevice(metadata)
if err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
if devices.metadataDevice == "" {
// Make sure the sparse images exist in <root>/devicemapper/metadata
hasMetadata := devices.hasImage("metadata")
if !doInit && !hasMetadata {
return errors.New("Loopback metadata file not found")
}
if !hasMetadata {
createdLoopback = true
}
metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize)
if err != nil {
utils.Debugf("Error device ensureImage (metadata): %s\n", err)
return err
}
metadataFile, err = attachLoopDevice(metadata)
if err != nil {
return err
}
} else {
metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600)
if err != nil {
return err
}
}
defer metadataFile.Close()
if err := createPool(devices.getPoolName(), dataFile, metadataFile); err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
}
}
// If we didn't just create the data or metadata image, we need to
// load the metadata from the existing file.
// load the transaction id and migrate old metadata
if !createdLoopback {
if err = devices.loadMetaData(); err != nil {
utils.Debugf("\n--->Err: %s\n", err)
if err = devices.initMetaData(); err != nil {
return err
}
}
@ -587,13 +680,16 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
return fmt.Errorf("device %s already exists", hash)
}
deviceId := devices.allocateDeviceId()
deviceId := devices.nextDeviceId
if err := devices.createSnapDevice(devices.getPoolDevName(), deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil {
if err := createSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil {
utils.Debugf("Error creating snap device: %s\n", err)
return err
}
// Ids are 24bit, so wrap around
devices.nextDeviceId = (deviceId + 1) & 0xffffff
if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil {
deleteDevice(devices.getPoolDevName(), deviceId)
utils.Debugf("Error registering device: %s\n", err)
@ -603,12 +699,14 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
}
func (devices *DeviceSet) deleteDevice(info *DevInfo) error {
// This is a workaround for the kernel not discarding blocks on
// the thin pool when we remove a thinp device, so we do it
// manually
if err := devices.activateDeviceIfNeeded(info); err == nil {
if err := BlockDeviceDiscard(info.DevName()); err != nil {
utils.Debugf("Error discarding block on device: %s (ignoring)\n", err)
if devices.doBlkDiscard {
// This is a workaround for the kernel not discarding blocks on
// the thin pool when we remove a thinp device, so we do it
// manually
if err := devices.activateDeviceIfNeeded(info); err == nil {
if err := BlockDeviceDiscard(info.DevName()); err != nil {
utils.Debugf("Error discarding block on device: %s (ignoring)\n", err)
}
}
}
@ -620,14 +718,6 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error {
}
}
if info.Initialized {
info.Initialized = false
if err := devices.saveMetadata(); err != nil {
utils.Debugf("Error saving meta data: %s\n", err)
return err
}
}
if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil {
utils.Debugf("Error deleting device: %s\n", err)
return err
@ -638,11 +728,11 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error {
delete(devices.Devices, info.Hash)
devices.devicesLock.Unlock()
if err := devices.saveMetadata(); err != nil {
if err := devices.removeMetadata(info); err != nil {
devices.devicesLock.Lock()
devices.Devices[info.Hash] = info
devices.devicesLock.Unlock()
utils.Debugf("Error saving meta data: %s\n", err)
utils.Debugf("Error removing meta data: %s\n", err)
return err
}
@ -670,7 +760,6 @@ func (devices *DeviceSet) deactivatePool() error {
devname := devices.getPoolDevName()
devinfo, err := getInfo(devname)
if err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
}
if devinfo.Exists != 0 {
@ -692,12 +781,10 @@ func (devices *DeviceSet) deactivateDevice(info *DevInfo) error {
devinfo, err := getInfo(info.Name())
if err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
}
if devinfo.Exists != 0 {
if err := devices.removeDeviceAndWait(info.Name()); err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
}
}
@ -711,12 +798,11 @@ func (devices *DeviceSet) removeDeviceAndWait(devname string) error {
var err error
for i := 0; i < 1000; i++ {
devices.sawBusy = false
err = removeDevice(devname)
if err == nil {
break
}
if !devices.sawBusy {
if err != ErrBusy {
return err
}
@ -813,7 +899,7 @@ func (devices *DeviceSet) Shutdown() error {
// We use MNT_DETACH here in case it is still busy in some running
// container. This means it'll go away from the global scope directly,
// and the device will be released when that container dies.
if err := sysUnmount(info.mountPath, syscall.MNT_DETACH); err != nil {
if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err)
}
@ -871,13 +957,26 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err)
}
var flags uintptr = sysMsMgcVal
var flags uintptr = syscall.MS_MGC_VAL
mountOptions := label.FormatMountLabel("discard", mountLabel)
err = sysMount(info.DevName(), path, "ext4", flags, mountOptions)
if err != nil && err == sysEInval {
mountOptions = label.FormatMountLabel("", mountLabel)
err = sysMount(info.DevName(), path, "ext4", flags, mountOptions)
fstype, err := ProbeFsType(info.DevName())
if err != nil {
return err
}
options := ""
if fstype == "xfs" {
// XFS needs nouuid or it can't mount filesystems with the same fs
options = joinMountOptions(options, "nouuid")
}
options = joinMountOptions(options, devices.mountOptions)
options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))
err = syscall.Mount(info.DevName(), path, fstype, flags, joinMountOptions("discard", options))
if err != nil && err == syscall.EINVAL {
err = syscall.Mount(info.DevName(), path, fstype, flags, options)
}
if err != nil {
return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err)
@ -886,7 +985,7 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
info.mountCount = 1
info.mountPath = path
return devices.setInitialized(info)
return nil
}
func (devices *DeviceSet) UnmountDevice(hash string) error {
@ -914,8 +1013,7 @@ func (devices *DeviceSet) UnmountDevice(hash string) error {
}
utils.Debugf("[devmapper] Unmount(%s)", info.mountPath)
if err := sysUnmount(info.mountPath, 0); err != nil {
utils.Debugf("\n--->Err: %s\n", err)
if err := syscall.Unmount(info.mountPath, 0); err != nil {
return err
}
utils.Debugf("[devmapper] Unmount done")
@ -937,14 +1035,6 @@ func (devices *DeviceSet) HasDevice(hash string) bool {
return info != nil
}
func (devices *DeviceSet) HasInitializedDevice(hash string) bool {
devices.Lock()
defer devices.Unlock()
info, _ := devices.lookupDevice(hash)
return info != nil && info.Initialized
}
func (devices *DeviceSet) HasActivatedDevice(hash string) bool {
info, _ := devices.lookupDevice(hash)
if info == nil {
@ -961,17 +1051,6 @@ func (devices *DeviceSet) HasActivatedDevice(hash string) bool {
return devinfo != nil && devinfo.Exists != 0
}
func (devices *DeviceSet) setInitialized(info *DevInfo) error {
info.Initialized = true
if err := devices.saveMetadata(); err != nil {
info.Initialized = false
utils.Debugf("\n--->Err: %s\n", err)
return err
}
return nil
}
func (devices *DeviceSet) List() []string {
devices.Lock()
defer devices.Unlock()
@ -1069,12 +1148,72 @@ func (devices *DeviceSet) Status() *Status {
return status
}
func NewDeviceSet(root string, doInit bool) (*DeviceSet, error) {
func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error) {
SetDevDir("/dev")
devices := &DeviceSet{
root: root,
MetaData: MetaData{Devices: make(map[string]*DevInfo)},
root: root,
MetaData: MetaData{Devices: make(map[string]*DevInfo)},
dataLoopbackSize: DefaultDataLoopbackSize,
metaDataLoopbackSize: DefaultMetaDataLoopbackSize,
baseFsSize: DefaultBaseFsSize,
filesystem: "ext4",
doBlkDiscard: true,
}
foundBlkDiscard := false
for _, option := range options {
key, val, err := utils.ParseKeyValueOpt(option)
if err != nil {
return nil, err
}
key = strings.ToLower(key)
switch key {
case "dm.basesize":
size, err := units.FromHumanSize(val)
if err != nil {
return nil, err
}
devices.baseFsSize = uint64(size)
case "dm.loopdatasize":
size, err := units.FromHumanSize(val)
if err != nil {
return nil, err
}
devices.dataLoopbackSize = size
case "dm.loopmetadatasize":
size, err := units.FromHumanSize(val)
if err != nil {
return nil, err
}
devices.metaDataLoopbackSize = size
case "dm.fs":
if val != "ext4" && val != "xfs" {
return nil, fmt.Errorf("Unsupported filesystem %s\n", val)
}
devices.filesystem = val
case "dm.mkfsarg":
devices.mkfsArgs = append(devices.mkfsArgs, val)
case "dm.mountopt":
devices.mountOptions = joinMountOptions(devices.mountOptions, val)
case "dm.metadatadev":
devices.metadataDevice = val
case "dm.datadev":
devices.dataDevice = val
case "dm.blkdiscard":
foundBlkDiscard = true
devices.doBlkDiscard, err = strconv.ParseBool(val)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("Unknown option %s\n", key)
}
}
// By default, don't do the blk discard hack on raw devices, it's rarely useful and is expensive
if !foundBlkDiscard && devices.dataDevice != "" {
devices.doBlkDiscard = false
}
if err := devices.initDevmapper(doInit); err != nil {

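The MountDevice hunk above relies on a small helper, joinMountOptions, whose definition is not part of this excerpt. Purely as a reading aid, a plausible minimal version (the shipped helper may differ in detail) is:

package main

import "fmt"

// joinMountOptions appends one comma-separated mount-option string to
// another, skipping whichever side is empty. This mirrors how MountDevice
// builds the data argument for syscall.Mount; it is a sketch, not
// necessarily the exact helper in the driver.
func joinMountOptions(a, b string) string {
	if a == "" {
		return b
	}
	if b == "" {
		return a
	}
	return a + "," + b
}

func main() {
	opts := joinMountOptions("nouuid", "")
	opts = joinMountOptions(opts, "nodiscard")
	fmt.Println(opts) // prints: nouuid,nodiscard
}
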
View file

@ -5,9 +5,11 @@ package devmapper
import (
"errors"
"fmt"
"github.com/dotcloud/docker/utils"
"os"
"runtime"
"syscall"
"github.com/dotcloud/docker/utils"
)
type DevmapperLogger interface {
@ -50,6 +52,7 @@ var (
ErrTaskAddTarget = errors.New("dm_task_add_target failed")
ErrTaskSetSector = errors.New("dm_task_set_sector failed")
ErrTaskGetInfo = errors.New("dm_task_get_info failed")
ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed")
ErrTaskSetCookie = errors.New("dm_task_set_cookie failed")
ErrNilCookie = errors.New("cookie ptr can't be nil")
ErrAttachLoopbackDevice = errors.New("loopback mounting failed")
@ -62,6 +65,10 @@ var (
ErrInvalidAddNode = errors.New("Invalid AddNode type")
ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file")
ErrLoopbackSetCapacity = errors.New("Unable to set loopback capacity")
ErrBusy = errors.New("Device is Busy")
dmSawBusy bool
dmSawExist bool
)
type (
@ -172,6 +179,14 @@ func (t *Task) GetInfo() (*Info, error) {
return info, nil
}
func (t *Task) GetDriverVersion() (string, error) {
res := DmTaskGetDriverVersion(t.unmanaged)
if res == "" {
return "", ErrTaskGetDriverVersion
}
return res, nil
}
func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64,
length uint64, targetType string, params string) {
@ -180,7 +195,7 @@ func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64,
start, length, targetType, params
}
func getLoopbackBackingFile(file *osFile) (uint64, uint64, error) {
func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
loopInfo, err := ioctlLoopGetStatus64(file.Fd())
if err != nil {
utils.Errorf("Error get loopback backing file: %s\n", err)
@ -189,7 +204,7 @@ func getLoopbackBackingFile(file *osFile) (uint64, uint64, error) {
return loopInfo.loDevice, loopInfo.loInode, nil
}
func LoopbackSetCapacity(file *osFile) error {
func LoopbackSetCapacity(file *os.File) error {
if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
utils.Errorf("Error loopbackSetCapacity: %s", err)
return ErrLoopbackSetCapacity
@ -197,20 +212,20 @@ func LoopbackSetCapacity(file *osFile) error {
return nil
}
func FindLoopDeviceFor(file *osFile) *osFile {
func FindLoopDeviceFor(file *os.File) *os.File {
stat, err := file.Stat()
if err != nil {
return nil
}
targetInode := stat.Sys().(*sysStatT).Ino
targetDevice := stat.Sys().(*sysStatT).Dev
targetInode := stat.Sys().(*syscall.Stat_t).Ino
targetDevice := stat.Sys().(*syscall.Stat_t).Dev
for i := 0; true; i++ {
path := fmt.Sprintf("/dev/loop%d", i)
file, err := osOpenFile(path, osORdWr, 0)
file, err := os.OpenFile(path, os.O_RDWR, 0)
if err != nil {
if osIsNotExist(err) {
if os.IsNotExist(err) {
return nil
}
@ -280,7 +295,7 @@ func RemoveDevice(name string) error {
return nil
}
func GetBlockDeviceSize(file *osFile) (uint64, error) {
func GetBlockDeviceSize(file *os.File) (uint64, error) {
size, err := ioctlBlkGetSize64(file.Fd())
if err != nil {
utils.Errorf("Error getblockdevicesize: %s", err)
@ -290,7 +305,7 @@ func GetBlockDeviceSize(file *osFile) (uint64, error) {
}
func BlockDeviceDiscard(path string) error {
file, err := osOpenFile(path, osORdWr, 0)
file, err := os.OpenFile(path, os.O_RDWR, 0)
if err != nil {
return err
}
@ -313,7 +328,7 @@ func BlockDeviceDiscard(path string) error {
}
// This is the programmatic example of "dmsetup create"
func createPool(poolName string, dataFile, metadataFile *osFile) error {
func createPool(poolName string, dataFile, metadataFile *os.File) error {
task, err := createTask(DeviceCreate, poolName)
if task == nil {
return err
@ -321,21 +336,21 @@ func createPool(poolName string, dataFile, metadataFile *osFile) error {
size, err := GetBlockDeviceSize(dataFile)
if err != nil {
return fmt.Errorf("Can't get data size")
return fmt.Errorf("Can't get data size %s", err)
}
params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing"
if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil {
return fmt.Errorf("Can't add target")
return fmt.Errorf("Can't add target %s", err)
}
var cookie uint = 0
if err := task.SetCookie(&cookie, 0); err != nil {
return fmt.Errorf("Can't set cookie")
return fmt.Errorf("Can't set cookie %s", err)
}
if err := task.Run(); err != nil {
return fmt.Errorf("Error running DeviceCreate (createPool)")
return fmt.Errorf("Error running DeviceCreate (createPool) %s", err)
}
UdevWait(cookie)
@ -343,7 +358,7 @@ func createPool(poolName string, dataFile, metadataFile *osFile) error {
return nil
}
func reloadPool(poolName string, dataFile, metadataFile *osFile) error {
func reloadPool(poolName string, dataFile, metadataFile *os.File) error {
task, err := createTask(DeviceReload, poolName)
if task == nil {
return err
@ -351,16 +366,16 @@ func reloadPool(poolName string, dataFile, metadataFile *osFile) error {
size, err := GetBlockDeviceSize(dataFile)
if err != nil {
return fmt.Errorf("Can't get data size")
return fmt.Errorf("Can't get data size %s", err)
}
params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768"
if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil {
return fmt.Errorf("Can't add target")
return fmt.Errorf("Can't add target %s", err)
}
if err := task.Run(); err != nil {
return fmt.Errorf("Error running DeviceCreate")
return fmt.Errorf("Error running DeviceCreate %s", err)
}
return nil
@ -388,6 +403,17 @@ func getInfo(name string) (*Info, error) {
return task.GetInfo()
}
func getDriverVersion() (string, error) {
task := TaskCreate(DeviceVersion)
if task == nil {
return "", fmt.Errorf("Can't create DeviceVersion task")
}
if err := task.Run(); err != nil {
return "", err
}
return task.GetDriverVersion()
}
func getStatus(name string) (uint64, uint64, string, string, error) {
task, err := createTask(DeviceStatus, name)
if task == nil {
@ -420,15 +446,15 @@ func setTransactionId(poolName string, oldId uint64, newId uint64) error {
}
if err := task.SetSector(0); err != nil {
return fmt.Errorf("Can't set sector")
return fmt.Errorf("Can't set sector %s", err)
}
if err := task.SetMessage(fmt.Sprintf("set_transaction_id %d %d", oldId, newId)); err != nil {
return fmt.Errorf("Can't set message")
return fmt.Errorf("Can't set message %s", err)
}
if err := task.Run(); err != nil {
return fmt.Errorf("Error running setTransactionId")
return fmt.Errorf("Error running setTransactionId %s", err)
}
return nil
}
@ -439,7 +465,7 @@ func suspendDevice(name string) error {
return err
}
if err := task.Run(); err != nil {
return fmt.Errorf("Error running DeviceSuspend: %s", err)
return fmt.Errorf("Error running DeviceSuspend %s", err)
}
return nil
}
@ -452,11 +478,11 @@ func resumeDevice(name string) error {
var cookie uint = 0
if err := task.SetCookie(&cookie, 0); err != nil {
return fmt.Errorf("Can't set cookie")
return fmt.Errorf("Can't set cookie %s", err)
}
if err := task.Run(); err != nil {
return fmt.Errorf("Error running DeviceResume")
return fmt.Errorf("Error running DeviceResume %s", err)
}
UdevWait(cookie)
@ -464,23 +490,33 @@ func resumeDevice(name string) error {
return nil
}
func createDevice(poolName string, deviceId int) error {
utils.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, deviceId)
task, err := createTask(DeviceTargetMsg, poolName)
if task == nil {
return err
}
func createDevice(poolName string, deviceId *int) error {
utils.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId)
if err := task.SetSector(0); err != nil {
return fmt.Errorf("Can't set sector")
}
for {
task, err := createTask(DeviceTargetMsg, poolName)
if task == nil {
return err
}
if err := task.SetMessage(fmt.Sprintf("create_thin %d", deviceId)); err != nil {
return fmt.Errorf("Can't set message")
}
if err := task.SetSector(0); err != nil {
return fmt.Errorf("Can't set sector %s", err)
}
if err := task.Run(); err != nil {
return fmt.Errorf("Error running createDevice")
if err := task.SetMessage(fmt.Sprintf("create_thin %d", *deviceId)); err != nil {
return fmt.Errorf("Can't set message %s", err)
}
dmSawExist = false
if err := task.Run(); err != nil {
if dmSawExist {
// Already exists, try next id
*deviceId++
continue
}
return fmt.Errorf("Error running createDevice %s", err)
}
break
}
return nil
}
@ -492,15 +528,15 @@ func deleteDevice(poolName string, deviceId int) error {
}
if err := task.SetSector(0); err != nil {
return fmt.Errorf("Can't set sector")
return fmt.Errorf("Can't set sector %s", err)
}
if err := task.SetMessage(fmt.Sprintf("delete %d", deviceId)); err != nil {
return fmt.Errorf("Can't set message")
return fmt.Errorf("Can't set message %s", err)
}
if err := task.Run(); err != nil {
return fmt.Errorf("Error running deleteDevice")
return fmt.Errorf("Error running deleteDevice %s", err)
}
return nil
}
@ -512,8 +548,12 @@ func removeDevice(name string) error {
if task == nil {
return err
}
dmSawBusy = false
if err = task.Run(); err != nil {
return fmt.Errorf("Error running removeDevice")
if dmSawBusy {
return ErrBusy
}
return fmt.Errorf("Error running removeDevice %s", err)
}
return nil
}
@ -526,19 +566,19 @@ func activateDevice(poolName string, name string, deviceId int, size uint64) err
params := fmt.Sprintf("%s %d", poolName, deviceId)
if err := task.AddTarget(0, size/512, "thin", params); err != nil {
return fmt.Errorf("Can't add target")
return fmt.Errorf("Can't add target %s", err)
}
if err := task.SetAddNode(AddNodeOnCreate); err != nil {
return fmt.Errorf("Can't add node")
return fmt.Errorf("Can't add node %s", err)
}
var cookie uint = 0
if err := task.SetCookie(&cookie, 0); err != nil {
return fmt.Errorf("Can't set cookie")
return fmt.Errorf("Can't set cookie %s", err)
}
if err := task.Run(); err != nil {
return fmt.Errorf("Error running DeviceCreate (activateDevice)")
return fmt.Errorf("Error running DeviceCreate (activateDevice) %s", err)
}
UdevWait(cookie)
@ -546,7 +586,7 @@ func activateDevice(poolName string, name string, deviceId int, size uint64) err
return nil
}
func (devices *DeviceSet) createSnapDevice(poolName string, deviceId int, baseName string, baseDeviceId int) error {
func createSnapDevice(poolName string, deviceId *int, baseName string, baseDeviceId int) error {
devinfo, _ := getInfo(baseName)
doSuspend := devinfo != nil && devinfo.Exists != 0
@ -556,33 +596,44 @@ func (devices *DeviceSet) createSnapDevice(poolName string, deviceId int, baseNa
}
}
task, err := createTask(DeviceTargetMsg, poolName)
if task == nil {
if doSuspend {
resumeDevice(baseName)
for {
task, err := createTask(DeviceTargetMsg, poolName)
if task == nil {
if doSuspend {
resumeDevice(baseName)
}
return err
}
return err
}
if err := task.SetSector(0); err != nil {
if doSuspend {
resumeDevice(baseName)
if err := task.SetSector(0); err != nil {
if doSuspend {
resumeDevice(baseName)
}
return fmt.Errorf("Can't set sector %s", err)
}
return fmt.Errorf("Can't set sector")
}
if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", deviceId, baseDeviceId)); err != nil {
if doSuspend {
resumeDevice(baseName)
if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", *deviceId, baseDeviceId)); err != nil {
if doSuspend {
resumeDevice(baseName)
}
return fmt.Errorf("Can't set message %s", err)
}
return fmt.Errorf("Can't set message")
}
if err := task.Run(); err != nil {
if doSuspend {
resumeDevice(baseName)
dmSawExist = false
if err := task.Run(); err != nil {
if dmSawExist {
// Already exists, try next id
*deviceId++
continue
}
if doSuspend {
resumeDevice(baseName)
}
return fmt.Errorf("Error running DeviceCreate (createSnapDevice) %s", err)
}
return fmt.Errorf("Error running DeviceCreate (createSnapDevice)")
break
}
if doSuspend {

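createDevice and createSnapDevice above now take the device id by pointer and advance it whenever libdevmapper reports that the id already exists, and their callers wrap the next free id at 24 bits. The sketch below models only that control flow; sendCreateThin is an invented placeholder for the real "create_thin" target message, not a devicemapper call:

package main

import (
	"errors"
	"fmt"
)

// errExists stands in for the dmSawExist signal raised when libdevmapper
// logs a "File exists" message for the requested id.
var errExists = errors.New("device id already exists")

// sendCreateThin is a placeholder: it pretends ids below 3 are taken.
func sendCreateThin(id int) error {
	if id < 3 {
		return errExists
	}
	return nil
}

// allocateDevice mirrors the retry loop in createDevice/createSnapDevice:
// if the id is already in use, bump it and try again.
func allocateDevice(nextID *int) error {
	for {
		err := sendCreateThin(*nextID)
		if err == nil {
			return nil
		}
		if !errors.Is(err, errExists) {
			return err
		}
		*nextID++ // id is taken, try the next one
	}
}

func main() {
	id := 0
	if err := allocateDevice(&id); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("created device id", id)
	// Device ids are 24-bit, so the next free id wraps around.
	next := (id + 1) & 0xffffff
	fmt.Println("next device id", next)
}
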
View file

@ -4,12 +4,27 @@ package devmapper
import "C"
import (
"strings"
)
// Due to the way cgo works this has to be in a separate file, as devmapper.go has
// definitions in the cgo block, which is incompatible with using "//export"
//export DevmapperLogCallback
func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_class C.int, message *C.char) {
msg := C.GoString(message)
if level < 7 {
if strings.Contains(msg, "busy") {
dmSawBusy = true
}
if strings.Contains(msg, "File exists") {
dmSawExist = true
}
}
if dmLogger != nil {
dmLogger.log(int(level), C.GoString(file), int(line), int(dm_errno_or_class), C.GoString(message))
dmLogger.log(int(level), C.GoString(file), int(line), int(dm_errno_or_class), msg)
}
}

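The exported log callback above talks to its Go callers through package-level flags (dmSawBusy, dmSawExist), because libdevmapper only hands back free-form log text. Below is a cgo-free sketch of that flag-based signalling; the names and the simulated failure are invented to show the shape of the pattern, not the driver's API:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// sawBusy mirrors dmSawBusy: set by the log callback, inspected by the
// call site that just ran a devicemapper task.
var sawBusy bool

var errBusy = errors.New("Device is Busy")

// logCallback stands in for DevmapperLogCallback: it scans the message
// text because no structured error code is available at this point.
func logCallback(level int, message string) {
	if level < 7 && strings.Contains(message, "busy") {
		sawBusy = true
	}
}

// removeDevice shows the call-site side of the pattern: clear the flag,
// run the task, and translate a failure into ErrBusy if a "busy" message
// was logged while it ran.
func removeDevice(run func() error) error {
	sawBusy = false
	if err := run(); err != nil {
		if sawBusy {
			return errBusy
		}
		return err
	}
	return nil
}

func main() {
	err := removeDevice(func() error {
		logCallback(3, "device or resource busy") // simulated libdevmapper log line
		return errors.New("dm_task_run failed")   // simulated task failure
	})
	fmt.Println(err) // Device is Busy
}
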
View file

@ -3,285 +3,35 @@
package devmapper
import (
"github.com/dotcloud/docker/daemon/graphdriver/graphtest"
"testing"
)
func TestTaskCreate(t *testing.T) {
t.Skip("FIXME: not a unit test")
// Test success
taskCreate(t, DeviceInfo)
// Test Failure
DmTaskCreate = dmTaskCreateFail
defer func() { DmTaskCreate = dmTaskCreateFct }()
if task := TaskCreate(-1); task != nil {
t.Fatalf("An error should have occured while creating an invalid task.")
}
func init() {
// Reduce the size of the base fs and loopback for the tests
DefaultDataLoopbackSize = 300 * 1024 * 1024
DefaultMetaDataLoopbackSize = 200 * 1024 * 1024
DefaultBaseFsSize = 300 * 1024 * 1024
}
func TestTaskRun(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)
// Test success
// Perform the RUN
if err := task.Run(); err != nil {
t.Fatal(err)
}
// Make sure we don't have error with GetInfo
if _, err := task.GetInfo(); err != nil {
t.Fatal(err)
}
// Test failure
DmTaskRun = dmTaskRunFail
defer func() { DmTaskRun = dmTaskRunFct }()
task = taskCreate(t, DeviceInfo)
// Perform the RUN
if err := task.Run(); err != ErrTaskRun {
t.Fatalf("An error should have occured while running task.")
}
// Make sure GetInfo also fails
if _, err := task.GetInfo(); err != ErrTaskGetInfo {
t.Fatalf("GetInfo should fail if task.Run() failed.")
}
// This avoids creating a new driver for each test if all tests are run
// Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown
func TestDevmapperSetup(t *testing.T) {
graphtest.GetDriver(t, "devicemapper")
}
func TestTaskSetName(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)
// Test success
if err := task.SetName("test"); err != nil {
t.Fatal(err)
}
// Test failure
DmTaskSetName = dmTaskSetNameFail
defer func() { DmTaskSetName = dmTaskSetNameFct }()
if err := task.SetName("test"); err != ErrTaskSetName {
t.Fatalf("An error should have occured while runnign SetName.")
}
func TestDevmapperCreateEmpty(t *testing.T) {
graphtest.DriverTestCreateEmpty(t, "devicemapper")
}
func TestTaskSetMessage(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)
// Test success
if err := task.SetMessage("test"); err != nil {
t.Fatal(err)
}
// Test failure
DmTaskSetMessage = dmTaskSetMessageFail
defer func() { DmTaskSetMessage = dmTaskSetMessageFct }()
if err := task.SetMessage("test"); err != ErrTaskSetMessage {
t.Fatalf("An error should have occured while runnign SetMessage.")
}
func TestDevmapperCreateBase(t *testing.T) {
graphtest.DriverTestCreateBase(t, "devicemapper")
}
func TestTaskSetSector(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)
// Test success
if err := task.SetSector(128); err != nil {
t.Fatal(err)
}
DmTaskSetSector = dmTaskSetSectorFail
defer func() { DmTaskSetSector = dmTaskSetSectorFct }()
// Test failure
if err := task.SetSector(0); err != ErrTaskSetSector {
t.Fatalf("An error should have occured while running SetSector.")
}
func TestDevmapperCreateSnap(t *testing.T) {
graphtest.DriverTestCreateSnap(t, "devicemapper")
}
func TestTaskSetCookie(t *testing.T) {
t.Skip("FIXME: not a unit test")
var (
cookie uint = 0
task = taskCreate(t, DeviceInfo)
)
// Test success
if err := task.SetCookie(&cookie, 0); err != nil {
t.Fatal(err)
}
// Test failure
if err := task.SetCookie(nil, 0); err != ErrNilCookie {
t.Fatalf("An error should have occured while running SetCookie with nil cookie.")
}
DmTaskSetCookie = dmTaskSetCookieFail
defer func() { DmTaskSetCookie = dmTaskSetCookieFct }()
if err := task.SetCookie(&cookie, 0); err != ErrTaskSetCookie {
t.Fatalf("An error should have occured while running SetCookie.")
}
}
func TestTaskSetAddNode(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)
// Test success
if err := task.SetAddNode(0); err != nil {
t.Fatal(err)
}
// Test failure
if err := task.SetAddNode(-1); err != ErrInvalidAddNode {
t.Fatalf("An error should have occured running SetAddNode with wrong node.")
}
DmTaskSetAddNode = dmTaskSetAddNodeFail
defer func() { DmTaskSetAddNode = dmTaskSetAddNodeFct }()
if err := task.SetAddNode(0); err != ErrTaskSetAddNode {
t.Fatalf("An error should have occured running SetAddNode.")
}
}
func TestTaskSetRo(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)
// Test success
if err := task.SetRo(); err != nil {
t.Fatal(err)
}
// Test failure
DmTaskSetRo = dmTaskSetRoFail
defer func() { DmTaskSetRo = dmTaskSetRoFct }()
if err := task.SetRo(); err != ErrTaskSetRo {
t.Fatalf("An error should have occured running SetRo.")
}
}
func TestTaskAddTarget(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)
// Test success
if err := task.AddTarget(0, 128, "thinp", ""); err != nil {
t.Fatal(err)
}
// Test failure
DmTaskAddTarget = dmTaskAddTargetFail
defer func() { DmTaskAddTarget = dmTaskAddTargetFct }()
if err := task.AddTarget(0, 128, "thinp", ""); err != ErrTaskAddTarget {
t.Fatalf("An error should have occured running AddTarget.")
}
}
// func TestTaskGetInfo(t *testing.T) {
// task := taskCreate(t, DeviceInfo)
// // Test success
// if _, err := task.GetInfo(); err != nil {
// t.Fatal(err)
// }
// // Test failure
// DmTaskGetInfo = dmTaskGetInfoFail
// defer func() { DmTaskGetInfo = dmTaskGetInfoFct }()
// if _, err := task.GetInfo(); err != ErrTaskGetInfo {
// t.Fatalf("An error should have occured running GetInfo.")
// }
// }
// func TestTaskGetNextTarget(t *testing.T) {
// task := taskCreate(t, DeviceInfo)
// if next, _, _, _, _ := task.GetNextTarget(0); next == 0 {
// t.Fatalf("The next target should not be 0.")
// }
// }
/// Utils
func taskCreate(t *testing.T, taskType TaskType) *Task {
task := TaskCreate(taskType)
if task == nil {
t.Fatalf("Error creating task")
}
return task
}
/// Failure function replacement
func dmTaskCreateFail(t int) *CDmTask {
return nil
}
func dmTaskRunFail(task *CDmTask) int {
return -1
}
func dmTaskSetNameFail(task *CDmTask, name string) int {
return -1
}
func dmTaskSetMessageFail(task *CDmTask, message string) int {
return -1
}
func dmTaskSetSectorFail(task *CDmTask, sector uint64) int {
return -1
}
func dmTaskSetCookieFail(task *CDmTask, cookie *uint, flags uint16) int {
return -1
}
func dmTaskSetAddNodeFail(task *CDmTask, addNode AddNodeType) int {
return -1
}
func dmTaskSetRoFail(task *CDmTask) int {
return -1
}
func dmTaskAddTargetFail(task *CDmTask,
start, size uint64, ttype, params string) int {
return -1
}
func dmTaskGetInfoFail(task *CDmTask, info *Info) int {
return -1
}
func dmGetNextTargetFail(task *CDmTask, next uintptr, start, length *uint64,
target, params *string) uintptr {
return 0
}
func dmAttachLoopDeviceFail(filename string, fd *int) string {
return ""
}
func sysGetBlockSizeFail(fd uintptr, size *uint64) sysErrno {
return 1
}
func dmUdevWaitFail(cookie uint) int {
return -1
}
func dmSetDevDirFail(dir string) int {
return -1
}
func dmGetLibraryVersionFail(version *string) int {
return -1
func TestDevmapperTeardown(t *testing.T) {
graphtest.PutDriver(t)
}

View file

@ -85,23 +85,24 @@ const (
)
var (
DmGetLibraryVersion = dmGetLibraryVersionFct
DmGetNextTarget = dmGetNextTargetFct
DmLogInitVerbose = dmLogInitVerboseFct
DmSetDevDir = dmSetDevDirFct
DmTaskAddTarget = dmTaskAddTargetFct
DmTaskCreate = dmTaskCreateFct
DmTaskDestroy = dmTaskDestroyFct
DmTaskGetInfo = dmTaskGetInfoFct
DmTaskRun = dmTaskRunFct
DmTaskSetAddNode = dmTaskSetAddNodeFct
DmTaskSetCookie = dmTaskSetCookieFct
DmTaskSetMessage = dmTaskSetMessageFct
DmTaskSetName = dmTaskSetNameFct
DmTaskSetRo = dmTaskSetRoFct
DmTaskSetSector = dmTaskSetSectorFct
DmUdevWait = dmUdevWaitFct
LogWithErrnoInit = logWithErrnoInitFct
DmGetLibraryVersion = dmGetLibraryVersionFct
DmGetNextTarget = dmGetNextTargetFct
DmLogInitVerbose = dmLogInitVerboseFct
DmSetDevDir = dmSetDevDirFct
DmTaskAddTarget = dmTaskAddTargetFct
DmTaskCreate = dmTaskCreateFct
DmTaskDestroy = dmTaskDestroyFct
DmTaskGetInfo = dmTaskGetInfoFct
DmTaskGetDriverVersion = dmTaskGetDriverVersionFct
DmTaskRun = dmTaskRunFct
DmTaskSetAddNode = dmTaskSetAddNodeFct
DmTaskSetCookie = dmTaskSetCookieFct
DmTaskSetMessage = dmTaskSetMessageFct
DmTaskSetName = dmTaskSetNameFct
DmTaskSetRo = dmTaskSetRoFct
DmTaskSetSector = dmTaskSetSectorFct
DmUdevWait = dmUdevWaitFct
LogWithErrnoInit = logWithErrnoInitFct
)
func free(p *C.char) {
@ -184,6 +185,16 @@ func dmTaskGetInfoFct(task *CDmTask, info *Info) int {
return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
}
func dmTaskGetDriverVersionFct(task *CDmTask) string {
buffer := C.malloc(128)
defer C.free(buffer)
res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128)
if res == 0 {
return ""
}
return C.GoString((*C.char)(buffer))
}
func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr {
var (
Cstart, Clength C.uint64_t

View file

@ -9,6 +9,7 @@ import (
"path"
"github.com/dotcloud/docker/daemon/graphdriver"
"github.com/dotcloud/docker/pkg/mount"
"github.com/dotcloud/docker/utils"
)
@ -26,15 +27,21 @@ type Driver struct {
home string
}
var Init = func(home string) (graphdriver.Driver, error) {
deviceSet, err := NewDeviceSet(home, true)
func Init(home string, options []string) (graphdriver.Driver, error) {
deviceSet, err := NewDeviceSet(home, true, options)
if err != nil {
return nil, err
}
if err := graphdriver.MakePrivate(home); err != nil {
return nil, err
}
d := &Driver{
DeviceSet: deviceSet,
home: home,
}
return d, nil
}
@ -58,7 +65,13 @@ func (d *Driver) Status() [][2]string {
}
func (d *Driver) Cleanup() error {
return d.DeviceSet.Shutdown()
err := d.DeviceSet.Shutdown()
if err2 := mount.Unmount(d.home); err == nil {
err = err2
}
return err
}
func (d *Driver) Create(id, parent string) error {
@ -94,7 +107,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
mp := path.Join(d.home, "mnt", id)
// Create the target directories if they don't exist
if err := osMkdirAll(mp, 0755); err != nil && !osIsExist(err) {
if err := os.MkdirAll(mp, 0755); err != nil && !os.IsExist(err) {
return "", err
}
@ -104,13 +117,13 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
}
rootFs := path.Join(mp, "rootfs")
if err := osMkdirAll(rootFs, 0755); err != nil && !osIsExist(err) {
if err := os.MkdirAll(rootFs, 0755); err != nil && !os.IsExist(err) {
d.DeviceSet.UnmountDevice(id)
return "", err
}
idFile := path.Join(mp, "id")
if _, err := osStat(idFile); err != nil && osIsNotExist(err) {
if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
// Create an "id" file with the container/image id in it to help reconstruct this in case
// of later problems
if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {

View file

@ -1,880 +0,0 @@
// +build linux,amd64
package devmapper
import (
"fmt"
"github.com/dotcloud/docker/daemon/graphdriver"
"io/ioutil"
"path"
"runtime"
"strings"
"syscall"
"testing"
)
func init() {
// Reduce the size the the base fs and loopback for the tests
DefaultDataLoopbackSize = 300 * 1024 * 1024
DefaultMetaDataLoopbackSize = 200 * 1024 * 1024
DefaultBaseFsSize = 300 * 1024 * 1024
}
// denyAllDevmapper mocks all calls to libdevmapper in the unit tests, and denies them by default
func denyAllDevmapper() {
// Hijack all calls to libdevmapper with default panics.
// Authorized calls are selectively hijacked in each tests.
DmTaskCreate = func(t int) *CDmTask {
panic("DmTaskCreate: this method should not be called here")
}
DmTaskRun = func(task *CDmTask) int {
panic("DmTaskRun: this method should not be called here")
}
DmTaskSetName = func(task *CDmTask, name string) int {
panic("DmTaskSetName: this method should not be called here")
}
DmTaskSetMessage = func(task *CDmTask, message string) int {
panic("DmTaskSetMessage: this method should not be called here")
}
DmTaskSetSector = func(task *CDmTask, sector uint64) int {
panic("DmTaskSetSector: this method should not be called here")
}
DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int {
panic("DmTaskSetCookie: this method should not be called here")
}
DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int {
panic("DmTaskSetAddNode: this method should not be called here")
}
DmTaskSetRo = func(task *CDmTask) int {
panic("DmTaskSetRo: this method should not be called here")
}
DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int {
panic("DmTaskAddTarget: this method should not be called here")
}
DmTaskGetInfo = func(task *CDmTask, info *Info) int {
panic("DmTaskGetInfo: this method should not be called here")
}
DmGetNextTarget = func(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr {
panic("DmGetNextTarget: this method should not be called here")
}
DmUdevWait = func(cookie uint) int {
panic("DmUdevWait: this method should not be called here")
}
DmSetDevDir = func(dir string) int {
panic("DmSetDevDir: this method should not be called here")
}
DmGetLibraryVersion = func(version *string) int {
panic("DmGetLibraryVersion: this method should not be called here")
}
DmLogInitVerbose = func(level int) {
panic("DmLogInitVerbose: this method should not be called here")
}
DmTaskDestroy = func(task *CDmTask) {
panic("DmTaskDestroy: this method should not be called here")
}
LogWithErrnoInit = func() {
panic("LogWithErrnoInit: this method should not be called here")
}
}
func denyAllSyscall() {
sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) {
panic("sysMount: this method should not be called here")
}
sysUnmount = func(target string, flags int) (err error) {
panic("sysUnmount: this method should not be called here")
}
sysCloseOnExec = func(fd int) {
panic("sysCloseOnExec: this method should not be called here")
}
sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
panic("sysSyscall: this method should not be called here")
}
// Not a syscall, but forbidding it here anyway
Mounted = func(mnt string) (bool, error) {
panic("devmapper.Mounted: this method should not be called here")
}
// osOpenFile = os.OpenFile
// osNewFile = os.NewFile
// osCreate = os.Create
// osStat = os.Stat
// osIsNotExist = os.IsNotExist
// osIsExist = os.IsExist
// osMkdirAll = os.MkdirAll
// osRemoveAll = os.RemoveAll
// osRename = os.Rename
// osReadlink = os.Readlink
// execRun = func(name string, args ...string) error {
// return exec.Command(name, args...).Run()
// }
}
func mkTestDirectory(t *testing.T) string {
dir, err := ioutil.TempDir("", "docker-test-devmapper-")
if err != nil {
t.Fatal(err)
}
return dir
}
func newDriver(t *testing.T) *Driver {
home := mkTestDirectory(t)
d, err := Init(home)
if err != nil {
t.Fatal(err)
}
return d.(*Driver)
}
func cleanup(d *Driver) {
d.Cleanup()
osRemoveAll(d.home)
}
type Set map[string]bool
func (r Set) Assert(t *testing.T, names ...string) {
for _, key := range names {
required := true
if strings.HasPrefix(key, "?") {
key = key[1:]
required = false
}
if _, exists := r[key]; !exists && required {
t.Fatalf("Key not set: %s", key)
}
delete(r, key)
}
if len(r) != 0 {
t.Fatalf("Unexpected keys: %v", r)
}
}
func TestInit(t *testing.T) {
var (
calls = make(Set)
taskMessages = make(Set)
taskTypes = make(Set)
home = mkTestDirectory(t)
)
defer osRemoveAll(home)
func() {
denyAllDevmapper()
DmSetDevDir = func(dir string) int {
calls["DmSetDevDir"] = true
expectedDir := "/dev"
if dir != expectedDir {
t.Fatalf("Wrong libdevmapper call\nExpected: DmSetDevDir(%v)\nReceived: DmSetDevDir(%v)\n", expectedDir, dir)
}
return 0
}
LogWithErrnoInit = func() {
calls["DmLogWithErrnoInit"] = true
}
var task1 CDmTask
DmTaskCreate = func(taskType int) *CDmTask {
calls["DmTaskCreate"] = true
taskTypes[fmt.Sprintf("%d", taskType)] = true
return &task1
}
DmTaskSetName = func(task *CDmTask, name string) int {
calls["DmTaskSetName"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", expectedTask, task)
}
// FIXME: use Set.AssertRegexp()
if !strings.HasPrefix(name, "docker-") && !strings.HasPrefix(name, "/dev/mapper/docker-") ||
!strings.HasSuffix(name, "-pool") && !strings.HasSuffix(name, "-base") {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", "docker-...-pool", name)
}
return 1
}
DmTaskRun = func(task *CDmTask) int {
calls["DmTaskRun"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskRun(%v)\nReceived: DmTaskRun(%v)\n", expectedTask, task)
}
return 1
}
DmTaskGetInfo = func(task *CDmTask, info *Info) int {
calls["DmTaskGetInfo"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskGetInfo(%v)\nReceived: DmTaskGetInfo(%v)\n", expectedTask, task)
}
// This will crash if info is not dereferenceable
info.Exists = 0
return 1
}
DmTaskSetSector = func(task *CDmTask, sector uint64) int {
calls["DmTaskSetSector"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task)
}
if expectedSector := uint64(0); sector != expectedSector {
t.Fatalf("Wrong libdevmapper call to DmTaskSetSector\nExpected: %v\nReceived: %v\n", expectedSector, sector)
}
return 1
}
DmTaskSetMessage = func(task *CDmTask, message string) int {
calls["DmTaskSetMessage"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task)
}
taskMessages[message] = true
return 1
}
DmTaskDestroy = func(task *CDmTask) {
calls["DmTaskDestroy"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task)
}
}
DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int {
calls["DmTaskSetTarget"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task)
}
if start != 0 {
t.Fatalf("Wrong start: %d != %d", start, 0)
}
if ttype != "thin" && ttype != "thin-pool" {
t.Fatalf("Wrong ttype: %s", ttype)
}
// Quick smoke test
if params == "" {
t.Fatalf("Params should not be empty")
}
return 1
}
fakeCookie := uint(4321)
DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int {
calls["DmTaskSetCookie"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task)
}
if flags != 0 {
t.Fatalf("Cookie flags should be 0 (not %x)", flags)
}
*cookie = fakeCookie
return 1
}
DmUdevWait = func(cookie uint) int {
calls["DmUdevWait"] = true
if cookie != fakeCookie {
t.Fatalf("Wrong cookie: %d != %d", cookie, fakeCookie)
}
return 1
}
DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int {
if addNode != AddNodeOnCreate {
t.Fatalf("Wrong AddNoteType: %v (expected %v)", addNode, AddNodeOnCreate)
}
calls["DmTaskSetAddNode"] = true
return 1
}
execRun = func(name string, args ...string) error {
calls["execRun"] = true
if name != "mkfs.ext4" {
t.Fatalf("Expected %s to be executed, not %s", "mkfs.ext4", name)
}
return nil
}
driver, err := Init(home)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := driver.Cleanup(); err != nil {
t.Fatal(err)
}
}()
}()
// Put all tests in a function to make sure the garbage collection will
// occur.
// Call GC to cleanup runtime.Finalizers
runtime.GC()
calls.Assert(t,
"DmSetDevDir",
"DmLogWithErrnoInit",
"DmTaskSetName",
"DmTaskRun",
"DmTaskGetInfo",
"DmTaskDestroy",
"execRun",
"DmTaskCreate",
"DmTaskSetTarget",
"DmTaskSetCookie",
"DmUdevWait",
"DmTaskSetSector",
"DmTaskSetMessage",
"DmTaskSetAddNode",
)
taskTypes.Assert(t, "0", "6", "17")
taskMessages.Assert(t, "create_thin 0", "set_transaction_id 0 1")
}
func fakeInit() func(home string) (graphdriver.Driver, error) {
oldInit := Init
Init = func(home string) (graphdriver.Driver, error) {
return &Driver{
home: home,
}, nil
}
return oldInit
}
func restoreInit(init func(home string) (graphdriver.Driver, error)) {
Init = init
}
func mockAllDevmapper(calls Set) {
DmSetDevDir = func(dir string) int {
calls["DmSetDevDir"] = true
return 0
}
LogWithErrnoInit = func() {
calls["DmLogWithErrnoInit"] = true
}
DmTaskCreate = func(taskType int) *CDmTask {
calls["DmTaskCreate"] = true
return &CDmTask{}
}
DmTaskSetName = func(task *CDmTask, name string) int {
calls["DmTaskSetName"] = true
return 1
}
DmTaskRun = func(task *CDmTask) int {
calls["DmTaskRun"] = true
return 1
}
DmTaskGetInfo = func(task *CDmTask, info *Info) int {
calls["DmTaskGetInfo"] = true
return 1
}
DmTaskSetSector = func(task *CDmTask, sector uint64) int {
calls["DmTaskSetSector"] = true
return 1
}
DmTaskSetMessage = func(task *CDmTask, message string) int {
calls["DmTaskSetMessage"] = true
return 1
}
DmTaskDestroy = func(task *CDmTask) {
calls["DmTaskDestroy"] = true
}
DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int {
calls["DmTaskSetTarget"] = true
return 1
}
DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int {
calls["DmTaskSetCookie"] = true
return 1
}
DmUdevWait = func(cookie uint) int {
calls["DmUdevWait"] = true
return 1
}
DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int {
calls["DmTaskSetAddNode"] = true
return 1
}
execRun = func(name string, args ...string) error {
calls["execRun"] = true
return nil
}
}
func TestDriverName(t *testing.T) {
denyAllDevmapper()
defer denyAllDevmapper()
oldInit := fakeInit()
defer restoreInit(oldInit)
d := newDriver(t)
if d.String() != "devicemapper" {
t.Fatalf("Expected driver name to be devicemapper got %s", d.String())
}
}
func TestDriverCreate(t *testing.T) {
denyAllDevmapper()
denyAllSyscall()
defer denyAllSyscall()
defer denyAllDevmapper()
calls := make(Set)
mockAllDevmapper(calls)
sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) {
calls["sysMount"] = true
// FIXME: compare the exact source and target strings (inodes + devname)
if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source)
}
if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target)
}
if expectedFstype := "ext4"; fstype != expectedFstype {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype)
}
if expectedFlags := uintptr(3236757504); flags != expectedFlags {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags)
}
return nil
}
sysUnmount = func(target string, flag int) error {
//calls["sysUnmount"] = true
return nil
}
Mounted = func(mnt string) (bool, error) {
calls["Mounted"] = true
if !strings.HasPrefix(mnt, "/tmp/docker-test-devmapper-") || !strings.HasSuffix(mnt, "/mnt/1") {
t.Fatalf("Wrong mounted call\nExpected: Mounted(%v)\nReceived: Mounted(%v)\n", "/tmp/docker-test-devmapper-.../mnt/1", mnt)
}
return false, nil
}
sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
calls["sysSyscall"] = true
if trap != sysSysIoctl {
t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap)
}
switch a2 {
case LoopSetFd:
calls["ioctl.loopsetfd"] = true
case LoopCtlGetFree:
calls["ioctl.loopctlgetfree"] = true
case LoopGetStatus64:
calls["ioctl.loopgetstatus"] = true
case LoopSetStatus64:
calls["ioctl.loopsetstatus"] = true
case LoopClrFd:
calls["ioctl.loopclrfd"] = true
case LoopSetCapacity:
calls["ioctl.loopsetcapacity"] = true
case BlkGetSize64:
calls["ioctl.blkgetsize"] = true
default:
t.Fatalf("Unexpected IOCTL. Received %d", a2)
}
return 0, 0, 0
}
func() {
d := newDriver(t)
calls.Assert(t,
"DmSetDevDir",
"DmLogWithErrnoInit",
"DmTaskSetName",
"DmTaskRun",
"DmTaskGetInfo",
"execRun",
"DmTaskCreate",
"DmTaskSetTarget",
"DmTaskSetCookie",
"DmUdevWait",
"DmTaskSetSector",
"DmTaskSetMessage",
"DmTaskSetAddNode",
"sysSyscall",
"ioctl.blkgetsize",
"ioctl.loopsetfd",
"ioctl.loopsetstatus",
"?ioctl.loopctlgetfree",
)
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}
calls.Assert(t,
"DmTaskCreate",
"DmTaskGetInfo",
"DmTaskRun",
"DmTaskSetSector",
"DmTaskSetName",
"DmTaskSetMessage",
)
}()
runtime.GC()
calls.Assert(t,
"DmTaskDestroy",
)
}
func TestDriverRemove(t *testing.T) {
denyAllDevmapper()
denyAllSyscall()
defer denyAllSyscall()
defer denyAllDevmapper()
calls := make(Set)
mockAllDevmapper(calls)
sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) {
calls["sysMount"] = true
// FIXME: compare the exact source and target strings (inodes + devname)
if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source)
}
if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target)
}
if expectedFstype := "ext4"; fstype != expectedFstype {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype)
}
if expectedFlags := uintptr(3236757504); flags != expectedFlags {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags)
}
return nil
}
sysUnmount = func(target string, flags int) (err error) {
// FIXME: compare the exact source and target strings (inodes + devname)
if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target)
}
if expectedFlags := 0; flags != expectedFlags {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags)
}
return nil
}
Mounted = func(mnt string) (bool, error) {
calls["Mounted"] = true
return false, nil
}
sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
calls["sysSyscall"] = true
if trap != sysSysIoctl {
t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap)
}
switch a2 {
case LoopSetFd:
calls["ioctl.loopsetfd"] = true
case LoopCtlGetFree:
calls["ioctl.loopctlgetfree"] = true
case LoopGetStatus64:
calls["ioctl.loopgetstatus"] = true
case LoopSetStatus64:
calls["ioctl.loopsetstatus"] = true
case LoopClrFd:
calls["ioctl.loopclrfd"] = true
case LoopSetCapacity:
calls["ioctl.loopsetcapacity"] = true
case BlkGetSize64:
calls["ioctl.blkgetsize"] = true
default:
t.Fatalf("Unexpected IOCTL. Received %d", a2)
}
return 0, 0, 0
}
func() {
d := newDriver(t)
calls.Assert(t,
"DmSetDevDir",
"DmLogWithErrnoInit",
"DmTaskSetName",
"DmTaskRun",
"DmTaskGetInfo",
"execRun",
"DmTaskCreate",
"DmTaskSetTarget",
"DmTaskSetCookie",
"DmUdevWait",
"DmTaskSetSector",
"DmTaskSetMessage",
"DmTaskSetAddNode",
"sysSyscall",
"ioctl.blkgetsize",
"ioctl.loopsetfd",
"ioctl.loopsetstatus",
"?ioctl.loopctlgetfree",
)
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}
calls.Assert(t,
"DmTaskCreate",
"DmTaskGetInfo",
"DmTaskRun",
"DmTaskSetSector",
"DmTaskSetName",
"DmTaskSetMessage",
)
Mounted = func(mnt string) (bool, error) {
calls["Mounted"] = true
return true, nil
}
if err := d.Remove("1"); err != nil {
t.Fatal(err)
}
calls.Assert(t,
"DmTaskRun",
"DmTaskSetSector",
"DmTaskSetName",
"DmTaskSetMessage",
"DmTaskCreate",
"DmTaskGetInfo",
"DmTaskSetCookie",
"DmTaskSetTarget",
"DmTaskSetAddNode",
"DmUdevWait",
)
}()
runtime.GC()
calls.Assert(t,
"DmTaskDestroy",
)
}
func TestCleanup(t *testing.T) {
t.Skip("FIXME: not a unit test")
t.Skip("Unimplemented")
d := newDriver(t)
defer osRemoveAll(d.home)
mountPoints := make([]string, 2)
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}
// Mount the id
p, err := d.Get("1", "")
if err != nil {
t.Fatal(err)
}
mountPoints[0] = p
if err := d.Create("2", "1"); err != nil {
t.Fatal(err)
}
p, err = d.Get("2", "")
if err != nil {
t.Fatal(err)
}
mountPoints[1] = p
// Ensure that all the mount points are currently mounted
for _, p := range mountPoints {
if mounted, err := Mounted(p); err != nil {
t.Fatal(err)
} else if !mounted {
t.Fatalf("Expected %s to be mounted", p)
}
}
// Ensure that devices are active
for _, p := range []string{"1", "2"} {
if !d.HasActivatedDevice(p) {
t.Fatalf("Expected %s to have an active device", p)
}
}
if err := d.Cleanup(); err != nil {
t.Fatal(err)
}
// Ensure that all the mount points are no longer mounted
for _, p := range mountPoints {
if mounted, err := Mounted(p); err != nil {
t.Fatal(err)
} else if mounted {
t.Fatalf("Expected %s to not be mounted", p)
}
}
// Ensure that devices are no longer activated
for _, p := range []string{"1", "2"} {
if d.HasActivatedDevice(p) {
t.Fatalf("Expected %s not be an active device", p)
}
}
}
func TestNotMounted(t *testing.T) {
t.Skip("FIXME: not a unit test")
t.Skip("Not implemented")
d := newDriver(t)
defer cleanup(d)
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}
mounted, err := Mounted(path.Join(d.home, "mnt", "1"))
if err != nil {
t.Fatal(err)
}
if mounted {
t.Fatal("Id 1 should not be mounted")
}
}
func TestMounted(t *testing.T) {
t.Skip("FIXME: not a unit test")
d := newDriver(t)
defer cleanup(d)
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}
if _, err := d.Get("1", ""); err != nil {
t.Fatal(err)
}
mounted, err := Mounted(path.Join(d.home, "mnt", "1"))
if err != nil {
t.Fatal(err)
}
if !mounted {
t.Fatal("Id 1 should be mounted")
}
}
func TestInitCleanedDriver(t *testing.T) {
t.Skip("FIXME: not a unit test")
d := newDriver(t)
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}
if _, err := d.Get("1", ""); err != nil {
t.Fatal(err)
}
if err := d.Cleanup(); err != nil {
t.Fatal(err)
}
driver, err := Init(d.home)
if err != nil {
t.Fatal(err)
}
d = driver.(*Driver)
defer cleanup(d)
if _, err := d.Get("1", ""); err != nil {
t.Fatal(err)
}
}
func TestMountMountedDriver(t *testing.T) {
t.Skip("FIXME: not a unit test")
d := newDriver(t)
defer cleanup(d)
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}
// Perform get on same id to ensure that it will
// not be mounted twice
if _, err := d.Get("1", ""); err != nil {
t.Fatal(err)
}
if _, err := d.Get("1", ""); err != nil {
t.Fatal(err)
}
}
func TestGetReturnsValidDevice(t *testing.T) {
t.Skip("FIXME: not a unit test")
d := newDriver(t)
defer cleanup(d)
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}
if !d.HasDevice("1") {
t.Fatalf("Expected id 1 to be in device set")
}
if _, err := d.Get("1", ""); err != nil {
t.Fatal(err)
}
if !d.HasActivatedDevice("1") {
t.Fatalf("Expected id 1 to be activated")
}
if !d.HasInitializedDevice("1") {
t.Fatalf("Expected id 1 to be initialized")
}
}
func TestDriverGetSize(t *testing.T) {
t.Skip("FIXME: not a unit test")
t.Skipf("Size is currently not implemented")
d := newDriver(t)
defer cleanup(d)
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}
mountPoint, err := d.Get("1", "")
if err != nil {
t.Fatal(err)
}
size := int64(1024)
f, err := osCreate(path.Join(mountPoint, "test_file"))
if err != nil {
t.Fatal(err)
}
if err := f.Truncate(size); err != nil {
t.Fatal(err)
}
f.Close()
// diffSize, err := d.DiffSize("1")
// if err != nil {
// t.Fatal(err)
// }
// if diffSize != size {
// t.Fatalf("Expected size %d got %d", size, diffSize)
// }
}
func assertMap(t *testing.T, m map[string]bool, keys ...string) {
for _, key := range keys {
if _, exists := m[key]; !exists {
t.Fatalf("Key not set: %s", key)
}
delete(m, key)
}
if len(m) != 0 {
t.Fatalf("Unexpected keys: %v", m)
}
}
@ -3,11 +3,12 @@
package devmapper
import (
"syscall"
"unsafe"
)
func ioctlLoopCtlGetFree(fd uintptr) (int, error) {
index, _, err := sysSyscall(sysSysIoctl, fd, LoopCtlGetFree, 0)
index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0)
if err != 0 {
return 0, err
}
@ -15,21 +16,21 @@ func ioctlLoopCtlGetFree(fd uintptr) (int, error) {
}
func ioctlLoopSetFd(loopFd, sparseFd uintptr) error {
if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetFd, sparseFd); err != 0 {
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 {
return err
}
return nil
}
func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *LoopInfo64) error {
if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
return err
}
return nil
}
func ioctlLoopClrFd(loopFd uintptr) error {
if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopClrFd, 0); err != 0 {
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 {
return err
}
return nil
@ -38,14 +39,14 @@ func ioctlLoopClrFd(loopFd uintptr) error {
func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) {
loopInfo := &LoopInfo64{}
if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
return nil, err
}
return loopInfo, nil
}
func ioctlLoopSetCapacity(loopFd uintptr, value int) error {
if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetCapacity, uintptr(value)); err != 0 {
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 {
return err
}
return nil
@ -53,7 +54,7 @@ func ioctlLoopSetCapacity(loopFd uintptr, value int) error {
func ioctlBlkGetSize64(fd uintptr) (int64, error) {
var size int64
if _, _, err := sysSyscall(sysSysIoctl, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 {
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 {
return 0, err
}
return size, nil
@ -64,7 +65,7 @@ func ioctlBlkDiscard(fd uintptr, offset, length uint64) error {
r[0] = offset
r[1] = length
if _, _, err := sysSyscall(sysSysIoctl, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 {
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 {
return err
}
return nil
@ -3,25 +3,84 @@
package devmapper
import (
"bytes"
"fmt"
"os"
"path/filepath"
"syscall"
)
// FIXME: this is copy-pasted from the aufs driver.
// It should be moved into the core.
var Mounted = func(mountpoint string) (bool, error) {
mntpoint, err := osStat(mountpoint)
func Mounted(mountpoint string) (bool, error) {
mntpoint, err := os.Stat(mountpoint)
if err != nil {
if osIsNotExist(err) {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
parent, err := osStat(filepath.Join(mountpoint, ".."))
parent, err := os.Stat(filepath.Join(mountpoint, ".."))
if err != nil {
return false, err
}
mntpointSt := toSysStatT(mntpoint.Sys())
parentSt := toSysStatT(parent.Sys())
mntpointSt := mntpoint.Sys().(*syscall.Stat_t)
parentSt := parent.Sys().(*syscall.Stat_t)
return mntpointSt.Dev != parentSt.Dev, nil
}
type probeData struct {
fsName string
magic string
offset uint64
}
func ProbeFsType(device string) (string, error) {
probes := []probeData{
{"btrfs", "_BHRfS_M", 0x10040},
{"ext4", "\123\357", 0x438},
{"xfs", "XFSB", 0},
}
maxLen := uint64(0)
for _, p := range probes {
l := p.offset + uint64(len(p.magic))
if l > maxLen {
maxLen = l
}
}
file, err := os.Open(device)
if err != nil {
return "", err
}
buffer := make([]byte, maxLen)
l, err := file.Read(buffer)
if err != nil {
return "", err
}
file.Close()
if uint64(l) != maxLen {
return "", fmt.Errorf("unable to detect filesystem type of %s, short read", device)
}
for _, p := range probes {
if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) {
return p.fsName, nil
}
}
return "", fmt.Errorf("Unknown filesystem type on %s", device)
}
func joinMountOptions(a, b string) string {
if a == "" {
return b
}
if b == "" {
return a
}
return a + "," + b
}
@ -1,57 +0,0 @@
// +build linux,amd64
package devmapper
import (
"os"
"os/exec"
"syscall"
)
type (
sysStatT syscall.Stat_t
sysErrno syscall.Errno
osFile struct{ *os.File }
)
var (
sysMount = syscall.Mount
sysUnmount = syscall.Unmount
sysCloseOnExec = syscall.CloseOnExec
sysSyscall = syscall.Syscall
osOpenFile = func(name string, flag int, perm os.FileMode) (*osFile, error) {
f, err := os.OpenFile(name, flag, perm)
return &osFile{File: f}, err
}
osOpen = func(name string) (*osFile, error) { f, err := os.Open(name); return &osFile{File: f}, err }
osNewFile = os.NewFile
osCreate = os.Create
osStat = os.Stat
osIsNotExist = os.IsNotExist
osIsExist = os.IsExist
osMkdirAll = os.MkdirAll
osRemoveAll = os.RemoveAll
osRename = os.Rename
osReadlink = os.Readlink
execRun = func(name string, args ...string) error { return exec.Command(name, args...).Run() }
)
const (
sysMsMgcVal = syscall.MS_MGC_VAL
sysMsRdOnly = syscall.MS_RDONLY
sysEInval = syscall.EINVAL
sysSysIoctl = syscall.SYS_IOCTL
sysEBusy = syscall.EBUSY
osORdOnly = os.O_RDONLY
osORdWr = os.O_RDWR
osOCreate = os.O_CREATE
osModeDevice = os.ModeDevice
)
func toSysStatT(i interface{}) *sysStatT {
return (*sysStatT)(i.(*syscall.Stat_t))
}
@ -1,14 +1,23 @@
package graphdriver
import (
"errors"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/utils"
"os"
"path"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/pkg/mount"
)
type InitFunc func(root string) (Driver, error)
type FsMagic uint64
const (
FsMagicBtrfs = FsMagic(0x9123683E)
FsMagicAufs = FsMagic(0x61756673)
)
type InitFunc func(root string, options []string) (Driver, error)
type Driver interface {
String() string
@ -43,6 +52,10 @@ var (
"devicemapper",
"vfs",
}
ErrNotSupported = errors.New("driver not supported")
ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
)
func init() {
@ -58,35 +71,56 @@ func Register(name string, initFunc InitFunc) error {
return nil
}
func GetDriver(name, home string) (Driver, error) {
func GetDriver(name, home string, options []string) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
return initFunc(path.Join(home, name))
return initFunc(path.Join(home, name), options)
}
return nil, fmt.Errorf("No such driver: %s", name)
return nil, ErrNotSupported
}
func New(root string) (driver Driver, err error) {
func New(root string, options []string) (driver Driver, err error) {
for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} {
if name != "" {
return GetDriver(name, root)
return GetDriver(name, root, options)
}
}
// Check for priority drivers first
for _, name := range priority {
if driver, err = GetDriver(name, root); err != nil {
utils.Debugf("Error loading driver %s: %s", name, err)
continue
driver, err = GetDriver(name, root, options)
if err != nil {
if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS {
continue
}
return nil, err
}
return driver, nil
}
// Check all registered drivers if no priority driver is found
for _, initFunc := range drivers {
if driver, err = initFunc(root); err != nil {
continue
if driver, err = initFunc(root, options); err != nil {
if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS {
continue
}
return nil, err
}
return driver, nil
}
return nil, err
return nil, fmt.Errorf("No supported storage backend found")
}
func MakePrivate(mountPoint string) error {
mounted, err := mount.Mounted(mountPoint)
if err != nil {
return err
}
if !mounted {
if err := mount.Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
return err
}
}
return mount.ForceMount("", mountPoint, "none", "private")
}
@ -0,0 +1,228 @@
package graphtest
import (
"github.com/dotcloud/docker/daemon/graphdriver"
"io/ioutil"
"os"
"path"
"syscall"
"testing"
)
var (
drv *Driver
)
type Driver struct {
graphdriver.Driver
root string
refCount int
}
func newDriver(t *testing.T, name string) *Driver {
root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-")
if err != nil {
t.Fatal(err)
}
if err := os.MkdirAll(root, 0755); err != nil {
t.Fatal(err)
}
d, err := graphdriver.GetDriver(name, root, nil)
if err != nil {
if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites {
t.Skip("Driver %s not supported", name)
}
t.Fatal(err)
}
return &Driver{d, root, 1}
}
func cleanup(t *testing.T, d *Driver) {
if err := drv.Cleanup(); err != nil {
t.Fatal(err)
}
os.RemoveAll(d.root)
}
func GetDriver(t *testing.T, name string) graphdriver.Driver {
if drv == nil {
drv = newDriver(t, name)
} else {
drv.refCount++
}
return drv
}
func PutDriver(t *testing.T) {
if drv == nil {
t.Skip("No driver to put!")
}
drv.refCount--
if drv.refCount == 0 {
cleanup(t, drv)
drv = nil
}
}
func verifyFile(t *testing.T, path string, mode os.FileMode, uid, gid uint32) {
fi, err := os.Stat(path)
if err != nil {
t.Fatal(err)
}
if fi.Mode()&os.ModeType != mode&os.ModeType {
t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType)
}
if fi.Mode()&os.ModePerm != mode&os.ModePerm {
t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm)
}
if fi.Mode()&os.ModeSticky != mode&os.ModeSticky {
t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky)
}
if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid {
t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid)
}
if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid {
t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid)
}
if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
if stat.Uid != uid {
t.Fatal("%s no owned by uid %d", path, uid)
}
if stat.Gid != gid {
t.Fatal("%s not owned by gid %d", path, gid)
}
}
}
// Creates a new image and verifies that it is empty and has the right metadata
func DriverTestCreateEmpty(t *testing.T, drivername string) {
driver := GetDriver(t, drivername)
defer PutDriver(t)
if err := driver.Create("empty", ""); err != nil {
t.Fatal(err)
}
if !driver.Exists("empty") {
t.Fatal("Newly created image doesn't exist")
}
dir, err := driver.Get("empty", "")
if err != nil {
t.Fatal(err)
}
verifyFile(t, dir, 0755|os.ModeDir, 0, 0)
// Verify that the directory is empty
fis, err := ioutil.ReadDir(dir)
if err != nil {
t.Fatal(err)
}
if len(fis) != 0 {
t.Fatal("New directory not empty")
}
driver.Put("empty")
if err := driver.Remove("empty"); err != nil {
t.Fatal(err)
}
}
func createBase(t *testing.T, driver graphdriver.Driver, name string) {
// We need to be able to set any perms
oldmask := syscall.Umask(0)
defer syscall.Umask(oldmask)
if err := driver.Create(name, ""); err != nil {
t.Fatal(err)
}
dir, err := driver.Get(name, "")
if err != nil {
t.Fatal(err)
}
defer driver.Put(name)
subdir := path.Join(dir, "a subdir")
if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil {
t.Fatal(err)
}
if err := os.Chown(subdir, 1, 2); err != nil {
t.Fatal(err)
}
file := path.Join(dir, "a file")
if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil {
t.Fatal(err)
}
}
func verifyBase(t *testing.T, driver graphdriver.Driver, name string) {
dir, err := driver.Get(name, "")
if err != nil {
t.Fatal(err)
}
defer driver.Put(name)
subdir := path.Join(dir, "a subdir")
verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2)
file := path.Join(dir, "a file")
verifyFile(t, file, 0222|os.ModeSetuid, 0, 0)
fis, err := ioutil.ReadDir(dir)
if err != nil {
t.Fatal(err)
}
if len(fis) != 2 {
t.Fatal("Unexpected files in base image")
}
}
func DriverTestCreateBase(t *testing.T, drivername string) {
driver := GetDriver(t, drivername)
defer PutDriver(t)
createBase(t, driver, "Base")
verifyBase(t, driver, "Base")
if err := driver.Remove("Base"); err != nil {
t.Fatal(err)
}
}
func DriverTestCreateSnap(t *testing.T, drivername string) {
driver := GetDriver(t, drivername)
defer PutDriver(t)
createBase(t, driver, "Base")
if err := driver.Create("Snap", "Base"); err != nil {
t.Fatal(err)
}
verifyBase(t, driver, "Snap")
if err := driver.Remove("Snap"); err != nil {
t.Fatal(err)
}
if err := driver.Remove("Base"); err != nil {
t.Fatal(err)
}
}
@ -12,7 +12,7 @@ func init() {
graphdriver.Register("vfs", Init)
}
func Init(home string) (graphdriver.Driver, error) {
func Init(home string, options []string) (graphdriver.Driver, error) {
d := &Driver{
home: home,
}
@ -47,7 +47,7 @@ func (d *Driver) Create(id, parent string) error {
if err := os.MkdirAll(path.Dir(dir), 0700); err != nil {
return err
}
if err := os.Mkdir(dir, 0700); err != nil {
if err := os.Mkdir(dir, 0755); err != nil {
return err
}
if parent == "" {

View file

@ -0,0 +1,28 @@
package vfs
import (
"github.com/dotcloud/docker/daemon/graphdriver/graphtest"
"testing"
)
// This avoids creating a new driver for each test if all tests are run
// Make sure to put new tests between TestVfsSetup and TestVfsTeardown
func TestVfsSetup(t *testing.T) {
graphtest.GetDriver(t, "vfs")
}
func TestVfsCreateEmpty(t *testing.T) {
graphtest.DriverTestCreateEmpty(t, "vfs")
}
func TestVfsCreateBase(t *testing.T) {
graphtest.DriverTestCreateBase(t, "vfs")
}
func TestVfsCreateSnap(t *testing.T) {
graphtest.DriverTestCreateSnap(t, "vfs")
}
func TestVfsTeardown(t *testing.T) {
graphtest.PutDriver(t)
}
@ -26,5 +26,8 @@ func (history *History) Swap(i, j int) {
func (history *History) Add(container *Container) {
*history = append(*history, container)
}
func (history *History) Sort() {
sort.Sort(history)
}
daemon/inspect.go (new file, 54 lines)
@ -0,0 +1,54 @@
package daemon
import (
"encoding/json"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/runconfig"
)
func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
if len(job.Args) != 1 {
return job.Errorf("usage: %s NAME", job.Name)
}
name := job.Args[0]
if container := daemon.Get(name); container != nil {
if job.GetenvBool("dirty") {
b, err := json.Marshal(&struct {
*Container
HostConfig *runconfig.HostConfig
}{container, container.HostConfig()})
if err != nil {
return job.Error(err)
}
job.Stdout.Write(b)
return engine.StatusOK
}
out := &engine.Env{}
out.Set("Id", container.ID)
out.SetAuto("Created", container.Created)
out.Set("Path", container.Path)
out.SetList("Args", container.Args)
out.SetJson("Config", container.Config)
out.SetJson("State", container.State)
out.Set("Image", container.Image)
out.SetJson("NetworkSettings", container.NetworkSettings)
out.Set("ResolvConfPath", container.ResolvConfPath)
out.Set("HostnamePath", container.HostnamePath)
out.Set("HostsPath", container.HostsPath)
out.Set("Name", container.Name)
out.Set("Driver", container.Driver)
out.Set("ExecDriver", container.ExecDriver)
out.Set("MountLabel", container.MountLabel)
out.Set("ProcessLabel", container.ProcessLabel)
out.SetJson("Volumes", container.Volumes)
out.SetJson("VolumesRW", container.VolumesRW)
out.SetJson("HostConfig", container.hostConfig)
if _, err := out.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
return engine.StatusOK
}
return job.Errorf("No such container: %s", name)
}
@ -23,7 +23,7 @@ func (settings *NetworkSettings) PortMappingAPI() *engine.Table {
p, _ := nat.ParsePort(port.Port())
if len(bindings) == 0 {
out := &engine.Env{}
out.SetInt("PublicPort", p)
out.SetInt("PrivatePort", p)
out.Set("Type", port.Proto())
outs.Add(out)
continue
@ -6,6 +6,7 @@ import (
"log"
"net"
"strings"
"sync"
"github.com/dotcloud/docker/daemon/networkdriver"
"github.com/dotcloud/docker/daemon/networkdriver/ipallocator"
@ -28,6 +29,24 @@ type networkInterface struct {
PortMappings []net.Addr // there are mappings to the host interfaces
}
type ifaces struct {
c map[string]*networkInterface
sync.Mutex
}
func (i *ifaces) Set(key string, n *networkInterface) {
i.Lock()
i.c[key] = n
i.Unlock()
}
func (i *ifaces) Get(key string) *networkInterface {
i.Lock()
res := i.c[key]
i.Unlock()
return res
}
var (
addrs = []string{
// Here we don't follow the convention of using the 1st IP of the range for the gateway.
@ -53,7 +72,7 @@ var (
bridgeNetwork *net.IPNet
defaultBindingIP = net.ParseIP("0.0.0.0")
currentInterfaces = make(map[string]*networkInterface)
currentInterfaces = ifaces{c: make(map[string]*networkInterface)}
)
func InitDriver(job *engine.Job) engine.Status {
@ -321,9 +340,9 @@ func Allocate(job *engine.Job) engine.Status {
size, _ := bridgeNetwork.Mask.Size()
out.SetInt("IPPrefixLen", size)
currentInterfaces[id] = &networkInterface{
currentInterfaces.Set(id, &networkInterface{
IP: *ip,
}
})
out.WriteTo(job.Stdout)
@ -334,7 +353,7 @@ func Allocate(job *engine.Job) engine.Status {
func Release(job *engine.Job) engine.Status {
var (
id = job.Args[0]
containerInterface = currentInterfaces[id]
containerInterface = currentInterfaces.Get(id)
ip net.IP
port int
proto string
@ -380,39 +399,55 @@ func AllocatePort(job *engine.Job) engine.Status {
ip = defaultBindingIP
id = job.Args[0]
hostIP = job.Getenv("HostIP")
hostPort = job.GetenvInt("HostPort")
origHostPort = job.GetenvInt("HostPort")
containerPort = job.GetenvInt("ContainerPort")
proto = job.Getenv("Proto")
network = currentInterfaces[id]
network = currentInterfaces.Get(id)
)
if hostIP != "" {
ip = net.ParseIP(hostIP)
}
// host ip, proto, and host port
hostPort, err = portallocator.RequestPort(ip, proto, hostPort)
if err != nil {
return job.Error(err)
}
var (
hostPort int
container net.Addr
host net.Addr
)
if proto == "tcp" {
host = &net.TCPAddr{IP: ip, Port: hostPort}
container = &net.TCPAddr{IP: network.IP, Port: containerPort}
} else {
host = &net.UDPAddr{IP: ip, Port: hostPort}
container = &net.UDPAddr{IP: network.IP, Port: containerPort}
/*
Try up to 10 times to get a port that's not already allocated.
In the event of failure to bind, return the error that portmapper.Map
yields.
*/
for i := 0; i < 10; i++ {
// host ip, proto, and host port
hostPort, err = portallocator.RequestPort(ip, proto, origHostPort)
if err != nil {
return job.Error(err)
}
if proto == "tcp" {
host = &net.TCPAddr{IP: ip, Port: hostPort}
container = &net.TCPAddr{IP: network.IP, Port: containerPort}
} else {
host = &net.UDPAddr{IP: ip, Port: hostPort}
container = &net.UDPAddr{IP: network.IP, Port: containerPort}
}
if err = portmapper.Map(container, ip, hostPort); err == nil {
break
}
job.Logf("Failed to bind %s:%d for container address %s:%d. Trying another port.", ip.String(), hostPort, network.IP.String(), containerPort)
}
if err := portmapper.Map(container, ip, hostPort); err != nil {
portallocator.ReleasePort(ip, proto, hostPort)
if err != nil {
return job.Error(err)
}
network.PortMappings = append(network.PortMappings, host)
out := engine.Env{}
@ -4,12 +4,21 @@ import (
"encoding/binary"
"errors"
"github.com/dotcloud/docker/daemon/networkdriver"
"github.com/dotcloud/docker/pkg/collections"
"net"
"sync"
)
type networkSet map[string]*collections.OrderedIntSet
// allocatedMap is a thread-unsafe set of allocated IPs
type allocatedMap struct {
p map[int32]struct{}
last int32
}
func newAllocatedMap() *allocatedMap {
return &allocatedMap{p: make(map[int32]struct{})}
}
type networkSet map[string]*allocatedMap
var (
ErrNoAvailableIPs = errors.New("no available ip addresses on network")
@ -19,92 +28,74 @@ var (
var (
lock = sync.Mutex{}
allocatedIPs = networkSet{}
availableIPS = networkSet{}
)
// RequestIP requests an available ip from the given network. It
// will return the next available ip if the ip provided is nil. If the
// ip provided is not nil it will validate that the provided ip is available
// for use or return an error
func RequestIP(address *net.IPNet, ip *net.IP) (*net.IP, error) {
func RequestIP(network *net.IPNet, ip *net.IP) (*net.IP, error) {
lock.Lock()
defer lock.Unlock()
checkAddress(address)
key := network.String()
allocated, ok := allocatedIPs[key]
if !ok {
allocated = newAllocatedMap()
allocatedIPs[key] = allocated
}
if ip == nil {
next, err := getNextIp(address)
if err != nil {
return nil, err
}
return next, nil
return allocated.getNextIP(network)
}
if err := registerIP(address, ip); err != nil {
return nil, err
}
return ip, nil
return allocated.checkIP(network, ip)
}
// ReleaseIP adds the provided ip back into the pool of
// available ips to be returned for use.
func ReleaseIP(address *net.IPNet, ip *net.IP) error {
func ReleaseIP(network *net.IPNet, ip *net.IP) error {
lock.Lock()
defer lock.Unlock()
checkAddress(address)
var (
existing = allocatedIPs[address.String()]
available = availableIPS[address.String()]
pos = getPosition(address, ip)
)
existing.Remove(int(pos))
available.Push(int(pos))
if allocated, exists := allocatedIPs[network.String()]; exists {
pos := getPosition(network, ip)
delete(allocated.p, pos)
}
return nil
}
// convert the ip into the position in the subnet. Only
// positions are saved in the set
func getPosition(address *net.IPNet, ip *net.IP) int32 {
var (
first, _ = networkdriver.NetworkRange(address)
base = ipToInt(&first)
i = ipToInt(ip)
)
return i - base
func getPosition(network *net.IPNet, ip *net.IP) int32 {
first, _ := networkdriver.NetworkRange(network)
return ipToInt(ip) - ipToInt(&first)
}
func (allocated *allocatedMap) checkIP(network *net.IPNet, ip *net.IP) (*net.IP, error) {
pos := getPosition(network, ip)
if _, ok := allocated.p[pos]; ok {
return nil, ErrIPAlreadyAllocated
}
allocated.p[pos] = struct{}{}
allocated.last = pos
return ip, nil
}
// return an available ip if one is currently available. If not,
// return the next available ip for the network
func getNextIp(address *net.IPNet) (*net.IP, error) {
func (allocated *allocatedMap) getNextIP(network *net.IPNet) (*net.IP, error) {
var (
ownIP = ipToInt(&address.IP)
available = availableIPS[address.String()]
allocated = allocatedIPs[address.String()]
first, _ = networkdriver.NetworkRange(address)
base = ipToInt(&first)
size = int(networkdriver.NetworkSize(address.Mask))
max = int32(size - 2) // size -1 for the broadcast address, -1 for the gateway address
pos = int32(available.Pop())
ownIP = ipToInt(&network.IP)
first, _ = networkdriver.NetworkRange(network)
base = ipToInt(&first)
size = int(networkdriver.NetworkSize(network.Mask))
max = int32(size - 2) // size -1 for the broadcast address, -1 for the gateway address
pos = allocated.last
)
// We pop and push the position not the ip
if pos != 0 {
ip := intToIP(int32(base + pos))
allocated.Push(int(pos))
return ip, nil
}
var (
firstNetIP = address.IP.To4().Mask(address.Mask)
firstNetIP = network.IP.To4().Mask(network.Mask)
firstAsInt = ipToInt(&firstNetIP) + 1
)
pos = int32(allocated.PullBack())
for i := int32(0); i < max; i++ {
pos = pos%max + 1
next := int32(base + pos)
@ -112,31 +103,16 @@ func getNextIp(address *net.IPNet) (*net.IP, error) {
if next == ownIP || next == firstAsInt {
continue
}
if !allocated.Exists(int(pos)) {
ip := intToIP(next)
allocated.Push(int(pos))
return ip, nil
if _, ok := allocated.p[pos]; ok {
continue
}
allocated.p[pos] = struct{}{}
allocated.last = pos
return intToIP(next), nil
}
return nil, ErrNoAvailableIPs
}
func registerIP(address *net.IPNet, ip *net.IP) error {
var (
existing = allocatedIPs[address.String()]
available = availableIPS[address.String()]
pos = getPosition(address, ip)
)
if existing.Exists(int(pos)) {
return ErrIPAlreadyAllocated
}
available.Remove(int(pos))
return nil
}
// Converts a 4-byte IP into a 32-bit integer
func ipToInt(ip *net.IP) int32 {
return int32(binary.BigEndian.Uint32(ip.To4()))
@ -149,11 +125,3 @@ func intToIP(n int32) *net.IP {
ip := net.IP(b)
return &ip
}
func checkAddress(address *net.IPNet) {
key := address.String()
if _, exists := allocatedIPs[key]; !exists {
allocatedIPs[key] = collections.NewOrderedIntSet()
availableIPS[key] = collections.NewOrderedIntSet()
}
}
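A rough usage sketch of the reworked allocator follows (not part of this diff; the wrapping main and log output are illustrative, and the import path is the one used by the bridge driver above). A caller asks for the next free address in a subnet by passing a nil IP, and hands it back with ReleaseIP when the container goes away:

package main

import (
    "log"
    "net"

    "github.com/dotcloud/docker/daemon/networkdriver/ipallocator"
)

func main() {
    network := &net.IPNet{
        IP:   []byte{192, 168, 0, 1},
        Mask: []byte{255, 255, 255, 0},
    }
    // Passing a nil IP requests the next free address in the subnet.
    ip, err := ipallocator.RequestIP(network, nil)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("allocated %s", ip.String())
    // Release the address so a later RequestIP can hand it out again.
    if err := ipallocator.ReleaseIP(network, ip); err != nil {
        log.Fatal(err)
    }
}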
@ -8,7 +8,6 @@ import (
func reset() {
allocatedIPs = networkSet{}
availableIPS = networkSet{}
}
func TestRequestNewIps(t *testing.T) {
@ -18,8 +17,10 @@ func TestRequestNewIps(t *testing.T) {
Mask: []byte{255, 255, 255, 0},
}
var ip *net.IP
var err error
for i := 2; i < 10; i++ {
ip, err := RequestIP(network, nil)
ip, err = RequestIP(network, nil)
if err != nil {
t.Fatal(err)
}
@ -28,6 +29,17 @@ func TestRequestNewIps(t *testing.T) {
t.Fatalf("Expected ip %s got %s", expected, ip.String())
}
}
value := intToIP(ipToInt(ip) + 1).String()
if err := ReleaseIP(network, ip); err != nil {
t.Fatal(err)
}
ip, err = RequestIP(network, nil)
if err != nil {
t.Fatal(err)
}
if ip.String() != value {
t.Fatalf("Expected to receive the next ip %s got %s", value, ip.String())
}
}
func TestReleaseIp(t *testing.T) {
@ -64,6 +76,17 @@ func TestGetReleasedIp(t *testing.T) {
t.Fatal(err)
}
for i := 0; i < 252; i++ {
_, err = RequestIP(network, nil)
if err != nil {
t.Fatal(err)
}
err = ReleaseIP(network, ip)
if err != nil {
t.Fatal(err)
}
}
ip, err = RequestIP(network, nil)
if err != nil {
t.Fatal(err)
@ -185,24 +208,6 @@ func TestIPAllocator(t *testing.T) {
newIPs[i] = ip
}
// Before loop begin
// 2(u) - 3(u) - 4(f) - 5(f) - 6(f)
// ↑
// After i = 0
// 2(u) - 3(u) - 4(f) - 5(u) - 6(f)
// ↑
// After i = 1
// 2(u) - 3(u) - 4(f) - 5(u) - 6(u)
// ↑
// After i = 2
// 2(u) - 3(u) - 4(u) - 5(u) - 6(u)
// ↑
// Reordered these because the new set will always return the
// lowest ips first and not in the order that they were released
assertIPEquals(t, &expectedIPs[2], newIPs[0])
assertIPEquals(t, &expectedIPs[3], newIPs[1])
assertIPEquals(t, &expectedIPs[4], newIPs[2])
@ -234,8 +239,105 @@ func TestAllocateFirstIP(t *testing.T) {
}
}
func TestAllocateAllIps(t *testing.T) {
defer reset()
network := &net.IPNet{
IP: []byte{192, 168, 0, 1},
Mask: []byte{255, 255, 255, 0},
}
var (
current, first *net.IP
err error
isFirst = true
)
for err == nil {
current, err = RequestIP(network, nil)
if isFirst {
first = current
isFirst = false
}
}
if err != ErrNoAvailableIPs {
t.Fatal(err)
}
if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs {
t.Fatal(err)
}
if err := ReleaseIP(network, first); err != nil {
t.Fatal(err)
}
again, err := RequestIP(network, nil)
if err != nil {
t.Fatal(err)
}
assertIPEquals(t, first, again)
}
func TestAllocateDifferentSubnets(t *testing.T) {
defer reset()
network1 := &net.IPNet{
IP: []byte{192, 168, 0, 1},
Mask: []byte{255, 255, 255, 0},
}
network2 := &net.IPNet{
IP: []byte{127, 0, 0, 1},
Mask: []byte{255, 255, 255, 0},
}
expectedIPs := []net.IP{
0: net.IPv4(192, 168, 0, 2),
1: net.IPv4(192, 168, 0, 3),
2: net.IPv4(127, 0, 0, 2),
3: net.IPv4(127, 0, 0, 3),
}
ip11, err := RequestIP(network1, nil)
if err != nil {
t.Fatal(err)
}
ip12, err := RequestIP(network1, nil)
if err != nil {
t.Fatal(err)
}
ip21, err := RequestIP(network2, nil)
if err != nil {
t.Fatal(err)
}
ip22, err := RequestIP(network2, nil)
if err != nil {
t.Fatal(err)
}
assertIPEquals(t, &expectedIPs[0], ip11)
assertIPEquals(t, &expectedIPs[1], ip12)
assertIPEquals(t, &expectedIPs[2], ip21)
assertIPEquals(t, &expectedIPs[3], ip22)
}
func assertIPEquals(t *testing.T, ip1, ip2 *net.IP) {
if !ip1.Equal(*ip2) {
t.Fatalf("Expected IP %s, got %s", ip1, ip2)
}
}
func BenchmarkRequestIP(b *testing.B) {
network := &net.IPNet{
IP: []byte{192, 168, 0, 1},
Mask: []byte{255, 255, 255, 0},
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
for j := 0; j < 253; j++ {
_, err := RequestIP(network, nil)
if err != nil {
b.Fatal(err)
}
}
reset()
}
}
@ -2,21 +2,21 @@ package portallocator
import (
"errors"
"github.com/dotcloud/docker/pkg/collections"
"net"
"sync"
)
type (
portMap map[int]bool
protocolMap map[string]portMap
ipMapping map[string]protocolMap
)
const (
BeginPortRange = 49153
EndPortRange = 65535
)
type (
portMappings map[string]*collections.OrderedIntSet
ipMapping map[string]portMappings
)
var (
ErrAllPortsAllocated = errors.New("all ports are allocated")
ErrPortAlreadyAllocated = errors.New("port has already been allocated")
@ -24,165 +24,106 @@ var (
)
var (
currentDynamicPort = map[string]int{
"tcp": BeginPortRange - 1,
"udp": BeginPortRange - 1,
}
defaultIP = net.ParseIP("0.0.0.0")
defaultAllocatedPorts = portMappings{}
otherAllocatedPorts = ipMapping{}
lock = sync.Mutex{}
mutex sync.Mutex
defaultIP = net.ParseIP("0.0.0.0")
globalMap = ipMapping{}
)
func init() {
defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet()
defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet()
}
// RequestPort returns an available port if the port is 0
// If the provided port is not 0 then it will be checked if
// it is available for allocation
func RequestPort(ip net.IP, proto string, port int) (int, error) {
lock.Lock()
defer lock.Unlock()
mutex.Lock()
defer mutex.Unlock()
if err := validateProtocol(proto); err != nil {
if err := validateProto(proto); err != nil {
return 0, err
}
// If the user requested a specific port to be allocated
ip = getDefault(ip)
mapping := getOrCreate(ip)
if port > 0 {
if err := registerSetPort(ip, proto, port); err != nil {
if !mapping[proto][port] {
mapping[proto][port] = true
return port, nil
} else {
return 0, ErrPortAlreadyAllocated
}
} else {
port, err := findPort(ip, proto)
if err != nil {
return 0, err
}
return port, nil
}
return registerDynamicPort(ip, proto)
}
// ReleasePort will return the provided port back into the
// pool for reuse
func ReleasePort(ip net.IP, proto string, port int) error {
lock.Lock()
defer lock.Unlock()
mutex.Lock()
defer mutex.Unlock()
if err := validateProtocol(proto); err != nil {
return err
}
ip = getDefault(ip)
allocated := defaultAllocatedPorts[proto]
allocated.Remove(port)
mapping := getOrCreate(ip)
delete(mapping[proto], port)
if !equalsDefault(ip) {
registerIP(ip)
// Remove the port for the specific ip address
allocated = otherAllocatedPorts[ip.String()][proto]
allocated.Remove(port)
}
return nil
}
func ReleaseAll() error {
lock.Lock()
defer lock.Unlock()
mutex.Lock()
defer mutex.Unlock()
currentDynamicPort["tcp"] = BeginPortRange - 1
currentDynamicPort["udp"] = BeginPortRange - 1
defaultAllocatedPorts = portMappings{}
defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet()
defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet()
otherAllocatedPorts = ipMapping{}
globalMap = ipMapping{}
return nil
}
func registerDynamicPort(ip net.IP, proto string) (int, error) {
func getOrCreate(ip net.IP) protocolMap {
ipstr := ip.String()
if !equalsDefault(ip) {
registerIP(ip)
ipAllocated := otherAllocatedPorts[ip.String()][proto]
port, err := findNextPort(proto, ipAllocated)
if err != nil {
return 0, err
if _, ok := globalMap[ipstr]; !ok {
globalMap[ipstr] = protocolMap{
"tcp": portMap{},
"udp": portMap{},
}
ipAllocated.Push(port)
return port, nil
} else {
allocated := defaultAllocatedPorts[proto]
port, err := findNextPort(proto, allocated)
if err != nil {
return 0, err
}
allocated.Push(port)
return port, nil
}
}
func registerSetPort(ip net.IP, proto string, port int) error {
allocated := defaultAllocatedPorts[proto]
if allocated.Exists(port) {
return ErrPortAlreadyAllocated
}
if !equalsDefault(ip) {
registerIP(ip)
ipAllocated := otherAllocatedPorts[ip.String()][proto]
if ipAllocated.Exists(port) {
return ErrPortAlreadyAllocated
}
ipAllocated.Push(port)
} else {
allocated.Push(port)
}
return nil
return globalMap[ipstr]
}
func equalsDefault(ip net.IP) bool {
return ip == nil || ip.Equal(defaultIP)
}
func findPort(ip net.IP, proto string) (int, error) {
port := BeginPortRange
func findNextPort(proto string, allocated *collections.OrderedIntSet) (int, error) {
port := nextPort(proto)
startSearchPort := port
for allocated.Exists(port) {
port = nextPort(proto)
if startSearchPort == port {
mapping := getOrCreate(ip)
for mapping[proto][port] {
port++
if port > EndPortRange {
return 0, ErrAllPortsAllocated
}
}
mapping[proto][port] = true
return port, nil
}
func nextPort(proto string) int {
c := currentDynamicPort[proto] + 1
if c > EndPortRange {
c = BeginPortRange
func getDefault(ip net.IP) net.IP {
if ip == nil {
return defaultIP
}
currentDynamicPort[proto] = c
return c
return ip
}
func registerIP(ip net.IP) {
if _, exists := otherAllocatedPorts[ip.String()]; !exists {
otherAllocatedPorts[ip.String()] = portMappings{
"tcp": collections.NewOrderedIntSet(),
"udp": collections.NewOrderedIntSet(),
}
}
}
func validateProtocol(proto string) error {
if _, exists := defaultAllocatedPorts[proto]; !exists {
func validateProto(proto string) error {
if proto != "tcp" && proto != "udp" {
return ErrUnknownProtocol
}
return nil
}
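A comparable usage sketch for the simplified port allocator (not part of this diff; the wrapping main and log output are illustrative, and the import path is assumed from the daemon/networkdriver layout). Port 0 requests the next free port in the dynamic range, and a nil IP falls back to the default 0.0.0.0 mapping:

package main

import (
    "log"

    "github.com/dotcloud/docker/daemon/networkdriver/portallocator"
)

func main() {
    // Port 0 means "pick the next free port in the dynamic range".
    port, err := portallocator.RequestPort(nil, "tcp", 0)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("allocated tcp port %d", port)
    // Give the port back so it can be reused.
    if err := portallocator.ReleasePort(nil, "tcp", port); err != nil {
        log.Fatal(err)
    }
}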
@ -2,14 +2,16 @@ package daemon
import (
"fmt"
"github.com/dotcloud/docker/utils"
"sync"
"time"
"github.com/dotcloud/docker/pkg/units"
)
type State struct {
sync.RWMutex
Running bool
Paused bool
Pid int
ExitCode int
StartedAt time.Time
@ -22,12 +24,15 @@ func (s *State) String() string {
defer s.RUnlock()
if s.Running {
return fmt.Sprintf("Up %s", utils.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
if s.Paused {
return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
}
return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
}
if s.FinishedAt.IsZero() {
return ""
}
return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, utils.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
}
func (s *State) IsRunning() bool {
@ -49,6 +54,7 @@ func (s *State) SetRunning(pid int) {
defer s.Unlock()
s.Running = true
s.Paused = false
s.ExitCode = 0
s.Pid = pid
s.StartedAt = time.Now().UTC()
@ -63,3 +69,22 @@ func (s *State) SetStopped(exitCode int) {
s.FinishedAt = time.Now().UTC()
s.ExitCode = exitCode
}
func (s *State) SetPaused() {
s.Lock()
defer s.Unlock()
s.Paused = true
}
func (s *State) SetUnpaused() {
s.Lock()
defer s.Unlock()
s.Paused = false
}
func (s *State) IsPaused() bool {
s.RLock()
defer s.RUnlock()
return s.Paused
}
@ -2,10 +2,10 @@ package daemon
import (
"fmt"
"github.com/dotcloud/docker/nat"
"github.com/dotcloud/docker/pkg/namesgenerator"
"github.com/dotcloud/docker/runconfig"
"strings"
"github.com/dotcloud/docker/nat"
"github.com/dotcloud/docker/runconfig"
)
func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error {
@ -49,16 +49,3 @@ func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig, driverConfig map[
driverConfig["lxc"] = lxc
}
}
type checker struct {
daemon *Daemon
}
func (c *checker) Exists(name string) bool {
return c.daemon.containerGraph.Exists("/" + name)
}
// Generate a random and unique name
func generateRandomName(daemon *Daemon) (string, error) {
return namesgenerator.GenerateRandomName(&checker{daemon})
}
@ -10,7 +10,7 @@ import (
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/utils"
"github.com/dotcloud/docker/pkg/symlink"
)
type BindMap struct {
@ -40,8 +40,11 @@ func setupMountsForContainer(container *Container) error {
{container.ResolvConfPath, "/etc/resolv.conf", false, true},
}
if container.HostnamePath != "" && container.HostsPath != "" {
if container.HostnamePath != "" {
mounts = append(mounts, execdriver.Mount{container.HostnamePath, "/etc/hostname", false, true})
}
if container.HostsPath != "" {
mounts = append(mounts, execdriver.Mount{container.HostsPath, "/etc/hosts", false, true})
}
@ -94,13 +97,16 @@ func applyVolumesFrom(container *Container) error {
if _, exists := container.Volumes[volPath]; exists {
continue
}
stat, err := os.Stat(filepath.Join(c.basefs, volPath))
stat, err := os.Stat(c.getResourcePath(volPath))
if err != nil {
return err
}
if err := createIfNotExists(filepath.Join(container.basefs, volPath), stat.IsDir()); err != nil {
if err := createIfNotExists(container.getResourcePath(volPath), stat.IsDir()); err != nil {
return err
}
container.Volumes[volPath] = id
if isRW, exists := c.VolumesRW[volPath]; exists {
container.VolumesRW[volPath] = isRW && mountRW
@ -162,137 +168,169 @@ func createVolumes(container *Container) error {
return err
}
volumesDriver := container.daemon.volumes.Driver()
// Create the requested volumes if they don't exist
for volPath := range container.Config.Volumes {
volPath = filepath.Clean(volPath)
volIsDir := true
// Skip existing volumes
if _, exists := container.Volumes[volPath]; exists {
continue
}
var srcPath string
var isBindMount bool
srcRW := false
// If an external bind is defined for this volume, use that as a source
if bindMap, exists := binds[volPath]; exists {
isBindMount = true
srcPath = bindMap.SrcPath
if !filepath.IsAbs(srcPath) {
return fmt.Errorf("%s must be an absolute path", srcPath)
}
if strings.ToLower(bindMap.Mode) == "rw" {
srcRW = true
}
if stat, err := os.Stat(bindMap.SrcPath); err != nil {
return err
} else {
volIsDir = stat.IsDir()
}
// Otherwise create an directory in $ROOT/volumes/ and use that
} else {
// Do not pass a container as the parameter for the volume creation.
// The graph driver uses the container's information (Image) to
// create the parent.
c, err := container.daemon.volumes.Create(nil, "", "", "", "", nil, nil)
if err != nil {
return err
}
srcPath, err = volumesDriver.Get(c.ID, "")
if err != nil {
return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
}
srcRW = true // RW by default
}
if p, err := filepath.EvalSymlinks(srcPath); err != nil {
return err
} else {
srcPath = p
}
// Create the mountpoint
rootVolPath, err := utils.FollowSymlinkInScope(filepath.Join(container.basefs, volPath), container.basefs)
if err != nil {
if err := initializeVolume(container, volPath, binds); err != nil {
return err
}
}
newVolPath, err := filepath.Rel(container.basefs, rootVolPath)
if err != nil {
for volPath := range binds {
if err := initializeVolume(container, volPath, binds); err != nil {
return err
}
newVolPath = "/" + newVolPath
if volPath != newVolPath {
delete(container.Volumes, volPath)
delete(container.VolumesRW, volPath)
}
container.Volumes[newVolPath] = srcPath
container.VolumesRW[newVolPath] = srcRW
if err := createIfNotExists(rootVolPath, volIsDir); err != nil {
return err
}
// Do not copy or change permissions if we are mounting from the host
if srcRW && !isBindMount {
volList, err := ioutil.ReadDir(rootVolPath)
if err != nil {
return err
}
if len(volList) > 0 {
srcList, err := ioutil.ReadDir(srcPath)
if err != nil {
return err
}
if len(srcList) == 0 {
// If the source volume is empty copy files from the root into the volume
if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil {
return err
}
}
}
var stat syscall.Stat_t
if err := syscall.Stat(rootVolPath, &stat); err != nil {
return err
}
var srcStat syscall.Stat_t
if err := syscall.Stat(srcPath, &srcStat); err != nil {
return err
}
// Change the source volume's ownership if it differs from the root
// files that were just copied
if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid {
if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}
}
}
}
return nil
}
func createIfNotExists(path string, isDir bool) error {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
if isDir {
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
} else {
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return err
}
f, err := os.OpenFile(path, os.O_CREATE, 0755)
if err != nil {
return err
}
defer f.Close()
func createIfNotExists(destination string, isDir bool) error {
if _, err := os.Stat(destination); err != nil && os.IsNotExist(err) {
if isDir {
if err := os.MkdirAll(destination, 0755); err != nil {
return err
}
} else {
if err := os.MkdirAll(filepath.Dir(destination), 0755); err != nil {
return err
}
f, err := os.OpenFile(destination, os.O_CREATE, 0755)
if err != nil {
return err
}
f.Close()
}
}
return nil
}
func initializeVolume(container *Container, volPath string, binds map[string]BindMap) error {
volumesDriver := container.daemon.volumes.Driver()
volPath = filepath.Clean(volPath)
// Skip existing volumes
if _, exists := container.Volumes[volPath]; exists {
return nil
}
var (
destination string
isBindMount bool
volIsDir = true
srcRW = false
)
// If an external bind is defined for this volume, use that as a source
if bindMap, exists := binds[volPath]; exists {
isBindMount = true
destination = bindMap.SrcPath
if !filepath.IsAbs(destination) {
return fmt.Errorf("%s must be an absolute path", destination)
}
if strings.ToLower(bindMap.Mode) == "rw" {
srcRW = true
}
if stat, err := os.Stat(bindMap.SrcPath); err != nil {
return err
} else {
volIsDir = stat.IsDir()
}
} else {
// Do not pass a container as the parameter for the volume creation.
// The graph driver uses the container's information (Image) to
// create the parent.
c, err := container.daemon.volumes.Create(nil, "", "", "", "", nil, nil)
if err != nil {
return err
}
destination, err = volumesDriver.Get(c.ID, "")
if err != nil {
return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
}
srcRW = true
}
if p, err := filepath.EvalSymlinks(destination); err != nil {
return err
} else {
destination = p
}
// Create the mountpoint
source, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, volPath), container.basefs)
if err != nil {
return err
}
newVolPath, err := filepath.Rel(container.basefs, source)
if err != nil {
return err
}
newVolPath = "/" + newVolPath
if volPath != newVolPath {
delete(container.Volumes, volPath)
delete(container.VolumesRW, volPath)
}
container.Volumes[newVolPath] = destination
container.VolumesRW[newVolPath] = srcRW
if err := createIfNotExists(source, volIsDir); err != nil {
return err
}
// Do not copy or change permissions if we are mounting from the host
if srcRW && !isBindMount {
if err := copyExistingContents(source, destination); err != nil {
return err
}
}
return nil
}
func copyExistingContents(source, destination string) error {
volList, err := ioutil.ReadDir(source)
if err != nil {
return err
}
if len(volList) > 0 {
srcList, err := ioutil.ReadDir(destination)
if err != nil {
return err
}
if len(srcList) == 0 {
// If the source volume is empty copy files from the root into the volume
if err := archive.CopyWithTar(source, destination); err != nil {
return err
}
}
}
return copyOwnership(source, destination)
}
// copyOwnership copies the permissions and uid:gid of the source file
// into the destination file
func copyOwnership(source, destination string) error {
var stat syscall.Stat_t
if err := syscall.Stat(source, &stat); err != nil {
return err
}
if err := os.Chown(destination, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}
return os.Chmod(destination, os.FileMode(stat.Mode))
}
daemonconfig/README.md (new file, 3 lines)
@ -0,0 +1,3 @@
This directory contains code pertaining to the configuration of the docker daemon.
These are the configuration settings that you pass to the docker daemon when you launch it, for example: `docker -d -e lxc`.
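The same applies to storage driver options introduced by this change set, e.g. `docker -d --storage-opt <key>=<value>`; `<key>=<value>` is a placeholder here, since the accepted option names depend on the selected graph driver.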
@ -25,6 +25,7 @@ type Config struct {
BridgeIP string
InterContainerCommunication bool
GraphDriver string
GraphOptions []string
ExecDriver string
Mtu int
DisableNetwork bool
@ -49,6 +50,10 @@ func ConfigFromJob(job *engine.Job) *Config {
ExecDriver: job.Getenv("ExecDriver"),
EnableSelinuxSupport: job.GetenvBool("EnableSelinuxSupport"),
}
if graphOpts := job.GetenvList("GraphOptions"); graphOpts != nil {
config.GraphOptions = graphOpts
}
if dns := job.GetenvList("Dns"); dns != nil {
config.Dns = dns
}
@ -41,6 +41,7 @@ func main() {
var (
flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit")
flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode")
flGraphOpts opts.ListOpts
flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode")
flAutoRestart = flag.Bool([]string{"r", "-restart"}, true, "Restart previously running containers")
bridgeName = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking")
@ -69,6 +70,7 @@ func main() {
flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers")
flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
flag.Var(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode\nspecified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.")
flag.Var(&flGraphOpts, []string{"-storage-opt"}, "Set storage driver options")
flag.Parse()
@ -98,6 +100,9 @@ func main() {
}
if *flDaemon {
if runtime.GOOS != "linux" {
log.Fatalf("The Docker daemon is only supported on linux")
}
if os.Geteuid() != 0 {
log.Fatalf("The Docker daemon needs to be run as root")
}
@ -153,6 +158,7 @@ func main() {
job.Setenv("DefaultIp", *flDefaultIp)
job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
job.Setenv("GraphDriver", *flGraphDriver)
job.SetenvList("GraphOptions", flGraphOpts.GetAll())
job.Setenv("ExecDriver", *flExecDriver)
job.SetenvInt("Mtu", *flMtu)
job.SetenvBool("EnableSelinuxSupport", *flSelinuxEnabled)
@ -185,6 +191,7 @@ func main() {
job.Setenv("TlsCa", *flCa)
job.Setenv("TlsCert", *flCert)
job.Setenv("TlsKey", *flKey)
job.SetenvBool("BufferRequests", true)
if err := job.Run(); err != nil {
log.Fatal(err)
}
Some files were not shown because too many files have changed in this diff.