diff --git a/.mailmap b/.mailmap index a34fc4823c..683758650e 100644 --- a/.mailmap +++ b/.mailmap @@ -6,14 +6,16 @@ Guillaume J. Charmes + - -Thatcher Peskens dhrp -Thatcher Peskens dhrp +Thatcher Peskens +Thatcher Peskens +Thatcher Peskens dhrp Jérôme Petazzoni jpetazzo Jérôme Petazzoni -Joffrey F - +Joffrey F +Joffrey F +Joffrey F Tim Terhorst Andy Smith @@ -23,7 +25,6 @@ Andy Smith -Thatcher Peskens Walter Stanish @@ -54,7 +55,26 @@ Jean-Baptiste Dalido - - -Sven Dowideit ¨Sven <¨SvenDowideit@home.org.au¨> + + +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit <¨SvenDowideit@home.org.au¨> unclejack + +Alexandr Morozov + +O.S. Tezer + +Roberto G. Hashioka + + + + + +Sridhar Ratnakumar +Sridhar Ratnakumar +Liang-Chi Hsieh +Aleksa Sarai +Will Weaver diff --git a/AUTHORS b/AUTHORS index adfcfaa851..10f01fb589 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,44 +1,62 @@ # This file lists all individuals having contributed content to the repository. -# If you're submitting a patch, please add your name here in alphabetical order as part of the patch. -# -# For a list of active project maintainers, see the MAINTAINERS file. -# +# For how it is generated, see `.mailmap`. + Aanand Prasad Aaron Feng +Aaron Huslage Abel Muiño +Adam Miller +Adam Singer +Aditya +Adrian Mouat +alambike +Aleksa Sarai Alexander Larsson +Alexandr Morozov +Alexey Kotlyarov Alexey Shamrin Alex Gaynor Alexis THOMAS +almoehi Al Tobey +amangoel Andrea Luzzardi Andreas Savvides Andreas Tiefenthaler +Andrea Turli Andrew Duckworth Andrew Macgregor Andrew Munsell Andrews Medina +Andrew Williams Andy Chambers andy diller Andy Goldstein +Andy Kipp Andy Rothfusz Andy Smith Anthony Bishopric Anton Nikitin Antony Messerli apocas +Arnaud Porterie Asbjørn Enge +Barnaby Gray Barry Allard Bartłomiej Piotrowski +Benjamin Atkin Benoit Chesneau Ben Sargent Ben Toews Ben Wiklund +Bernerd Schaefer Bhiraj Butala +bin liu Bouke Haarsma Brandon Liu Brandon Philips Brian Dorsey +Brian Flad Brian Goff Brian McCallister Brian Olsen @@ -46,11 +64,15 @@ Brian Shumate Briehan Lombaard Bruno Bigras Bryan Matsuo +Bryan Murphy Caleb Spare Calen Pennington +Cameron Boehmer Carl X. Su Charles Hooper Charles Lindsay +Charles Merriam +Charlie Lewis Chia-liang Kao Chris St. 
Pierre Christopher Currie @@ -61,6 +83,7 @@ Colin Dunklau Colin Rice Cory Forsyth cressie176 +Dafydd Crosby Dan Buch Dan Hirsch Daniel Exner @@ -72,30 +95,45 @@ Daniel Nordberg Daniel Robinson Daniel Von Fange Daniel YC Lin +Dan Keder +Dan McPherson +Danny Berger Danny Yates +Dan Stine +Dan Walsh +Dan Williams Darren Coxall +Darren Shepherd David Anderson David Calavera +David Gageot David Mcanulty +David Röthlisberger David Sissitka Deni Bertovic Dinesh Subhraveti +Djibril Koné dkumor Dmitry Demeshchuk +Dolph Mathews Dominik Honnef Don Spaulding Dražen Lučanin Dr Nic Williams Dustin Sallings Edmund Wagner +Eiichi Tsukata +Eivind Uggedal Elias Probst Emil Hernvall Emily Rose Eric Hanchrow Eric Lee Eric Myhre +Erik Hollensbe Erno Hopearuoho eugenkrizo +Evan Hazlett Evan Krall Evan Phoenix Evan Wies @@ -106,6 +144,7 @@ Fabio Rehm Fabrizio Regini Faiz Khan Fareed Dudhia +Felix Rabe Fernando Flavio Castelli Francisco Souza @@ -117,8 +156,11 @@ Gabe Rosenhouse Gabriel Monroy Galen Sampson Gareth Rushgrove +Geoffrey Bachelet Gereon Frey +German DZ Gert van Valkenhoef +Goffert van Gool Graydon Hoare Greg Thornton grunny @@ -127,28 +169,40 @@ Gurjeet Singh Guruprasad Harley Laue Hector Castro +Hobofan Hunter Blanks +Ian Truslove +ILYA Khlopotov inglesp Isaac Dupree +Isabel Jimenez Isao Jonas +Jack Danger Canty +jakedt Jake Moshenko James Allen James Carr +James DeFelice +James Harrison Fisher James Mills James Turnbull jaseg Jason McVetta +Jason Plum Jean-Baptiste Barth Jean-Baptiste Dalido Jeff Lindsay Jeremy Grosser Jérôme Petazzoni Jesse Dubay +Jilles Oldenbeuving Jim Alateras Jimmy Cuadra Joe Beda +Joel Handwell +Joe Shaw Joe Van Dyk -Joffrey F +Joffrey F Johan Euphrosine Johannes 'fish' Ziemke Johan Rydberg @@ -157,7 +211,9 @@ John Feminella John Gardiner Myers John Warwick Jonas Pfenniger +Jonathan McCrohan Jonathan Mueller +Jonathan Pares Jonathan Rudenberg Jon Wedaman Joost Cassee @@ -172,13 +228,17 @@ Julien Barbier Julien Dubois Justin Force Justin Plock +Justin Simonelis Karan Lyons Karl Grzeszczak +Kato Kazuyoshi Kawsar Saiyeed Keli Hu Ken Cochrane +Ken ICHIKAWA Kevin Clark Kevin J. Lynagh +Kevin Menard Kevin Wallace Keyvan Fatehi kim0 @@ -187,14 +247,20 @@ Kimbro Staken Kiran Gangadharan Konstantin Pelykh Kyle Conroy +lalyos +Lance Chen +Lars R. Damerow Laurie Voss +Lewis Peckover Liang-Chi Hsieh Lokesh Mandvekar Louis Opter lukaspustina +lukemarsden Mahesh Tiyyagura Manuel Meurer Manuel Woelker +Marc Abramowitz Marc Kuo Marco Hennings Marcus Farkas @@ -206,23 +272,32 @@ Marko Mikulicic Markus Fix Martijn van Oosterhout Martin Redmond +Mason Malone +Mateusz Sulima Mathieu Le Marec - Pasquet Matt Apperson Matt Bachmann Matt Haggard Matthew Mueller +Matthias Klumpp +Matthias Kühnle mattymo Maxime Petazzoni Maxim Treskin +Max Shytikov meejah +Michael Brown Michael Crosby Michael Gorsuch +Michael Neale Michael Stapelberg Miguel Angel Fernández Mike Gaffney +Mike MacCana Mike Naberezny Mikhail Sobolev Mohit Soni +Morgante Pell Morten Siebuhr Nan Monnand Deng Nate Jones @@ -234,22 +309,26 @@ Nick Stenning Nick Stinemates Nicolas Dudebout Nicolas Kaiser +noducks Nolan Darilek odk- Oguz Bilgic Ole Reifschneider -O.S.Tezer +O.S. 
Tezer pandrew Pascal Borreli pattichen +Paul Annesley Paul Bowsher Paul Hammond +Paul Jimenez Paul Lietar Paul Morie Paul Nasrat Paul Peter Braden Peter Waller +Phillip Alexander Phil Spitler Piergiuliano Bossi Pierre-Alain RIVIERE @@ -257,6 +336,8 @@ Piotr Bogdan pysqz Quentin Brossard Rafal Jeczalik +Rajat Pandit +Ralph Bean Ramkumar Ramachandra Ramon van Alteren Renato Riccieri Santos Zannon @@ -266,54 +347,71 @@ Richo Healey Rick Bradley Robert Obryk Roberto G. Hashioka -Roberto Hashioka +robpc Rodrigo Vaz Roel Van Nyen Roger Peppe +Rohit Jnagal +Roland Moriz +Rovanion Luckey +Ryan Aslett Ryan Fowler Ryan O'Donnell Ryan Seto +Ryan Thomas Sam Alba Sam J Sharpe +Sam Rijs Samuel Andaya Scott Bessler +Scott Collier Sean Cronin Sean P. Kane +Sébastien Stormacq Shawn Landden Shawn Siefkas Shih-Yuan Lee -shin- Silas Sewell Simon Taranto +Sindhu S Sjoerd Langkemper -Solomon Hykes +Solomon Hykes Song Gao +Soulou Sridatta Thatipamala Sridhar Ratnakumar Steeve Morin Stefan Praszalowicz +Steven Burgess sudosurootdev -Sven Dowideit +Sven Dowideit Sylvain Bellemare tang0th Tatsuki Sugiura Tehmasp Chaudhri -Thatcher Peskens +Thatcher Peskens Thermionix Thijs Terlouw Thomas Bikeev Thomas Frössman Thomas Hansen Thomas LEVEIL +Thomas Schroeter Tianon Gravi -Tim Bosse +Tibor Vass +Tim Bosse +Timothy Hobbs +Tim Ruffles Tim Terhorst +tjmehta Tobias Bieniek Tobias Schmidt Tobias Schwab Todd Lunter +Tom Fotherby Tom Hulihan Tommaso Visconti +Tony Daws Travis Cline Tyler Brock Tzu-Jung Lee @@ -322,26 +420,35 @@ unclejack vgeta Victor Coisne Victor Lyuboslavsky +Victor Marmol Victor Vieux +Viktor Vojnovski Vincent Batts Vincent Bernat +Vincent Mayers Vincent Woo Vinod Kulkarni +Vishnu Kannan Vitor Monteiro Vivek Agarwal +Vladimir Bulyga Vladimir Kirillov -Vladimir Rutsky +Vladimir Rutsky +Walter Leibbrandt Walter Stanish WarheadsSE Wes Morgan Will Dietz William Delanoue +William Henry Will Rouesnel Will Weaver Xiuming Chen Yang Bai +Yasunori Mahata Yurii Rashkovskii Zain Memon Zaiste! Zilin Du zimbatm +zqh diff --git a/CHANGELOG.md b/CHANGELOG.md index bd6dc6026e..14329ab96c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ # Changelog +## 0.11.1 (2014-05-07) + +#### Registry +- Fix push and pull to private registry + ## 0.11.0 (2014-05-07) #### Notable features since 0.10.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d77afbc443..f59e49d465 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -182,7 +182,7 @@ One way to automate this is to customise your git ``commit.template`` by adding a ``prepare-commit-msg`` hook to your docker checkout: ``` -curl -o .git/hooks/prepare-commit-msg https://raw.github.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg +curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg ``` * Note: the above script expects to find your GitHub user name in ``git config --get github.user`` @@ -192,7 +192,10 @@ curl -o .git/hooks/prepare-commit-msg https://raw.github.com/dotcloud/docker/mas There are several exceptions to the signing requirement. Currently these are: * Your patch fixes spelling or grammar errors. -* Your patch is a single line change to documentation. +* Your patch is a single line change to documentation contained in the + `docs` directory. +* Your patch fixes Markdown formatting or syntax errors in the + documentation contained in the `docs` directory.
If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io) diff --git a/Dockerfile b/Dockerfile index be2233ff87..283e0a3262 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,7 +24,7 @@ # docker-version 0.6.1 -FROM ubuntu:13.10 +FROM ubuntu:14.04 MAINTAINER Tianon Gravi (@tianon) # Packaged dependencies @@ -41,6 +41,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ libapparmor-dev \ libcap-dev \ libsqlite3-dev \ + lxc=1.0* \ mercurial \ pandoc \ reprepro \ @@ -49,10 +50,6 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ s3cmd=1.1.0* \ --no-install-recommends -# Get and compile LXC 0.8 (since it is the most stable) -RUN git clone --no-checkout https://github.com/lxc/lxc.git /usr/local/lxc && cd /usr/local/lxc && git checkout -q lxc-0.8.0 -RUN cd /usr/local/lxc && ./autogen.sh && ./configure --disable-docs && make && make install - # Get lvm2 source for compiling statically RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags @@ -84,7 +81,7 @@ RUN go get code.google.com/p/go.tools/cmd/cover RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 # Get the "busybox" image source so we can build locally instead of pulling -RUN git clone https://github.com/jpetazzo/docker-busybox.git /docker-busybox +RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox # Setup s3cmd config RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg @@ -92,6 +89,10 @@ RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_ # Set user.email so crosbymichael's in-container merge commits go smoothly RUN git config --global user.email 'docker-dummy@example.com' +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + VOLUME /var/lib/docker WORKDIR /go/src/github.com/dotcloud/docker ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/Makefile b/Makefile index a4c8658e08..a8e4dc5ca1 100644 --- a/Makefile +++ b/Makefile @@ -35,7 +35,7 @@ docs-release: docs-build $(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./release.sh test: build - $(DOCKER_RUN_DOCKER) hack/make.sh binary test-unit test-integration test-integration-cli + $(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli test-unit: build $(DOCKER_RUN_DOCKER) hack/make.sh test-unit diff --git a/README.md b/README.md index fae1bb916b..c965efafe8 100644 --- a/README.md +++ b/README.md @@ -190,3 +190,9 @@ It is your responsibility to ensure that your use and/or transfer does not violate applicable laws. For more information, please see http://www.bis.doc.gov + + +Licensing +========= +Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text. 
+ diff --git a/VERSION b/VERSION index eb1336c84d..72ab694d37 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.11.0-dev +0.11.1-dev diff --git a/api/README.md b/api/README.md new file mode 100644 index 0000000000..3ef33f8c29 --- /dev/null +++ b/api/README.md @@ -0,0 +1,5 @@ +This directory contains code pertaining to the Docker API: + + - Used by the docker client when communicating with the docker daemon + + - Used by third party tools wishing to interface with the docker daemon diff --git a/api/client/cli.go b/api/client/cli.go index 49fb3c978f..bb5d191e16 100644 --- a/api/client/cli.go +++ b/api/client/cli.go @@ -23,6 +23,9 @@ var funcMap = template.FuncMap{ } func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) { + if len(name) == 0 { + return nil, false + } methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:]) method := reflect.ValueOf(cli).MethodByName(methodName) if !method.IsValid() { @@ -73,7 +76,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsC } if in != nil { - if file, ok := in.(*os.File); ok { + if file, ok := out.(*os.File); ok { terminalFd = file.Fd() isTerminal = term.IsTerminal(terminalFd) } diff --git a/api/client/commands.go b/api/client/commands.go index 89f9b0a4c4..67c5aec2dc 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -13,7 +13,7 @@ import ( "os" "os/exec" "path" - goruntime "runtime" + "runtime" "strconv" "strings" "syscall" @@ -28,6 +28,7 @@ import ( "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/pkg/signal" "github.com/dotcloud/docker/pkg/term" + "github.com/dotcloud/docker/pkg/units" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" @@ -109,6 +110,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers") noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") + forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers, even after unsuccessful builds") if err := cmd.Parse(args); err != nil { return nil } @@ -160,6 +162,9 @@ func (cli *DockerCli) CmdBuild(args ...string) error { if _, err = os.Stat(filename); os.IsNotExist(err) { return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0)) } + if err = utils.ValidateContextDirectory(root); err != nil { + return fmt.Errorf("Error checking context is accessible: '%s'.
Please check permissions and try again.", err) + } context, err = archive.Tar(root, archive.Uncompressed) } var body io.Reader @@ -193,6 +198,12 @@ func (cli *DockerCli) CmdBuild(args ...string) error { } if *rm { v.Set("rm", "1") + } else { + v.Set("rm", "0") + } + + if *forceRm { + v.Set("forcerm", "1") } cli.LoadConfigFile() @@ -359,7 +370,7 @@ func (cli *DockerCli) CmdVersion(args ...string) error { fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION) } fmt.Fprintf(cli.out, "Client API version: %s\n", api.APIVERSION) - fmt.Fprintf(cli.out, "Go version (client): %s\n", goruntime.Version()) + fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version()) if dockerversion.GITCOMMIT != "" { fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT) } @@ -384,16 +395,8 @@ func (cli *DockerCli) CmdVersion(args ...string) error { if apiVersion := remoteVersion.Get("ApiVersion"); apiVersion != "" { fmt.Fprintf(cli.out, "Server API version: %s\n", apiVersion) } - fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit")) fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion")) - release := utils.GetReleaseVersion() - if release != "" { - fmt.Fprintf(cli.out, "Last stable version: %s", release) - if (dockerversion.VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(dockerversion.VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) { - fmt.Fprintf(cli.out, ", please update docker") - } - fmt.Fprintf(cli.out, "\n") - } + fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit")) return nil } @@ -884,14 +887,14 @@ func (cli *DockerCli) CmdHistory(args ...string) error { fmt.Fprintf(w, "%s\t", utils.TruncateID(outID)) } - fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) + fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) if *noTrunc { fmt.Fprintf(w, "%s\t", out.Get("CreatedBy")) } else { fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45)) } - fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("Size"))) + fmt.Fprintf(w, "%s\n", units.HumanSize(out.GetInt64("Size"))) } else { if *noTrunc { fmt.Fprintln(w, outID) @@ -1249,7 +1252,7 @@ func (cli *DockerCli) CmdImages(args ...string) error { } if !*quiet { - fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), utils.HumanSize(out.GetInt64("VirtualSize"))) + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), units.HumanSize(out.GetInt64("VirtualSize"))) } else { fmt.Fprintln(w, outID) } @@ -1323,7 +1326,7 @@ func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix stri imageID = utils.TruncateID(image.Get("Id")) } - fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, utils.HumanSize(image.GetInt64("VirtualSize"))) + fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(image.GetInt64("VirtualSize"))) if image.GetList("RepoTags")[0] != ":" { fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", ")) } else { @@ -1408,12 +1411,12 @@ func (cli *DockerCli) CmdPs(args ...string) error { outCommand = utils.Trunc(outCommand, 20) } ports.ReadListFrom([]byte(out.Get("Ports"))) - fmt.Fprintf(w, "%s\t%s\t%s\t%s 
ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ",")) + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ",")) if *size { if out.GetInt("SizeRootFs") > 0 { - fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.GetInt64("SizeRw")), utils.HumanSize(out.GetInt64("SizeRootFs"))) + fmt.Fprintf(w, "%s (virtual %s)\n", units.HumanSize(out.GetInt64("SizeRw")), units.HumanSize(out.GetInt64("SizeRootFs"))) } else { - fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("SizeRw"))) + fmt.Fprintf(w, "%s\n", units.HumanSize(out.GetInt64("SizeRw"))) } } else { fmt.Fprint(w, "\n") @@ -1839,6 +1842,10 @@ func (cli *DockerCli) CmdRun(args ...string) error { v := url.Values{} repos, tag := utils.ParseRepositoryTag(config.Image) + // pull only the image tagged 'latest' if no tag was specified + if tag == "" { + tag = "latest" + } v.Set("fromImage", repos) v.Set("tag", tag) @@ -2058,7 +2065,7 @@ func (cli *DockerCli) CmdCp(args ...string) error { } if statusCode == 200 { - if err := archive.Untar(stream, copyData.Get("HostPath"), nil); err != nil { + if err := archive.Untar(stream, copyData.Get("HostPath"), &archive.TarOptions{NoLchown: true}); err != nil { return err } } diff --git a/api/common.go b/api/common.go index af4ced4f6e..a20c5d7d1c 100644 --- a/api/common.go +++ b/api/common.go @@ -2,15 +2,16 @@ package api import ( "fmt" + "mime" + "strings" + "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/pkg/version" "github.com/dotcloud/docker/utils" - "mime" - "strings" ) const ( - APIVERSION version.Version = "1.11" + APIVERSION version.Version = "1.12" DEFAULTHTTPHOST = "127.0.0.1" DEFAULTUNIXSOCKET = "/var/run/docker.sock" ) @@ -30,7 +31,7 @@ func DisplayablePorts(ports *engine.Table) string { ports.Sort() for _, port := range ports.Data { if port.Get("IP") == "" { - result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PublicPort"), port.Get("Type"))) + result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PrivatePort"), port.Get("Type"))) } else { result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type"))) } diff --git a/api/server/server.go b/api/server/server.go index 05d5e60690..e9805864ca 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -122,17 +122,17 @@ func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter var ( authConfig, err = ioutil.ReadAll(r.Body) job = eng.Job("auth") - status string + stdoutBuffer = bytes.NewBuffer(nil) ) if err != nil { return err } job.Setenv("authConfig", string(authConfig)) - job.Stdout.AddString(&status) + job.Stdout.Add(stdoutBuffer) if err = job.Run(); err != nil { return err } - if status != "" { + if status := engine.Tail(stdoutBuffer, 1); status != "" { var env engine.Env env.Set("Status", status) return writeJSON(w, http.StatusOK, env) @@ -244,7 +244,7 @@ func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWrite return err } - var job = eng.Job("events", r.RemoteAddr) + var job = eng.Job("events") streamJSON(job, w, true) job.Setenv("since", r.Form.Get("since")) job.Setenv("until", r.Form.Get("until")) @@ -338,7 +338,7 @@ func 
getContainersLogs(eng *engine.Engine, version version.Version, w http.Respo } var ( - job = eng.Job("inspect", vars["name"], "container") + job = eng.Job("container_inspect", vars["name"]) c, err = job.Stdout.AddEnv() ) if err != nil { @@ -393,9 +393,10 @@ func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWrit return err } var ( - config engine.Env - env engine.Env - job = eng.Job("commit", r.Form.Get("container")) + config engine.Env + env engine.Env + job = eng.Job("commit", r.Form.Get("container")) + stdoutBuffer = bytes.NewBuffer(nil) ) if err := config.Decode(r.Body); err != nil { utils.Errorf("%s", err) @@ -407,12 +408,11 @@ func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWrit job.Setenv("comment", r.Form.Get("comment")) job.SetenvSubEnv("config", &config) - var id string - job.Stdout.AddString(&id) + job.Stdout.Add(stdoutBuffer) if err := job.Run(); err != nil { return err } - env.Set("Id", id) + env.Set("Id", engine.Tail(stdoutBuffer, 1)) return writeJSON(w, http.StatusCreated, env) } @@ -603,17 +603,17 @@ func postContainersCreate(eng *engine.Engine, version version.Version, w http.Re return nil } var ( - out engine.Env - job = eng.Job("create", r.Form.Get("name")) - outWarnings []string - outId string - warnings = bytes.NewBuffer(nil) + out engine.Env + job = eng.Job("create", r.Form.Get("name")) + outWarnings []string + stdoutBuffer = bytes.NewBuffer(nil) + warnings = bytes.NewBuffer(nil) ) if err := job.DecodeEnv(r.Body); err != nil { return err } // Read container ID from the first line of stdout - job.Stdout.AddString(&outId) + job.Stdout.Add(stdoutBuffer) // Read warnings from stderr job.Stderr.Add(warnings) if err := job.Run(); err != nil { @@ -624,7 +624,7 @@ func postContainersCreate(eng *engine.Engine, version version.Version, w http.Re for scanner.Scan() { outWarnings = append(outWarnings, scanner.Text()) } - out.Set("Id", outId) + out.Set("Id", engine.Tail(stdoutBuffer, 1)) out.SetList("Warnings", outWarnings) return writeJSON(w, http.StatusCreated, out) } @@ -720,20 +720,16 @@ func postContainersWait(eng *engine.Engine, version version.Version, w http.Resp return fmt.Errorf("Missing parameter") } var ( - env engine.Env - status string - job = eng.Job("wait", vars["name"]) + env engine.Env + stdoutBuffer = bytes.NewBuffer(nil) + job = eng.Job("wait", vars["name"]) ) - job.Stdout.AddString(&status) + job.Stdout.Add(stdoutBuffer) if err := job.Run(); err != nil { return err } - // Parse a 16-bit encoded integer to map typical unix exit status. 
- _, err := strconv.ParseInt(status, 10, 16) - if err != nil { - return err - } - env.Set("StatusCode", status) + + env.Set("StatusCode", engine.Tail(stdoutBuffer, 1)) return writeJSON(w, http.StatusOK, env) } @@ -759,7 +755,7 @@ func postContainersAttach(eng *engine.Engine, version version.Version, w http.Re } var ( - job = eng.Job("inspect", vars["name"], "container") + job = eng.Job("container_inspect", vars["name"]) c, err = job.Stdout.AddEnv() ) if err != nil { @@ -823,7 +819,7 @@ func wsContainersAttach(eng *engine.Engine, version version.Version, w http.Resp return fmt.Errorf("Missing parameter") } - if err := eng.Job("inspect", vars["name"], "container").Run(); err != nil { + if err := eng.Job("container_inspect", vars["name"]).Run(); err != nil { return err } @@ -851,9 +847,8 @@ func getContainersByName(eng *engine.Engine, version version.Version, w http.Res if vars == nil { return fmt.Errorf("Missing parameter") } - var job = eng.Job("inspect", vars["name"], "container") + var job = eng.Job("container_inspect", vars["name"]) streamJSON(job, w, false) - job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job return job.Run() } @@ -861,9 +856,8 @@ func getImagesByName(eng *engine.Engine, version version.Version, w http.Respons if vars == nil { return fmt.Errorf("Missing parameter") } - var job = eng.Job("inspect", vars["name"], "image") + var job = eng.Job("image_inspect", vars["name"]) streamJSON(job, w, false) - job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job return job.Run() } @@ -872,6 +866,8 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.") } var ( + authEncoded = r.Header.Get("X-Registry-Auth") + authConfig = ®istry.AuthConfig{} configFileEncoded = r.Header.Get("X-Registry-Config") configFile = ®istry.ConfigFile{} job = eng.Job("build") @@ -881,18 +877,12 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite // Both headers will be parsed and sent along to the daemon, but if a non-empty // ConfigFile is present, any value provided as an AuthConfig directly will // be overridden. See BuildFile::CmdFrom for details. 
- var ( - authEncoded = r.Header.Get("X-Registry-Auth") - authConfig = ®istry.AuthConfig{} - ) if version.LessThan("1.9") && authEncoded != "" { authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // for a pull it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty authConfig = ®istry.AuthConfig{} - } else { - configFile.Configs[authConfig.ServerAddress] = *authConfig } } @@ -911,13 +901,22 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite } else { job.Stdout.Add(utils.NewWriteFlusher(w)) } + + if r.FormValue("forcerm") == "1" && version.GreaterThanOrEqualTo("1.12") { + job.Setenv("rm", "1") + } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { + job.Setenv("rm", "1") + } else { + job.Setenv("rm", r.FormValue("rm")) + } job.Stdin.Add(r.Body) job.Setenv("remote", r.FormValue("remote")) job.Setenv("t", r.FormValue("t")) job.Setenv("q", r.FormValue("q")) job.Setenv("nocache", r.FormValue("nocache")) - job.Setenv("rm", r.FormValue("rm")) - job.SetenvJson("auth", configFile) + job.Setenv("forcerm", r.FormValue("forcerm")) + job.SetenvJson("authConfig", authConfig) + job.SetenvJson("configFile", configFile) if err := job.Run(); err != nil { if !job.Stdout.Used() { @@ -1196,6 +1195,7 @@ func changeGroup(addr string, nameOrGid string) error { // ListenAndServe sets up the required http.Server and gets it listening for // each addr passed in and does protocol specific checking. func ListenAndServe(proto, addr string, job *engine.Job) error { + var l net.Listener r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version")) if err != nil { return err @@ -1211,7 +1211,11 @@ func ListenAndServe(proto, addr string, job *engine.Job) error { } } - l, err := listenbuffer.NewListenBuffer(proto, addr, activationLock) + if job.GetenvBool("BufferRequests") { + l, err = listenbuffer.NewListenBuffer(proto, addr, activationLock) + } else { + l, err = net.Listen(proto, addr) + } if err != nil { return err } @@ -1283,10 +1287,6 @@ func ServeApi(job *engine.Job) engine.Status { ) activationLock = make(chan struct{}) - if err := job.Eng.Register("acceptconnections", AcceptConnections); err != nil { - return job.Error(err) - } - for _, protoAddr := range protoAddrs { protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { @@ -1313,7 +1313,9 @@ func AcceptConnections(job *engine.Job) engine.Status { go systemd.SdNotify("READY=1") // close the lock so the listeners start accepting connections - close(activationLock) + if activationLock != nil { + close(activationLock) + } return engine.StatusOK } diff --git a/archive/README.md b/archive/README.md new file mode 100644 index 0000000000..4eb0c04181 --- /dev/null +++ b/archive/README.md @@ -0,0 +1,3 @@ +This code provides helper functions for dealing with archive files. + +**TODO**: Move this to either `pkg` or (if not possible) to `utils`. 
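The `archive/archive.go` hunks that follow replace a hand-rolled byte-comparison loop in `DetectCompression` with a table of magic numbers checked via `bytes.Compare`, and replace `DecompressStream`'s manual 10-byte read loop with `bufio.Peek`, which inspects the head of the stream without consuming it. A minimal, self-contained sketch of that magic-byte technique (the `detectMagic` helper and format names are illustrative, not part of the Docker codebase):

```go
package main

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"fmt"
	"strings"
)

// Magic numbers for the formats the archive package recognizes.
var magics = map[string][]byte{
	"bzip2": {0x42, 0x5A, 0x68},                   // "BZh"
	"gzip":  {0x1F, 0x8B, 0x08},
	"xz":    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // 0xFD "7zXZ" 0x00
}

// detectMagic mirrors the refactored DetectCompression: compare the stream
// head against each magic number, skipping any magic that is longer than
// the sample instead of failing outright.
func detectMagic(header []byte) string {
	for name, m := range magics {
		if len(header) >= len(m) && bytes.Equal(header[:len(m)], m) {
			return name
		}
	}
	return "uncompressed"
}

func main() {
	// Build a small gzip stream in memory.
	var compressed bytes.Buffer
	zw := gzip.NewWriter(&compressed)
	zw.Write([]byte("hello"))
	zw.Close()

	// Peek inspects the first bytes without consuming them, so the same
	// reader can still be handed intact to the matching decompressor --
	// the trick the rewritten DecompressStream relies on.
	br := bufio.NewReader(&compressed)
	header, _ := br.Peek(6)
	fmt.Println(detectMagic(header)) // gzip

	br2 := bufio.NewReader(strings.NewReader("plain text"))
	header2, _ := br2.Peek(6)
	fmt.Println(detectMagic(header2)) // uncompressed
}
```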
diff --git a/archive/archive.go b/archive/archive.go index 2fac18e99f..76c6e31289 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -1,14 +1,12 @@ package archive import ( + "bufio" "bytes" "compress/bzip2" "compress/gzip" "errors" "fmt" - "github.com/dotcloud/docker/pkg/system" - "github.com/dotcloud/docker/utils" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "os" @@ -17,6 +15,10 @@ import ( "path/filepath" "strings" "syscall" + + "github.com/dotcloud/docker/pkg/system" + "github.com/dotcloud/docker/utils" + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) type ( @@ -26,6 +28,7 @@ type ( TarOptions struct { Includes []string Compression Compression + NoLchown bool } ) @@ -41,26 +44,16 @@ const ( ) func DetectCompression(source []byte) Compression { - sourceLen := len(source) for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, } { - fail := false - if len(m) > sourceLen { + if len(source) < len(m) { utils.Debugf("Len too short") continue } - i := 0 - for _, b := range m { - if b != source[i] { - fail = true - break - } - i++ - } - if !fail { + if bytes.Compare(m, source[:len(m)]) == 0 { return compression } } @@ -74,31 +67,24 @@ func xzDecompress(archive io.Reader) (io.ReadCloser, error) { } func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - buf := make([]byte, 10) - totalN := 0 - for totalN < 10 { - n, err := archive.Read(buf[totalN:]) - if err != nil { - if err == io.EOF { - return nil, fmt.Errorf("Tarball too short") - } - return nil, err - } - totalN += n - utils.Debugf("[tar autodetect] n: %d", n) + buf := bufio.NewReader(archive) + bs, err := buf.Peek(10) + if err != nil { + return nil, err } - compression := DetectCompression(buf) - wrap := io.MultiReader(bytes.NewReader(buf), archive) + utils.Debugf("[tar autodetect] n: %v", bs) + + compression := DetectCompression(bs) switch compression { case Uncompressed: - return ioutil.NopCloser(wrap), nil + return ioutil.NopCloser(buf), nil case Gzip: - return gzip.NewReader(wrap) + return gzip.NewReader(buf) case Bzip2: - return ioutil.NopCloser(bzip2.NewReader(wrap)), nil + return ioutil.NopCloser(bzip2.NewReader(buf)), nil case Xz: - return xzDecompress(wrap) + return xzDecompress(buf) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } @@ -194,7 +180,7 @@ func addTarFile(path, name string, tw *tar.Writer) error { return nil } -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader) error { +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. setuid bits) @@ -255,7 +241,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader) e return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) } - if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil { + if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown { return err } @@ -309,8 +295,11 @@ func escapeName(name string) string { return string(escaped) } -// Tar creates an archive from the directory at `path`, only including files whose relative -// paths are included in `filter`. If `filter` is nil, then all files are included. 
+// TarFilter creates an archive from the directory at `srcPath` with `options`, and returns it as a +// stream of bytes. +// +// Files are included according to `options.Includes`, defaulting to including all files. +// Stream is compressed according to `options.Compression`, defaulting to Uncompressed. func TarFilter(srcPath string, options *TarOptions) (io.ReadCloser, error) { pipeReader, pipeWriter := io.Pipe() @@ -418,14 +407,16 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error { // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { + if fi.IsDir() && hdr.Name == "." { + continue + } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } - - if err := createTarFile(path, dest, hdr, tr); err != nil { + if err := createTarFile(path, dest, hdr, tr, options == nil || !options.NoLchown); err != nil { return err } diff --git a/archive/archive_test.go b/archive/archive_test.go index 412660139c..72ffd99565 100644 --- a/archive/archive_test.go +++ b/archive/archive_test.go @@ -3,7 +3,6 @@ package archive import ( "bytes" "fmt" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "os" @@ -11,6 +10,8 @@ import ( "path" "testing" "time" + + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) func TestCmdStreamLargeStderr(t *testing.T) { @@ -132,8 +133,37 @@ func TestTarUntar(t *testing.T) { // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} - err := createTarFile("pax_global_header", "some_dir", &hdr, nil) + err := createTarFile("pax_global_header", "some_dir", &hdr, nil, true) if err != nil { t.Fatal(err) } } + +// Some tars have both GNU-specific (huge uid) and Ustar-specific (long name) things. +// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. +func TestUntarUstarGnuConflict(t *testing.T) { + f, err := os.Open("testdata/broken.tar") + if err != nil { + t.Fatal(err) + } + found := false + tr := tar.NewReader(f) + // Iterate through the files in the archive.
+ for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + t.Fatal(err) + } + if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { + found = true + break + } + } + if !found { + t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") + } +} diff --git a/archive/changes.go b/archive/changes.go index 723e4a7425..88cea0f709 100644 --- a/archive/changes.go +++ b/archive/changes.go @@ -3,15 +3,16 @@ package archive import ( "bytes" "fmt" - "github.com/dotcloud/docker/pkg/system" - "github.com/dotcloud/docker/utils" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "os" "path/filepath" "strings" "syscall" "time" + + "github.com/dotcloud/docker/pkg/system" + "github.com/dotcloud/docker/utils" + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) type ChangeType int @@ -293,13 +294,23 @@ func collectFileInfo(sourceDir string) (*FileInfo, error) { // Compare two directories and generate an array of Change objects describing the changes func ChangesDirs(newDir, oldDir string) ([]Change, error) { - oldRoot, err := collectFileInfo(oldDir) - if err != nil { - return nil, err - } - newRoot, err := collectFileInfo(newDir) - if err != nil { - return nil, err + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, err + } } return newRoot.Changes(oldRoot), nil diff --git a/archive/diff.go b/archive/diff.go index 87e8ac7dc4..d169669126 100644 --- a/archive/diff.go +++ b/archive/diff.go @@ -2,14 +2,14 @@ package archive import ( "fmt" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "os" "path/filepath" "strings" "syscall" - "time" + + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. @@ -18,15 +18,6 @@ import ( func mkdev(major int64, minor int64) uint32 { return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) } -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - if time.IsZero() { - // Return UTIME_OMIT special value - ts.Sec = 0 - ts.Nsec = ((1 << 30) - 2) - return - } - return syscall.NsecToTimespec(time.UnixNano()) -} // ApplyLayer parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`.
@@ -89,7 +80,7 @@ func ApplyLayer(dest string, layer ArchiveReader) error { } defer os.RemoveAll(aufsTempdir) } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr); err != nil { + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil { return err } } @@ -136,7 +127,7 @@ func ApplyLayer(dest string, layer ArchiveReader) error { srcData = tmpFile } - if err := createTarFile(path, dest, srcHdr, srcData); err != nil { + if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil { return err } diff --git a/archive/testdata/broken.tar b/archive/testdata/broken.tar new file mode 100644 index 0000000000..8f10ea6b87 Binary files /dev/null and b/archive/testdata/broken.tar differ diff --git a/archive/time_linux.go b/archive/time_linux.go new file mode 100644 index 0000000000..3448569b1e --- /dev/null +++ b/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/archive/time_unsupported.go b/archive/time_unsupported.go new file mode 100644 index 0000000000..e85aac0540 --- /dev/null +++ b/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/builtins/builtins.go b/builtins/builtins.go index 40d421f154..3e0041c9d7 100644 --- a/builtins/builtins.go +++ b/builtins/builtins.go @@ -1,11 +1,16 @@ package builtins import ( - api "github.com/dotcloud/docker/api/server" + "runtime" + + "github.com/dotcloud/docker/api" + apiserver "github.com/dotcloud/docker/api/server" "github.com/dotcloud/docker/daemon/networkdriver/bridge" + "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/server" + "github.com/dotcloud/docker/utils" ) func Register(eng *engine.Engine) error { @@ -15,12 +20,18 @@ func Register(eng *engine.Engine) error { if err := remote(eng); err != nil { return err } + if err := eng.Register("version", dockerVersion); err != nil { + return err + } return registry.NewService().Install(eng) } // remote: a RESTful api for cross-docker communication func remote(eng *engine.Engine) error { - return eng.Register("serveapi", api.ServeApi) + if err := eng.Register("serveapi", apiserver.ServeApi); err != nil { + return err + } + return eng.Register("acceptconnections", apiserver.AcceptConnections) } // daemon: a default execution and storage backend for Docker on Linux, @@ -44,3 +55,21 @@ func daemon(eng *engine.Engine) error { } return eng.Register("init_networkdriver", bridge.InitDriver) } + +// builtins jobs independent of any subsystem +func dockerVersion(job *engine.Job) engine.Status { + v := &engine.Env{} + v.Set("Version", dockerversion.VERSION) + v.SetJson("ApiVersion", api.APIVERSION) + v.Set("GitCommit", dockerversion.GITCOMMIT) + v.Set("GoVersion", runtime.Version()) + v.Set("Os", runtime.GOOS) + v.Set("Arch", runtime.GOARCH) + if kernelVersion, err := utils.GetKernelVersion(); err == nil { + v.Set("KernelVersion", kernelVersion.String()) + } + if _, err := v.WriteTo(job.Stdout); err != nil { + return 
job.Error(err) + } + return engine.StatusOK +} diff --git a/contrib/check-config.sh b/contrib/check-config.sh index 498ede8af3..8dd618cb67 100755 --- a/contrib/check-config.sh +++ b/contrib/check-config.sh @@ -116,7 +116,7 @@ fi flags=( NAMESPACES {NET,PID,IPC,UTS}_NS DEVPTS_MULTIPLE_INSTANCES - CGROUPS CGROUP_DEVICE + CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_SCHED MACVLAN VETH BRIDGE NF_NAT_IPV4 IP_NF_TARGET_MASQUERADE NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK} diff --git a/contrib/desktop-integration/data/Dockerfile b/contrib/desktop-integration/data/Dockerfile index 76846af912..236912f904 100644 --- a/contrib/desktop-integration/data/Dockerfile +++ b/contrib/desktop-integration/data/Dockerfile @@ -6,7 +6,7 @@ # /data volume is owned by sysadmin. # USAGE: # # Download data Dockerfile -# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/data/Dockerfile +# wget http://raw.githubusercontent.com/dotcloud/docker/master/contrib/desktop-integration/data/Dockerfile # # # Build data image # docker build -t data . diff --git a/contrib/desktop-integration/iceweasel/Dockerfile b/contrib/desktop-integration/iceweasel/Dockerfile index f9f58c9ca5..80d6a55e4a 100644 --- a/contrib/desktop-integration/iceweasel/Dockerfile +++ b/contrib/desktop-integration/iceweasel/Dockerfile @@ -7,7 +7,7 @@ # sound devices. Tested on Debian 7.2 # USAGE: # # Download Iceweasel Dockerfile -# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/iceweasel/Dockerfile +# wget http://raw.githubusercontent.com/dotcloud/docker/master/contrib/desktop-integration/iceweasel/Dockerfile # # # Build iceweasel image # docker build -t iceweasel . diff --git a/contrib/init/sysvinit-debian/docker b/contrib/init/sysvinit-debian/docker index 67f0d2807f..9b50fad448 100755 --- a/contrib/init/sysvinit-debian/docker +++ b/contrib/init/sysvinit-debian/docker @@ -4,6 +4,8 @@ # Provides: docker # Required-Start: $syslog $remote_fs # Required-Stop: $syslog $remote_fs +# Should-Start: cgroupfs-mount cgroup-lite +# Should-Stop: cgroupfs-mount cgroup-lite # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Create lightweight, portable, self-sufficient containers. diff --git a/contrib/init/sysvinit-redhat/docker b/contrib/init/sysvinit-redhat/docker index 2b75c6903f..06699f6ab1 100755 --- a/contrib/init/sysvinit-redhat/docker +++ b/contrib/init/sysvinit-redhat/docker @@ -3,7 +3,7 @@ # /etc/rc.d/init.d/docker # # Daemon for docker.io -# +# # chkconfig: 2345 95 95 # description: Daemon for docker.io @@ -49,6 +49,13 @@ start() { $exec -d $other_args &>> $logfile & pid=$! touch $lockfile + # wait up to 10 seconds for the pidfile to exist. see + # https://github.com/dotcloud/docker/issues/5359 + tries=0 + while [ ! 
-f $pidfile -a $tries -lt 10 ]; do + sleep 1 + tries=$((tries + 1)) + done success echo else diff --git a/contrib/init/upstart/docker.conf b/contrib/init/upstart/docker.conf index e27d77e145..db00e4f47f 100644 --- a/contrib/init/upstart/docker.conf +++ b/contrib/init/upstart/docker.conf @@ -1,6 +1,6 @@ description "Docker daemon" -start on filesystem +start on local-filesystems stop on runlevel [!2345] limit nofile 524288 1048576 limit nproc 524288 1048576 diff --git a/contrib/man/md/Dockerfile.5.md b/contrib/man/md/Dockerfile.5.md new file mode 100644 index 0000000000..c90ebf9a58 --- /dev/null +++ b/contrib/man/md/Dockerfile.5.md @@ -0,0 +1,41 @@ +% DOCKERFILE(5) Docker User Manuals +% Zac Dover +% May 2014 +# NAME + +Dockerfile - automate the steps of creating a Docker image + +# INTRODUCTION +**Dockerfile** is a configuration file that automates the steps of creating a Docker image. Docker can act as a builder and can read instructions from **Dockerfile** to automate the steps that you would otherwise manually perform to create an image. To build an image from a source repository, create a description file called **Dockerfile** at the root of your repository. This file describes the steps that will be taken to assemble the image. When **Dockerfile** has been created, call **docker build** with the path of the source repository as the argument. + +# SYNOPSIS + +INSTRUCTION arguments + +For example: + +FROM image + +# DESCRIPTION + +Dockerfile is a file that automates the steps of creating a Docker image. + +# USAGE + +$ sudo docker build . + -- runs the steps and commits them, building a final image + The path to the source repository defines where to find the context of the build. + The build is run by the docker daemon, not the CLI. The whole context must be + transferred to the daemon. The Docker CLI reports "Uploading context" when the + context is sent to the daemon. + +$ sudo docker build -t repository/tag . + -- specifies a repository and tag at which to save the new image if the build succeeds. + The Docker daemon runs the steps one-by-one, committing the result to a new image + if necessary before finally outputting the ID of the new image. The Docker + daemon automatically cleans up the context it is given. + +Docker re-uses intermediate images whenever possible. This significantly accelerates the *docker build* process. + +# HISTORY +May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.io Dockerfile documentation. diff --git a/contrib/man/md/docker-run.1.md b/contrib/man/md/docker-run.1.md index 56364f9d5f..2ebf82b6a5 100644 --- a/contrib/man/md/docker-run.1.md +++ b/contrib/man/md/docker-run.1.md @@ -164,7 +164,7 @@ and foreground Docker containers. Docker container. This is because by default a container is not allowed to access any devices. A “privileged” container is given access to all devices. -When the operator executes **docker run -privileged**, Docker will enable access +When the operator executes **docker run --privileged**, Docker will enable access to all devices on the host as well as set some configuration in AppArmor to allow the container nearly all the same access to the host as processes running outside of a container on the host. @@ -190,18 +190,28 @@ interactive shell. The default value is false. Set a username or UID for the container. -**-v**, **-volume**=*volume* - Bind mount a volume to the container. The **-v** option can be used one or +**-v**, **-volume**=*volume*[:ro|:rw] + Bind mount a volume to the container.
+ +The **-v** option can be used one or more times to add one or more mounts to a container. These mounts can then be -used in other containers using the **--volumes-from** option. See examples. +used in other containers using the **--volumes-from** option. +The volume may be optionally suffixed with :ro or :rw to mount the volumes in +read-only or read-write mode, respectively. By default, the volumes are mounted +read-write. See examples. -**--volumes-from**=*container-id* +**--volumes-from**=*container-id*[:ro|:rw] Will mount volumes from the specified container identified by container-id. Once a volume is mounted in one container it can be shared with other containers using the **--volumes-from** option when running those other containers. The volumes can be shared even if the original container with the -mount is not running. +mount is not running. + +The container ID may be optionally suffixed with :ro or +:rw to mount the volumes in read-only or read-write mode, respectively. By +default, the volumes are mounted in the same mode (read write or read only) as +the reference container. **-w**, **-workdir**=*directory* @@ -307,7 +317,7 @@ fedora-data image: # docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true # docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash -Multiple -volumes-from parameters will bring together multiple data volumes from +Multiple --volumes-from parameters will bring together multiple data volumes from multiple containers. And it's possible to mount the volumes that came from the DATA container in yet another container via the fedora-container1 intermediary container, allowing you to abstract the actual data source from users of that data: diff --git a/contrib/man/md/docker.1.md b/contrib/man/md/docker.1.md index d1ddf192b5..0071a71c92 100644 --- a/contrib/man/md/docker.1.md +++ b/contrib/man/md/docker.1.md @@ -73,7 +73,7 @@ port=[4243] or path =[/var/run/docker.sock] is omitted, default values are used. **-v**=*true*|*false* Print version information and quit. Default is false. -**--selinux-enabled=*true*|*false* +**--selinux-enabled**=*true*|*false* Enable selinux support. Default is false. # COMMANDS diff --git a/contrib/man/old-man/docker-run.1 b/contrib/man/old-man/docker-run.1 index fd449374e3..ae0295943d 100644 --- a/contrib/man/old-man/docker-run.1 +++ b/contrib/man/old-man/docker-run.1 @@ -245,7 +245,7 @@ docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash .RE .sp .TP -Multiple -volumes-from parameters will bring together multiple data volumes from multiple containers. And it's possible to mount the volumes that came from the DATA container in yet another container via the fedora-container1 intermediary container, allowing you to abstract the actual data source from users of that data: +Multiple --volumes-from parameters will bring together multiple data volumes from multiple containers.
And it's possible to mount the volumes that came from the DATA container in yet another container via the fedora-container1 intermediary container, allowing you to abstract the actual data source from users of that data: .sp .RS docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash diff --git a/contrib/mkimage-busybox.sh b/contrib/mkimage-busybox.sh index c1bb88c350..cbaa567834 100755 --- a/contrib/mkimage-busybox.sh +++ b/contrib/mkimage-busybox.sh @@ -2,6 +2,10 @@ # Generate a very minimal filesystem based on busybox-static, # and load it into the local docker under the name "busybox". +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/busybox-static' +echo >&2 + BUSYBOX=$(which busybox) [ "$BUSYBOX" ] || { echo "Sorry, I could not locate busybox." diff --git a/contrib/mkimage-debootstrap.sh b/contrib/mkimage-debootstrap.sh index 613066e16b..808f393549 100755 --- a/contrib/mkimage-debootstrap.sh +++ b/contrib/mkimage-debootstrap.sh @@ -1,6 +1,10 @@ #!/usr/bin/env bash set -e +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/debootstrap' +echo >&2 + variant='minbase' include='iproute,iputils-ping' arch='amd64' # intentionally undocumented for now diff --git a/contrib/mkimage-rinse.sh b/contrib/mkimage-rinse.sh index dfe9999d92..0692ae1794 100755 --- a/contrib/mkimage-rinse.sh +++ b/contrib/mkimage-rinse.sh @@ -8,6 +8,10 @@ set -e +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/rinse' +echo >&2 + repo="$1" distro="$2" mirror="$3" diff --git a/contrib/mkimage.sh b/contrib/mkimage.sh new file mode 100755 index 0000000000..db4815c204 --- /dev/null +++ b/contrib/mkimage.sh @@ -0,0 +1,105 @@ +#!/usr/bin/env bash +set -e + +mkimg="$(basename "$0")" + +usage() { + echo >&2 "usage: $mkimg [-d dir] [-t tag] script [script-args]" + echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie" + echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal trusty" + echo >&2 " $mkimg -t someuser/busybox busybox-static" + echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5" + exit 1 +} + +scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage" + +optTemp=$(getopt --options '+d:t:h' --longoptions 'dir:,tag:,help' --name "$mkimg" -- "$@") +eval set -- "$optTemp" +unset optTemp + +dir= +tag= +while true; do + case "$1" in + -d|--dir) dir="$2" ; shift 2 ;; + -t|--tag) tag="$2" ; shift 2 ;; + -h|--help) usage ;; + --) shift ; break ;; + esac +done + +script="$1" +[ "$script" ] || usage +shift + +if [ !
-x "$scriptDir/$script" ]; then + echo >&2 "error: $script does not exist or is not executable" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +# don't mistake common scripts like .febootstrap-minimize as image-creators +if [[ "$script" == .* ]]; then + echo >&2 "error: $script is a script helper, not a script" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +delDir= +if [ -z "$dir" ]; then + dir="$(mktemp -d ${TMPDIR:-/tmp}/docker-mkimage.XXXXXXXXXX)" + delDir=1 +fi + +rootfsDir="$dir/rootfs" +( set -x; mkdir -p "$rootfsDir" ) + +# pass all remaining arguments to $script +"$scriptDir/$script" "$rootfsDir" "$@" + +# Docker mounts tmpfs at /dev and procfs at /proc so we can remove them +rm -rf "$rootfsDir/dev" "$rootfsDir/proc" +mkdir -p "$rootfsDir/dev" "$rootfsDir/proc" + +# make sure /etc/resolv.conf has something useful in it +mkdir -p "$rootfsDir/etc" +cat > "$rootfsDir/etc/resolv.conf" <<'EOF' +nameserver 8.8.8.8 +nameserver 8.8.4.4 +EOF + +tarFile="$dir/rootfs.tar.xz" +touch "$tarFile" + +( + set -x + tar --numeric-owner -caf "$tarFile" -C "$rootfsDir" --transform='s,^./,,' . +) + +echo >&2 "+ cat > '$dir/Dockerfile'" +cat > "$dir/Dockerfile" <<'EOF' +FROM scratch +ADD rootfs.tar.xz / +EOF + +# if our generated image has a decent shell, let's set a default command +for shell in /bin/bash /usr/bin/fish /usr/bin/zsh /bin/sh; do + if [ -x "$rootfsDir/$shell" ]; then + ( set -x; echo 'CMD ["'"$shell"'"]' >> "$dir/Dockerfile" ) + break + fi +done + +( set -x; rm -rf "$rootfsDir" ) + +if [ "$tag" ]; then + ( set -x; docker build -t "$tag" "$dir" ) +elif [ "$delDir" ]; then + # if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_ + ( set -x; docker build "$dir" ) +fi + +if [ "$delDir" ]; then + ( set -x; rm -rf "$dir" ) +fi diff --git a/contrib/mkimage/.febootstrap-minimize b/contrib/mkimage/.febootstrap-minimize new file mode 100755 index 0000000000..7dab4eb8b5 --- /dev/null +++ b/contrib/mkimage/.febootstrap-minimize @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +( + cd "$rootfsDir" + + # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" + # locales + rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} + # docs + rm -rf usr/share/{man,doc,info,gnome/help} + # cracklib + #rm -rf usr/share/cracklib + # i18n + rm -rf usr/share/i18n + # yum cache + rm -rf var/cache/yum + mkdir -p --mode=0755 var/cache/yum + # sln + rm -rf sbin/sln + # ldconfig + #rm -rf sbin/ldconfig + rm -rf etc/ld.so.cache var/cache/ldconfig + mkdir -p --mode=0755 var/cache/ldconfig +) diff --git a/contrib/mkimage/busybox-static b/contrib/mkimage/busybox-static new file mode 100755 index 0000000000..e15322b49d --- /dev/null +++ b/contrib/mkimage/busybox-static @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +busybox="$(which busybox 2>/dev/null || true)" +if [ -z "$busybox" ]; then + echo >&2 'error: busybox: not found' + echo >&2 ' install it with your distribution "busybox-static" package' + exit 1 +fi +if ! 
ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then + echo >&2 "error: '$busybox' appears to be a dynamic executable" + echo >&2 ' you should install your distribution "busybox-static" package instead' + exit 1 +fi + +mkdir -p "$rootfsDir/bin" +rm -f "$rootfsDir/bin/busybox" # just in case +cp "$busybox" "$rootfsDir/bin/busybox" + +( + cd "$rootfsDir" + + IFS=$'\n' + modules=( $(bin/busybox --list-modules) ) + unset IFS + + for module in "${modules[@]}"; do + mkdir -p "$(dirname "$module")" + ln -sf /bin/busybox "$module" + done +) diff --git a/contrib/mkimage/debootstrap b/contrib/mkimage/debootstrap new file mode 100755 index 0000000000..4747a84d31 --- /dev/null +++ b/contrib/mkimage/debootstrap @@ -0,0 +1,125 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap + +before=() +while [ $# -gt 0 ] && [[ "$1" == -* ]]; do + before+=( "$1" ) + shift +done + +suite="$1" +shift + +( + set -x + debootstrap "${before[@]}" "$suite" "$rootfsDir" "$@" +) + +# now for some Docker-specific tweaks + +# prevent init scripts from running during install/update +echo >&2 "+ cat > '$rootfsDir/usr/sbin/policy-rc.d'" +cat > "$rootfsDir/usr/sbin/policy-rc.d" <<'EOF' +#!/bin/sh +exit 101 +EOF +chmod +x "$rootfsDir/usr/sbin/policy-rc.d" + +# prevent upstart scripts from running during install/update +( + set -x + chroot "$rootfsDir" dpkg-divert --local --rename --add /sbin/initctl + ln -sf /bin/true "$rootfsDir/sbin/initctl" +) + +# shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB) +( set -x; chroot "$rootfsDir" apt-get clean ) + +# Ubuntu 10.04 sucks... :) +if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then + # force dpkg not to call sync() after package extraction (speeding up installs) + echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'" + echo 'force-unsafe-io' > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" +fi + +if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then + # _keep_ us lean by effectively running "apt-get clean" after every install + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF + DPkg::Post-Invoke { ${aptGetClean} }; + APT::Update::Post-Invoke { ${aptGetClean} }; + + Dir::Cache::pkgcache ""; + Dir::Cache::srcpkgcache ""; + EOF + + # remove apt-cache translations for fast "apt-get update" + echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'" + echo 'Acquire::Languages "none";' > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" +fi + +if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then + # tweak sources.list, where appropriate + lsbDist= + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/os-release" ]; then + lsbDist="$(. "$rootfsDir/etc/os-release" && echo "$ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/lsb-release" ]; then + lsbDist="$(. "$rootfsDir/etc/lsb-release" && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then + lsbDist='Debian' + fi + case "$lsbDist" in + debian|Debian) + # updates and security! 
+ if [ "$suite" != 'sid' -a "$suite" != 'unstable' ]; then + ( + set -x + sed -i "p; s/ $suite main$/ ${suite}-updates main/" "$rootfsDir/etc/apt/sources.list" + echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + ubuntu|Ubuntu) + # add the universe, updates, and security repositories + ( + set -x + sed -i " + s/ $suite main$/ $suite main universe/; p; + s/ $suite main/ ${suite}-updates main/; p; + s/ $suite-updates main/ ${suite}-security main/ + " "$rootfsDir/etc/apt/sources.list" + ) + ;; + tanglu|Tanglu) + # add the updates repository + if [ "$suite" != 'devel' ]; then + ( + set -x + sed -i "p; s/ $suite main$/ ${suite}-updates main/" "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + steamos|SteamOS) + # add contrib and non-free + ( + set -x + sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list" + ) + ;; + esac +fi + +# make sure we're fully up-to-date, too +( + set -x + chroot "$rootfsDir" apt-get update + chroot "$rootfsDir" apt-get dist-upgrade -y +) diff --git a/contrib/mkimage/rinse b/contrib/mkimage/rinse new file mode 100755 index 0000000000..75eb4f0d9d --- /dev/null +++ b/contrib/mkimage/rinse @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# specifying --arch below is safe because "$@" can override it and the "latest" one wins :) + +( + set -x + rinse --directory "$rootfsDir" --arch amd64 "$@" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi + +# make sure we're fully up-to-date, too +( + set -x + chroot "$rootfsDir" yum update -y +) diff --git a/daemon/README.md b/daemon/README.md new file mode 100644 index 0000000000..64bfcb55ee --- /dev/null +++ b/daemon/README.md @@ -0,0 +1,10 @@ +This directory contains code pertaining to running containers and storing images + +Code pertaining to running containers: + + - execdriver + - networkdriver + +Code pertaining to storing images: + + - graphdriver diff --git a/daemon/container.go b/daemon/container.go index 7b6b65494e..e8bc7d478b 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -9,6 +9,7 @@ import ( "log" "os" "path" + "path/filepath" "strings" "sync" "syscall" @@ -89,7 +90,7 @@ func (container *Container) Inject(file io.Reader, pth string) error { defer container.Unmount() // Return error if path exists - destPath := path.Join(container.basefs, pth) + destPath := container.getResourcePath(pth) if _, err := os.Stat(destPath); err == nil { // Since err is nil, the path could be stat'd and it exists return fmt.Errorf("%s exists", pth) @@ -101,7 +102,7 @@ func (container *Container) Inject(file io.Reader, pth string) error { } // Make sure the directory exists - if err := os.MkdirAll(path.Join(container.basefs, path.Dir(pth)), 0755); err != nil { + if err := os.MkdirAll(container.getResourcePath(path.Dir(pth)), 0755); err != nil { return err } @@ -170,6 +171,16 @@ func (container *Container) WriteHostConfig() (err error) { return ioutil.WriteFile(container.hostConfigPath(), data, 0666) } +func (container *Container) getResourcePath(path string) string { + cleanPath := filepath.Join("/", path) + return filepath.Join(container.basefs, cleanPath) +} + +func (container *Container) getRootResourcePath(path string) string { + cleanPath := filepath.Join("/", path) + return 
filepath.Join(container.root, cleanPath) +} + func populateCommand(c *Container, env []string) error { var ( en *execdriver.Network @@ -215,6 +226,7 @@ func populateCommand(c *Container, env []string) error { Memory: c.Config.Memory, MemorySwap: c.Config.MemorySwap, CpuShares: c.Config.CpuShares, + Cpuset: c.Config.Cpuset, } c.command = &execdriver.Command{ ID: c.ID, @@ -344,7 +356,7 @@ func (container *Container) StderrLogPipe() io.ReadCloser { } func (container *Container) buildHostnameFile() error { - container.HostnamePath = path.Join(container.root, "hostname") + container.HostnamePath = container.getRootResourcePath("hostname") if container.Config.Domainname != "" { return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644) } @@ -356,7 +368,7 @@ func (container *Container) buildHostnameAndHostsFiles(IP string) error { return err } - container.HostsPath = path.Join(container.root, "hosts") + container.HostsPath = container.getRootResourcePath("hosts") extraContent := make(map[string]string) @@ -640,7 +652,7 @@ func (container *Container) Export() (archive.Archive, error) { } func (container *Container) WaitTimeout(timeout time.Duration) error { - done := make(chan bool) + done := make(chan bool, 1) go func() { container.Wait() done <- true @@ -674,7 +686,7 @@ func (container *Container) Unmount() error { } func (container *Container) logPath(name string) string { - return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.ID, name)) + return container.getRootResourcePath(fmt.Sprintf("%s-%s.log", container.ID, name)) } func (container *Container) ReadLog(name string) (io.Reader, error) { @@ -682,11 +694,11 @@ func (container *Container) ReadLog(name string) (io.Reader, error) { } func (container *Container) hostConfigPath() string { - return path.Join(container.root, "hostconfig.json") + return container.getRootResourcePath("hostconfig.json") } func (container *Container) jsonPath() string { - return path.Join(container.root, "config.json") + return container.getRootResourcePath("config.json") } // This method must be exported to be used from the lxc template @@ -745,8 +757,10 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) { if err := container.Mount(); err != nil { return nil, err } + var filter []string - basePath := path.Join(container.basefs, resource) + + basePath := container.getResourcePath(resource) stat, err := os.Stat(basePath) if err != nil { container.Unmount() @@ -844,7 +858,7 @@ func (container *Container) setupContainerDns() error { } else if len(daemon.config.DnsSearch) > 0 { dnsSearch = daemon.config.DnsSearch } - container.ResolvConfPath = path.Join(container.root, "resolv.conf") + container.ResolvConfPath = container.getRootResourcePath("resolv.conf") return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch) } else { container.ResolvConfPath = "/etc/resolv.conf" @@ -865,9 +879,17 @@ func (container *Container) initializeNetworking() error { container.Config.Hostname = parts[0] container.Config.Domainname = parts[1] } - container.HostsPath = "/etc/hosts" - return container.buildHostnameFile() + content, err := ioutil.ReadFile("/etc/hosts") + if os.IsNotExist(err) { + return container.buildHostnameAndHostsFiles("") + } + if err != nil { + return err + } + + container.HostsPath = container.getRootResourcePath("hosts") + return ioutil.WriteFile(container.HostsPath, content, 0644) } else if container.hostConfig.NetworkMode.IsContainer() { // we 
need to get the hosts files from the container to join nc, err := container.getNetworkedContainer() @@ -982,12 +1004,12 @@ func (container *Container) setupWorkingDirectory() error { if container.Config.WorkingDir != "" { container.Config.WorkingDir = path.Clean(container.Config.WorkingDir) - pthInfo, err := os.Stat(path.Join(container.basefs, container.Config.WorkingDir)) + pthInfo, err := os.Stat(container.getResourcePath(container.Config.WorkingDir)) if err != nil { if !os.IsNotExist(err) { return err } - if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil { + if err := os.MkdirAll(container.getResourcePath(container.Config.WorkingDir), 0755); err != nil { return err } } diff --git a/daemon/daemon.go b/daemon/daemon.go index 00b6d9eee2..4ea6416ca5 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -64,6 +64,11 @@ type Daemon struct { execDriver execdriver.Driver } +// Install installs daemon capabilities to eng. +func (daemon *Daemon) Install(eng *engine.Engine) error { + return eng.Register("container_inspect", daemon.ContainerInspect) +} + // Mountpoints should be private to the container func remountPrivate(mountPoint string) error { mounted, err := mount.Mounted(mountPoint) @@ -85,6 +90,7 @@ func (daemon *Daemon) List() []*Container { for e := daemon.containers.Front(); e != nil; e = e.Next() { containers.Add(e.Value.(*Container)) } + containers.Sort() return *containers } @@ -141,7 +147,13 @@ func (daemon *Daemon) load(id string) (*Container, error) { } // Register makes a container object usable by the daemon as +// This is a wrapper for register func (daemon *Daemon) Register(container *Container) error { + return daemon.register(container, true) +} + +// register makes a container object usable by the daemon as +func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error { if container.daemon != nil || daemon.Exists(container.ID) { return fmt.Errorf("Container is already loaded") } @@ -165,7 +177,14 @@ func (daemon *Daemon) Register(container *Container) error { } // done daemon.containers.PushBack(container) - daemon.idIndex.Add(container.ID) + + // don't update the Suffixarray if we're starting up + // we'll waste time if we update it for every container + if updateSuffixarray { + daemon.idIndex.Add(container.ID) + } else { + daemon.idIndex.AddWithoutSuffixarrayUpdate(container.ID) + } // FIXME: if the container is supposed to be running but is not, auto restart it? 
// if so, then we need to restart monitor and init a new lock @@ -277,6 +296,10 @@ func (daemon *Daemon) Destroy(container *Container) error { daemon.idIndex.Delete(container.ID) daemon.containers.Remove(element) + if _, err := daemon.containerGraph.Purge(container.ID); err != nil { + utils.Debugf("Unable to remove container from link graph: %s", err) + } + if err := daemon.driver.Remove(container.ID); err != nil { return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err) } @@ -286,10 +309,6 @@ func (daemon *Daemon) Destroy(container *Container) error { return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err) } - if _, err := daemon.containerGraph.Purge(container.ID); err != nil { - utils.Debugf("Unable to remove container from link graph: %s", err) - } - if err := os.RemoveAll(container.root); err != nil { return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) } @@ -329,8 +348,8 @@ func (daemon *Daemon) restore() error { } } - register := func(container *Container) { - if err := daemon.Register(container); err != nil { + registerContainer := func(container *Container) { + if err := daemon.register(container, false); err != nil { utils.Debugf("Failed to register container %s: %s", container.ID, err) } } @@ -342,7 +361,7 @@ func (daemon *Daemon) restore() error { } e := entities[p] if container, ok := containers[e.ID()]; ok { - register(container) + registerContainer(container) delete(containers, e.ID()) } } @@ -359,9 +378,10 @@ func (daemon *Daemon) restore() error { if _, err := daemon.containerGraph.Set(container.Name, container.ID); err != nil { utils.Debugf("Setting default id - %s", err) } - register(container) + registerContainer(container) } + daemon.idIndex.UpdateSuffixarray() if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { fmt.Printf(": done.\n") } @@ -592,15 +612,18 @@ func (daemon *Daemon) Commit(container *Container, repository, tag, comment, aut containerID, containerImage string containerConfig *runconfig.Config ) + if container != nil { containerID = container.ID containerImage = container.Image containerConfig = container.Config } + img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config) if err != nil { return nil, err } + // Register the image if needed if repository != "" { if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil { @@ -667,6 +690,35 @@ func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error return nil } +func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { + if hostConfig != nil && hostConfig.Links != nil { + for _, l := range hostConfig.Links { + parts, err := utils.PartParser("name:alias", l) + if err != nil { + return err + } + child, err := daemon.GetByName(parts["name"]) + if err != nil { + return err + } + if child == nil { + return fmt.Errorf("Could not get container for %s", parts["name"]) + } + if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil { + return err + } + } + + // After we load all the links into the daemon + // set them to nil on the hostconfig + hostConfig.Links = nil + if err := container.WriteHostConfig(); err != nil { + return err + } + } + return nil +} + // FIXME: harmonize with NewGraph() func NewDaemon(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) { daemon, err := NewDaemonFromDirectory(config, eng) @@ -680,6 +732,12 @@ 
func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D if !config.EnableSelinuxSupport { selinux.SetDisabled() } + + // Create the root directory if it doesn't exist + if err := os.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + // Set the default driver graphdriver.DefaultDriver = config.GraphDriver @@ -842,6 +900,10 @@ func (daemon *Daemon) Close() error { utils.Errorf("daemon.containerGraph.Close(): %s", err.Error()) errorsStrings = append(errorsStrings, err.Error()) } + if err := mount.Unmount(daemon.config.Root); err != nil { + utils.Errorf("daemon.Umount(%s): %s", daemon.config.Root, err.Error()) + errorsStrings = append(errorsStrings, err.Error()) + } if len(errorsStrings) > 0 { return fmt.Errorf("%s", strings.Join(errorsStrings, ", ")) } diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go index 4837a398ea..a3e43e60ac 100644 --- a/daemon/execdriver/driver.go +++ b/daemon/execdriver/driver.go @@ -103,9 +103,10 @@ type NetworkInterface struct { } type Resources struct { - Memory int64 `json:"memory"` - MemorySwap int64 `json:"memory_swap"` - CpuShares int64 `json:"cpu_shares"` + Memory int64 `json:"memory"` + MemorySwap int64 `json:"memory_swap"` + CpuShares int64 `json:"cpu_shares"` + Cpuset string `json:"cpuset"` } type Mount struct { diff --git a/daemon/execdriver/execdrivers/execdrivers.go b/daemon/execdriver/execdrivers/execdrivers.go index 18db1f8026..2e18454a09 100644 --- a/daemon/execdriver/execdrivers/execdrivers.go +++ b/daemon/execdriver/execdrivers/execdrivers.go @@ -12,7 +12,7 @@ import ( func NewDriver(name, root, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { switch name { case "lxc": - // we want to five the lxc driver the full docker root because it needs + // we want to give the lxc driver the full docker root because it needs // to access and write config and template files in /var/lib/docker/containers/* // to be backwards compatible return lxc.NewDriver(root, sysInfo.AppArmor) diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index d787d8d873..6b2b2cc46b 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -15,8 +15,8 @@ import ( "time" "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/label" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" "github.com/dotcloud/docker/pkg/system" "github.com/dotcloud/docker/utils" ) @@ -268,18 +268,14 @@ func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) (in } output, err = d.getInfo(c.ID) - if err != nil { - output, err = d.getInfo(c.ID) + if err == nil { + info, err := parseLxcInfo(string(output)) if err != nil { return -1, err } - } - info, err := parseLxcInfo(string(output)) - if err != nil { - return -1, err - } - if info.Running { - return info.Pid, nil + if info.Running { + return info.Pid, nil + } } time.Sleep(50 * time.Millisecond) } diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go index 7fdc5ce92b..d660df902a 100644 --- a/daemon/execdriver/lxc/lxc_template.go +++ b/daemon/execdriver/lxc/lxc_template.go @@ -15,7 +15,9 @@ lxc.network.type = veth lxc.network.link = {{.Network.Interface.Bridge}} lxc.network.name = eth0 lxc.network.mtu = {{.Network.Mtu}} -{{else if not .Network.HostNetworking}} +{{else if .Network.HostNetworking}} +lxc.network.type = none +{{else}} # network is disabled (-n=false) lxc.network.type
= empty lxc.network.flags = up @@ -126,6 +128,9 @@ lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}} {{if .Resources.CpuShares}} lxc.cgroup.cpu.shares = {{.Resources.CpuShares}} {{end}} +{{if .Resources.Cpuset}} +lxc.cgroup.cpuset.cpus = {{.Resources.Cpuset}} +{{end}} {{end}} {{if .Config.lxc}} diff --git a/daemon/execdriver/native/configuration/parse.go b/daemon/execdriver/native/configuration/parse.go index 22fe4b0e66..f18a60f797 100644 --- a/daemon/execdriver/native/configuration/parse.go +++ b/daemon/execdriver/native/configuration/parse.go @@ -8,7 +8,7 @@ import ( "strings" "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/utils" + "github.com/dotcloud/docker/pkg/units" ) type Action func(*libcontainer.Container, interface{}, string) error @@ -75,7 +75,7 @@ func memory(container *libcontainer.Container, context interface{}, value string return fmt.Errorf("cannot set cgroups when they are disabled") } - v, err := utils.RAMInBytes(value) + v, err := units.RAMInBytes(value) if err != nil { return err } @@ -88,7 +88,7 @@ func memoryReservation(container *libcontainer.Container, context interface{}, v return fmt.Errorf("cannot set cgroups when they are disabled") } - v, err := utils.RAMInBytes(value) + v, err := units.RAMInBytes(value) if err != nil { return err } @@ -109,12 +109,19 @@ func memorySwap(container *libcontainer.Container, context interface{}, value st } func addCap(container *libcontainer.Container, context interface{}, value string) error { - container.CapabilitiesMask[value] = true + container.Capabilities = append(container.Capabilities, value) return nil } func dropCap(container *libcontainer.Container, context interface{}, value string) error { - container.CapabilitiesMask[value] = false + // If the capability is specified multiple times, remove all instances. + for i, capability := range container.Capabilities { + if capability == value { + container.Capabilities = append(container.Capabilities[:i], container.Capabilities[i+1:]...) + } + } + + // The capability wasn't found so we will drop it anyways. return nil } diff --git a/daemon/execdriver/native/configuration/parse_test.go b/daemon/execdriver/native/configuration/parse_test.go index 1b0316b688..5524adb857 100644 --- a/daemon/execdriver/native/configuration/parse_test.go +++ b/daemon/execdriver/native/configuration/parse_test.go @@ -4,8 +4,19 @@ import ( "testing" "github.com/dotcloud/docker/daemon/execdriver/native/template" + "github.com/dotcloud/docker/pkg/libcontainer" ) +// Checks whether the expected capability is specified in the capabilities. 
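+// Capabilities are now carried as a flat []string whitelist rather than the old
+// CapabilitiesMask map, so a membership check has to scan the slice.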
+func hasCapability(expected string, capabilities []string) bool { + for _, capability := range capabilities { + if capability == expected { + return true + } + } + return false +} + func TestSetReadonlyRootFs(t *testing.T) { var ( container = template.New() @@ -39,10 +50,10 @@ func TestConfigurationsDoNotConflict(t *testing.T) { t.Fatal(err) } - if !container1.CapabilitiesMask["NET_ADMIN"] { + if !hasCapability("NET_ADMIN", container1.Capabilities) { t.Fatal("container one should have NET_ADMIN enabled") } - if container2.CapabilitiesMask["NET_ADMIN"] { + if hasCapability("NET_ADMIN", container2.Capabilities) { t.Fatal("container two should not have NET_ADMIN enabled") } } @@ -138,10 +149,10 @@ func TestAddCap(t *testing.T) { t.Fatal(err) } - if !container.CapabilitiesMask["MKNOD"] { + if !hasCapability("MKNOD", container.Capabilities) { t.Fatal("container should have MKNOD enabled") } - if !container.CapabilitiesMask["SYS_ADMIN"] { + if !hasCapability("SYS_ADMIN", container.Capabilities) { t.Fatal("container should have SYS_ADMIN enabled") } } @@ -154,14 +165,12 @@ func TestDropCap(t *testing.T) { } ) // enabled all caps like in privileged mode - for key := range container.CapabilitiesMask { - container.CapabilitiesMask[key] = true - } + container.Capabilities = libcontainer.GetAllCapabilities() if err := ParseConfiguration(container, nil, opts); err != nil { t.Fatal(err) } - if container.CapabilitiesMask["MKNOD"] { + if hasCapability("MKNOD", container.Capabilities) { t.Fatal("container should not have MKNOD enabled") } } diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go index a7b3d9a107..3a7001db1a 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -3,6 +3,7 @@ package native import ( "fmt" "os" + "os/exec" "path/filepath" "github.com/dotcloud/docker/daemon/execdriver" @@ -10,6 +11,7 @@ import ( "github.com/dotcloud/docker/daemon/execdriver/native/template" "github.com/dotcloud/docker/pkg/apparmor" "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/mount/nodes" ) // createContainer populates and configures the container type with the @@ -34,8 +36,6 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container if err := d.setPrivileged(container); err != nil { return nil, err } - } else { - container.Mounts = append(container.Mounts, libcontainer.Mount{Type: "devtmpfs"}) } if err := d.setupCgroups(container, c); err != nil { return nil, err @@ -46,7 +46,11 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container if err := d.setupLabels(container, c); err != nil { return nil, err } - if err := configuration.ParseConfiguration(container, d.activeContainers, c.Config["native"]); err != nil { + cmds := make(map[string]*exec.Cmd) + for k, v := range d.activeContainers { + cmds[k] = v.cmd + } + if err := configuration.ParseConfiguration(container, cmds, c.Config["native"]); err != nil { return nil, err } return container, nil @@ -82,10 +86,12 @@ func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver. 
} if c.Network.ContainerID != "" { - cmd := d.activeContainers[c.Network.ContainerID] - if cmd == nil || cmd.Process == nil { + active := d.activeContainers[c.Network.ContainerID] + if active == nil || active.cmd.Process == nil { return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID) } + cmd := active.cmd + nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net") container.Networks = append(container.Networks, &libcontainer.Network{ Type: "netns", @@ -97,14 +103,17 @@ func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver. return nil } -func (d *driver) setPrivileged(container *libcontainer.Container) error { - for key := range container.CapabilitiesMask { - container.CapabilitiesMask[key] = true - } +func (d *driver) setPrivileged(container *libcontainer.Container) (err error) { + container.Capabilities = libcontainer.GetAllCapabilities() container.Cgroups.DeviceAccess = true delete(container.Context, "restrictions") + container.OptionalDeviceNodes = nil + if container.RequiredDeviceNodes, err = nodes.GetHostDeviceNodes(); err != nil { + return err + } + if apparmor.IsEnabled() { container.Context["apparmor_profile"] = "unconfined" } @@ -117,6 +126,7 @@ func (d *driver) setupCgroups(container *libcontainer.Container, c *execdriver.C container.Cgroups.Memory = c.Resources.Memory container.Cgroups.MemoryReservation = c.Resources.Memory container.Cgroups.MemorySwap = c.Resources.MemorySwap + container.Cgroups.CpusetCpus = c.Resources.Cpuset } return nil } diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go index 2e57729d4b..425403fa4e 100644 --- a/daemon/execdriver/native/driver.go +++ b/daemon/execdriver/native/driver.go @@ -7,14 +7,14 @@ import ( "os" "os/exec" "path/filepath" - "strconv" "strings" "syscall" "github.com/dotcloud/docker/daemon/execdriver" "github.com/dotcloud/docker/pkg/apparmor" - "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups/fs" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups/systemd" "github.com/dotcloud/docker/pkg/libcontainer/nsinit" "github.com/dotcloud/docker/pkg/system" ) @@ -53,24 +53,31 @@ func init() { }) } +type activeContainer struct { + container *libcontainer.Container + cmd *exec.Cmd +} + type driver struct { root string initPath string - activeContainers map[string]*exec.Cmd + activeContainers map[string]*activeContainer } func NewDriver(root, initPath string) (*driver, error) { if err := os.MkdirAll(root, 0700); err != nil { return nil, err } + // native driver root is at docker_root/execdriver/native. 
Put apparmor at docker_root if err := apparmor.InstallDefaultProfile(filepath.Join(root, "../..", BackupApparmorProfilePath)); err != nil { return nil, err } + return &driver{ root: root, initPath: initPath, - activeContainers: make(map[string]*exec.Cmd), + activeContainers: make(map[string]*activeContainer), }, nil } @@ -80,7 +87,10 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba if err != nil { return -1, err } - d.activeContainers[c.ID] = &c.Cmd + d.activeContainers[c.ID] = &activeContainer{ + container: container, + cmd: &c.Cmd, + } var ( dataPath = filepath.Join(d.root, c.ID) @@ -175,41 +185,18 @@ func (d *driver) Name() string { return fmt.Sprintf("%s-%s", DriverName, Version) } -// TODO: this can be improved with our driver -// there has to be a better way to do this func (d *driver) GetPidsForContainer(id string) ([]int, error) { - pids := []int{} + active := d.activeContainers[id] - subsystem := "devices" - cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) - if err != nil { - return pids, err - } - cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) - if err != nil { - return pids, err + if active == nil { + return nil, fmt.Errorf("active container for %s does not exist", id) } + c := active.container.Cgroups - filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") - if _, err := os.Stat(filename); os.IsNotExist(err) { - filename = filepath.Join(cgroupRoot, cgroupDir, "docker", id, "tasks") + if systemd.UseSystemd() { + return systemd.GetPids(c) } - - output, err := ioutil.ReadFile(filename) - if err != nil { - return pids, err - } - for _, p := range strings.Split(string(output), "\n") { - if len(p) == 0 { - continue - } - pid, err := strconv.Atoi(p) - if err != nil { - return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) - } - pids = append(pids, pid) - } - return pids, nil + return fs.GetPids(c) } func (d *driver) writeContainerFile(container *libcontainer.Container, id string) error { @@ -225,6 +212,8 @@ func (d *driver) createContainerRoot(id string) error { } func (d *driver) removeContainerRoot(id string) error { + delete(d.activeContainers, id) + return os.RemoveAll(filepath.Join(d.root, id)) } diff --git a/daemon/execdriver/native/template/default_template.go b/daemon/execdriver/native/template/default_template.go index 249c5d5fe8..a80b609a1e 100644 --- a/daemon/execdriver/native/template/default_template.go +++ b/daemon/execdriver/native/template/default_template.go @@ -2,30 +2,25 @@ package template import ( "github.com/dotcloud/docker/pkg/apparmor" - "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/mount/nodes" ) // New returns the docker default configuration for libcontainer func New() *libcontainer.Container { container := &libcontainer.Container{ - CapabilitiesMask: map[string]bool{ - "SETPCAP": false, - "SYS_MODULE": false, - "SYS_RAWIO": false, - "SYS_PACCT": false, - "SYS_ADMIN": false, - "SYS_NICE": false, - "SYS_RESOURCE": false, - "SYS_TIME": false, - "SYS_TTY_CONFIG": false, - "AUDIT_WRITE": false, - "AUDIT_CONTROL": false, - "MAC_OVERRIDE": false, - "MAC_ADMIN": false, - "NET_ADMIN": false, - "MKNOD": true, - "SYSLOG": false, + Capabilities: []string{ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", }, Namespaces: map[string]bool{ "NEWNS": true, @@ -38,7 +33,9 @@ func New() 
*libcontainer.Container { Parent: "docker", DeviceAccess: false, }, - Context: libcontainer.Context{}, + Context: libcontainer.Context{}, + RequiredDeviceNodes: nodes.DefaultNodes, + OptionalDeviceNodes: []string{"/dev/fuse"}, } if apparmor.IsEnabled() { container.Context["apparmor_profile"] = "docker-default" diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go index 12b7a77fb3..2b7aa1b68a 100644 --- a/daemon/graphdriver/aufs/aufs.go +++ b/daemon/graphdriver/aufs/aufs.go @@ -54,7 +54,7 @@ type Driver struct { func Init(root string) (graphdriver.Driver, error) { // Try to load the aufs kernel module if err := supportsAufs(); err != nil { - return nil, err + return nil, graphdriver.ErrNotSupported } paths := []string{ "mnt", diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go index 1ffa264aa1..dab5aecc41 100644 --- a/daemon/graphdriver/aufs/aufs_test.go +++ b/daemon/graphdriver/aufs/aufs_test.go @@ -19,7 +19,7 @@ var ( func testInit(dir string, t *testing.T) graphdriver.Driver { d, err := Init(dir) if err != nil { - if err == ErrAufsNotSupported { + if err == graphdriver.ErrNotSupported { t.Skip(err) } else { t.Fatal(err) diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go index 4d195537eb..614dc1ff06 100644 --- a/daemon/graphdriver/btrfs/btrfs.go +++ b/daemon/graphdriver/btrfs/btrfs.go @@ -31,7 +31,7 @@ func Init(home string) (graphdriver.Driver, error) { } if buf.Type != 0x9123683E { - return nil, fmt.Errorf("%s is not a btrfs filesystem", rootdir) + return nil, graphdriver.ErrNotSupported } return &Driver{ diff --git a/daemon/graphdriver/btrfs/btrfs_test.go b/daemon/graphdriver/btrfs/btrfs_test.go new file mode 100644 index 0000000000..3069a98557 --- /dev/null +++ b/daemon/graphdriver/btrfs/btrfs_test.go @@ -0,0 +1,28 @@ +package btrfs + +import ( + "github.com/dotcloud/docker/daemon/graphdriver/graphtest" + "testing" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown +func TestBtrfsSetup(t *testing.T) { + graphtest.GetDriver(t, "btrfs") +} + +func TestBtrfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "btrfs") +} + +func TestBtrfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "btrfs") +} + +func TestBtrfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "btrfs") +} + +func TestBtrfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/daemon/graphdriver/devmapper/attach_loopback.go b/daemon/graphdriver/devmapper/attach_loopback.go index 23339076e8..d2ab8c3a4b 100644 --- a/daemon/graphdriver/devmapper/attach_loopback.go +++ b/daemon/graphdriver/devmapper/attach_loopback.go @@ -4,6 +4,9 @@ package devmapper import ( "fmt" + "os" + "syscall" + "github.com/dotcloud/docker/utils" ) @@ -14,7 +17,7 @@ func stringToLoopName(src string) [LoNameSize]uint8 { } func getNextFreeLoopbackIndex() (int, error) { - f, err := osOpenFile("/dev/loop-control", osORdOnly, 0644) + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) if err != nil { return 0, err } @@ -27,27 +30,27 @@ func getNextFreeLoopbackIndex() (int, error) { return index, err } -func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile, err error) { +func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { // Start looking for a free /dev/loop for { target := fmt.Sprintf("/dev/loop%d", index) index++ - fi, err := 
osStat(target) + fi, err := os.Stat(target) if err != nil { - if osIsNotExist(err) { + if os.IsNotExist(err) { utils.Errorf("There are no more loopback device available.") } return nil, ErrAttachLoopbackDevice } - if fi.Mode()&osModeDevice != osModeDevice { + if fi.Mode()&os.ModeDevice != os.ModeDevice { utils.Errorf("Loopback device %s is not a block device.", target) continue } // OpenFile adds O_CLOEXEC - loopFile, err = osOpenFile(target, osORdWr, 0644) + loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) if err != nil { utils.Errorf("Error openning loopback device: %s", err) return nil, ErrAttachLoopbackDevice @@ -58,7 +61,7 @@ func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile, loopFile.Close() // If the error is EBUSY, then try the next loopback - if err != sysEBusy { + if err != syscall.EBUSY { utils.Errorf("Cannot set up loopback device %s: %s", target, err) return nil, ErrAttachLoopbackDevice } @@ -80,8 +83,8 @@ func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile, } // attachLoopDevice attaches the given sparse file to the next -// available loopback device. It returns an opened *osFile. -func attachLoopDevice(sparseName string) (loop *osFile, err error) { +// available loopback device. It returns an opened *os.File. +func attachLoopDevice(sparseName string) (loop *os.File, err error) { // Try to retrieve the next available loopback device via syscall. // If it fails, we discard error and start loopking for a @@ -92,7 +95,7 @@ func attachLoopDevice(sparseName string) (loop *osFile, err error) { } // OpenFile adds O_CLOEXEC - sparseFile, err := osOpenFile(sparseName, osORdWr, 0644) + sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) if err != nil { utils.Errorf("Error openning sparse file %s: %s", sparseName, err) return nil, ErrAttachLoopbackDevice diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index a96331d812..4de7858c1f 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -8,10 +8,11 @@ import ( "fmt" "io" "io/ioutil" + "os" + "os/exec" "path" "path/filepath" "strconv" - "strings" "sync" "syscall" "time" @@ -62,8 +63,7 @@ type DeviceSet struct { devicePrefix string TransactionId uint64 NewTransactionId uint64 - nextFreeDevice int - sawBusy bool + nextDeviceId int } type DiskUsage struct { @@ -109,7 +109,19 @@ func (devices *DeviceSet) loopbackDir() string { return path.Join(devices.root, "devicemapper") } -func (devices *DeviceSet) jsonFile() string { +func (devices *DeviceSet) metadataDir() string { + return path.Join(devices.root, "metadata") +} + +func (devices *DeviceSet) metadataFile(info *DevInfo) string { + file := info.Hash + if file == "" { + file = "base" + } + return path.Join(devices.metadataDir(), file) +} + +func (devices *DeviceSet) oldMetadataFile() string { return path.Join(devices.loopbackDir(), "json") } @@ -125,7 +137,7 @@ func (devices *DeviceSet) hasImage(name string) bool { dirname := devices.loopbackDir() filename := path.Join(dirname, name) - _, err := osStat(filename) + _, err := os.Stat(filename) return err == nil } @@ -137,16 +149,16 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { dirname := devices.loopbackDir() filename := path.Join(dirname, name) - if err := osMkdirAll(dirname, 0700); err != nil && !osIsExist(err) { + if err := os.MkdirAll(dirname, 0700); err != nil && !os.IsExist(err) { return "", err } - if _, err := osStat(filename); err 
!= nil { - if !osIsNotExist(err) { + if _, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { return "", err } utils.Debugf("Creating loopback file %s for device-manage use", filename) - file, err := osOpenFile(filename, osORdWr|osOCreate, 0600) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) if err != nil { return "", err } @@ -159,26 +171,24 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { return filename, nil } -func (devices *DeviceSet) allocateDeviceId() int { - // TODO: Add smarter reuse of deleted devices - id := devices.nextFreeDevice - devices.nextFreeDevice = devices.nextFreeDevice + 1 - return id -} - func (devices *DeviceSet) allocateTransactionId() uint64 { devices.NewTransactionId = devices.NewTransactionId + 1 return devices.NewTransactionId } -func (devices *DeviceSet) saveMetadata() error { - devices.devicesLock.Lock() - jsonData, err := json.Marshal(devices.MetaData) - devices.devicesLock.Unlock() +func (devices *DeviceSet) removeMetadata(info *DevInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +func (devices *DeviceSet) saveMetadata(info *DevInfo) error { + jsonData, err := json.Marshal(info) if err != nil { return fmt.Errorf("Error encoding metadata to json: %s", err) } - tmpFile, err := ioutil.TempFile(filepath.Dir(devices.jsonFile()), ".json") + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") if err != nil { return fmt.Errorf("Error creating metadata file: %s", err) } @@ -196,7 +206,7 @@ func (devices *DeviceSet) saveMetadata() error { if err := tmpFile.Close(); err != nil { return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err) } - if err := osRename(tmpFile.Name(), devices.jsonFile()); err != nil { + if err := os.Rename(tmpFile.Name(), devices.metadataFile(info)); err != nil { return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err) } @@ -214,7 +224,12 @@ func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) { defer devices.devicesLock.Unlock() info := devices.Devices[hash] if info == nil { - return nil, fmt.Errorf("Unknown device %s", hash) + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("Unknown device %s", hash) + } + + devices.Devices[hash] = info } return info, nil } @@ -234,7 +249,7 @@ func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*Dev devices.Devices[hash] = info devices.devicesLock.Unlock() - if err := devices.saveMetadata(); err != nil { + if err := devices.saveMetadata(info); err != nil { // Try to remove unused device devices.devicesLock.Lock() delete(devices.Devices, hash) @@ -258,9 +273,9 @@ func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error { func (devices *DeviceSet) createFilesystem(info *DevInfo) error { devname := info.DevName() - err := execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0,lazy_journal_init=0", devname) + err := exec.Command("mkfs.ext4", "-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0", devname).Run() if err != nil { - err = execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0", devname) + err = exec.Command("mkfs.ext4", "-E", "nodiscard,lazy_itable_init=0", devname).Run() } if err != nil { utils.Debugf("\n--->Err: %s\n", err) @@ -269,9 +284,7 @@ func (devices *DeviceSet) createFilesystem(info *DevInfo) error { return nil } -func (devices *DeviceSet) 
loadMetaData() error { - utils.Debugf("loadMetadata()") - defer utils.Debugf("loadMetadata END") +func (devices *DeviceSet) initMetaData() error { _, _, _, params, err := getStatus(devices.getPoolName()) if err != nil { utils.Debugf("\n--->Err: %s\n", err) @@ -284,37 +297,59 @@ func (devices *DeviceSet) loadMetaData() error { } devices.NewTransactionId = devices.TransactionId - jsonData, err := ioutil.ReadFile(devices.jsonFile()) - if err != nil && !osIsNotExist(err) { + // Migrate old metadatafile + + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { utils.Debugf("\n--->Err: %s\n", err) return err } - devices.MetaData.Devices = make(map[string]*DevInfo) if jsonData != nil { - if err := json.Unmarshal(jsonData, &devices.MetaData); err != nil { + m := MetaData{Devices: make(map[string]*DevInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } - } - for hash, d := range devices.Devices { - d.Hash = hash - d.devices = devices + for hash, info := range m.Devices { + info.Hash = hash - if d.DeviceId >= devices.nextFreeDevice { - devices.nextFreeDevice = d.DeviceId + 1 + // If the transaction id is larger than the actual one we lost the device due to some crash + if info.TransactionId <= devices.TransactionId { + devices.saveMetadata(info) + } + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err } - // If the transaction id is larger than the actual one we lost the device due to some crash - if d.TransactionId > devices.TransactionId { - utils.Debugf("Removing lost device %s with id %d", hash, d.TransactionId) - delete(devices.Devices, hash) - } } + return nil } +func (devices *DeviceSet) loadMetadata(hash string) *DevInfo { + info := &DevInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + return nil + } + + // If the transaction id is larger than the actual one we lost the device due to some crash + if info.TransactionId > devices.TransactionId { + return nil + } + + return info +} + func (devices *DeviceSet) setupBaseImage() error { oldInfo, _ := devices.lookupDevice("") if oldInfo != nil && oldInfo.Initialized { @@ -331,14 +366,17 @@ func (devices *DeviceSet) setupBaseImage() error { utils.Debugf("Initializing base device-manager snapshot") - id := devices.allocateDeviceId() + id := devices.nextDeviceId // Create initial device - if err := createDevice(devices.getPoolDevName(), id); err != nil { + if err := createDevice(devices.getPoolDevName(), &id); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } + // Ids are 24bit, so wrap around + devices.nextDeviceId = (id + 1) & 0xffffff + utils.Debugf("Registering base device (id %v) with FS size %v", id, DefaultBaseFsSize) info, err := devices.registerDevice(id, "", DefaultBaseFsSize) if err != nil { @@ -360,7 +398,7 @@ func (devices *DeviceSet) setupBaseImage() error { } info.Initialized = true - if err = devices.saveMetadata(); err != nil { + if err = devices.saveMetadata(info); err != nil { info.Initialized = false utils.Debugf("\n--->Err: %s\n", err) return err @@ -372,11 +410,11 @@ func (devices *DeviceSet) setupBaseImage() error { func setCloseOnExec(name string) { if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { for _, i := range fileInfos { - link, _ := 
osReadlink(filepath.Join("/proc/self/fd", i.Name())) + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) if link == name { fd, err := strconv.Atoi(i.Name()) if err == nil { - sysCloseOnExec(fd) + syscall.CloseOnExec(fd) } } } @@ -388,10 +426,6 @@ func (devices *DeviceSet) log(level int, file string, line int, dmError int, mes return // Ignore _LOG_DEBUG } - if strings.Contains(message, "busy") { - devices.sawBusy = true - } - utils.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) } @@ -408,7 +442,7 @@ func (devices *DeviceSet) ResizePool(size int64) error { datafilename := path.Join(dirname, "data") metadatafilename := path.Join(dirname, "metadata") - datafile, err := osOpenFile(datafilename, osORdWr, 0) + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) if datafile == nil { return err } @@ -429,7 +463,7 @@ func (devices *DeviceSet) ResizePool(size int64) error { } defer dataloopback.Close() - metadatafile, err := osOpenFile(metadatafilename, osORdWr, 0) + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) if metadatafile == nil { return err } @@ -472,39 +506,17 @@ func (devices *DeviceSet) ResizePool(size int64) error { func (devices *DeviceSet) initDevmapper(doInit bool) error { logInit(devices) - // Make sure the sparse images exist in /devicemapper/data and - // /devicemapper/metadata - - hasData := devices.hasImage("data") - hasMetadata := devices.hasImage("metadata") - - if !doInit && !hasData { - return errors.New("Loopback data file not found") - } - - if !doInit && !hasMetadata { - return errors.New("Loopback metadata file not found") - } - - createdLoopback := !hasData || !hasMetadata - data, err := devices.ensureImage("data", DefaultDataLoopbackSize) - if err != nil { - utils.Debugf("Error device ensureImage (data): %s\n", err) - return err - } - metadata, err := devices.ensureImage("metadata", DefaultMetaDataLoopbackSize) - if err != nil { - utils.Debugf("Error device ensureImage (metadata): %s\n", err) + if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { return err } // Set the device prefix from the device id and inode of the docker root dir - st, err := osStat(devices.root) + st, err := os.Stat(devices.root) if err != nil { return fmt.Errorf("Error looking up dir %s: %s", devices.root, err) } - sysSt := toSysStatT(st.Sys()) + sysSt := st.Sys().(*syscall.Stat_t) // "reg-" stands for "regular file". // In the future we might use "dev-" for "device file", etc. // docker-maj,min[-inode] stands for: @@ -527,10 +539,38 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { // so we add this badhack to make sure it closes itself setCloseOnExec("/dev/mapper/control") + // Make sure the sparse images exist in /devicemapper/data and + // /devicemapper/metadata + + createdLoopback := false + // If the pool doesn't exist, create it if info.Exists == 0 { utils.Debugf("Pool doesn't exist. 
Creating it.") + hasData := devices.hasImage("data") + hasMetadata := devices.hasImage("metadata") + + if !doInit && !hasData { + return errors.New("Loopback data file not found") + } + + if !doInit && !hasMetadata { + return errors.New("Loopback metadata file not found") + } + + createdLoopback = !hasData || !hasMetadata + data, err := devices.ensureImage("data", DefaultDataLoopbackSize) + if err != nil { + utils.Debugf("Error device ensureImage (data): %s\n", err) + return err + } + metadata, err := devices.ensureImage("metadata", DefaultMetaDataLoopbackSize) + if err != nil { + utils.Debugf("Error device ensureImage (metadata): %s\n", err) + return err + } + dataFile, err := attachLoopDevice(data) if err != nil { utils.Debugf("\n--->Err: %s\n", err) @@ -552,9 +592,9 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { } // If we didn't just create the data or metadata image, we need to - // load the metadata from the existing file. + // load the transaction id and migrate old metadata if !createdLoopback { - if err = devices.loadMetaData(); err != nil { + if err = devices.initMetaData(); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } @@ -587,13 +627,16 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { return fmt.Errorf("device %s already exists", hash) } - deviceId := devices.allocateDeviceId() + deviceId := devices.nextDeviceId - if err := devices.createSnapDevice(devices.getPoolDevName(), deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { + if err := createSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { utils.Debugf("Error creating snap device: %s\n", err) return err } + // Ids are 24bit, so wrap around + devices.nextDeviceId = (deviceId + 1) & 0xffffff + if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil { deleteDevice(devices.getPoolDevName(), deviceId) utils.Debugf("Error registering device: %s\n", err) @@ -620,14 +663,6 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { } } - if info.Initialized { - info.Initialized = false - if err := devices.saveMetadata(); err != nil { - utils.Debugf("Error saving meta data: %s\n", err) - return err - } - } - if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil { utils.Debugf("Error deleting device: %s\n", err) return err @@ -638,11 +673,11 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { delete(devices.Devices, info.Hash) devices.devicesLock.Unlock() - if err := devices.saveMetadata(); err != nil { + if err := devices.removeMetadata(info); err != nil { devices.devicesLock.Lock() devices.Devices[info.Hash] = info devices.devicesLock.Unlock() - utils.Debugf("Error saving meta data: %s\n", err) + utils.Debugf("Error removing meta data: %s\n", err) return err } @@ -711,12 +746,11 @@ func (devices *DeviceSet) removeDeviceAndWait(devname string) error { var err error for i := 0; i < 1000; i++ { - devices.sawBusy = false err = removeDevice(devname) if err == nil { break } - if !devices.sawBusy { + if err != ErrBusy { return err } @@ -813,7 +847,7 @@ func (devices *DeviceSet) Shutdown() error { // We use MNT_DETACH here in case it is still busy in some running // container. This means it'll go away from the global scope directly, // and the device will be released when that container dies. 
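// (MNT_DETACH is a lazy unmount: the name is removed from the namespace right
// away, but the filesystem itself is only released once it is no longer busy.)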
- if err := sysUnmount(info.mountPath, syscall.MNT_DETACH); err != nil { + if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil { utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err) } @@ -871,13 +905,13 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) } - var flags uintptr = sysMsMgcVal + var flags uintptr = syscall.MS_MGC_VAL mountOptions := label.FormatMountLabel("discard", mountLabel) - err = sysMount(info.DevName(), path, "ext4", flags, mountOptions) - if err != nil && err == sysEInval { + err = syscall.Mount(info.DevName(), path, "ext4", flags, mountOptions) + if err != nil && err == syscall.EINVAL { mountOptions = label.FormatMountLabel("", mountLabel) - err = sysMount(info.DevName(), path, "ext4", flags, mountOptions) + err = syscall.Mount(info.DevName(), path, "ext4", flags, mountOptions) } if err != nil { return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err) @@ -886,7 +920,7 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { info.mountCount = 1 info.mountPath = path - return devices.setInitialized(info) + return nil } func (devices *DeviceSet) UnmountDevice(hash string) error { @@ -914,7 +948,7 @@ func (devices *DeviceSet) UnmountDevice(hash string) error { } utils.Debugf("[devmapper] Unmount(%s)", info.mountPath) - if err := sysUnmount(info.mountPath, 0); err != nil { + if err := syscall.Unmount(info.mountPath, 0); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } @@ -937,14 +971,6 @@ func (devices *DeviceSet) HasDevice(hash string) bool { return info != nil } -func (devices *DeviceSet) HasInitializedDevice(hash string) bool { - devices.Lock() - defer devices.Unlock() - - info, _ := devices.lookupDevice(hash) - return info != nil && info.Initialized -} - func (devices *DeviceSet) HasActivatedDevice(hash string) bool { info, _ := devices.lookupDevice(hash) if info == nil { @@ -961,17 +987,6 @@ func (devices *DeviceSet) HasActivatedDevice(hash string) bool { return devinfo != nil && devinfo.Exists != 0 } -func (devices *DeviceSet) setInitialized(info *DevInfo) error { - info.Initialized = true - if err := devices.saveMetadata(); err != nil { - info.Initialized = false - utils.Debugf("\n--->Err: %s\n", err) - return err - } - - return nil -} - func (devices *DeviceSet) List() []string { devices.Lock() defer devices.Unlock() diff --git a/daemon/graphdriver/devmapper/devmapper.go b/daemon/graphdriver/devmapper/devmapper.go index 7317118dcf..315f0c0db5 100644 --- a/daemon/graphdriver/devmapper/devmapper.go +++ b/daemon/graphdriver/devmapper/devmapper.go @@ -5,9 +5,11 @@ package devmapper import ( "errors" "fmt" - "github.com/dotcloud/docker/utils" + "os" "runtime" "syscall" + + "github.com/dotcloud/docker/utils" ) type DevmapperLogger interface { @@ -62,6 +64,10 @@ var ( ErrInvalidAddNode = errors.New("Invalide AddNoce type") ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") + ErrBusy = errors.New("Device is Busy") + + dmSawBusy bool + dmSawExist bool ) type ( @@ -180,7 +186,7 @@ func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64, start, length, targetType, params } -func getLoopbackBackingFile(file *osFile) (uint64, uint64, error) { +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { loopInfo, err := ioctlLoopGetStatus64(file.Fd()) if 
err != nil { utils.Errorf("Error get loopback backing file: %s\n", err) @@ -189,7 +195,7 @@ func getLoopbackBackingFile(file *osFile) (uint64, uint64, error) { return loopInfo.loDevice, loopInfo.loInode, nil } -func LoopbackSetCapacity(file *osFile) error { +func LoopbackSetCapacity(file *os.File) error { if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { utils.Errorf("Error loopbackSetCapacity: %s", err) return ErrLoopbackSetCapacity @@ -197,20 +203,20 @@ func LoopbackSetCapacity(file *osFile) error { return nil } -func FindLoopDeviceFor(file *osFile) *osFile { +func FindLoopDeviceFor(file *os.File) *os.File { stat, err := file.Stat() if err != nil { return nil } - targetInode := stat.Sys().(*sysStatT).Ino - targetDevice := stat.Sys().(*sysStatT).Dev + targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev for i := 0; true; i++ { path := fmt.Sprintf("/dev/loop%d", i) - file, err := osOpenFile(path, osORdWr, 0) + file, err := os.OpenFile(path, os.O_RDWR, 0) if err != nil { - if osIsNotExist(err) { + if os.IsNotExist(err) { return nil } @@ -280,7 +286,7 @@ func RemoveDevice(name string) error { return nil } -func GetBlockDeviceSize(file *osFile) (uint64, error) { +func GetBlockDeviceSize(file *os.File) (uint64, error) { size, err := ioctlBlkGetSize64(file.Fd()) if err != nil { utils.Errorf("Error getblockdevicesize: %s", err) @@ -290,7 +296,7 @@ func GetBlockDeviceSize(file *osFile) (uint64, error) { } func BlockDeviceDiscard(path string) error { - file, err := osOpenFile(path, osORdWr, 0) + file, err := os.OpenFile(path, os.O_RDWR, 0) if err != nil { return err } @@ -313,7 +319,7 @@ func BlockDeviceDiscard(path string) error { } // This is the programmatic example of "dmsetup create" -func createPool(poolName string, dataFile, metadataFile *osFile) error { +func createPool(poolName string, dataFile, metadataFile *os.File) error { task, err := createTask(DeviceCreate, poolName) if task == nil { return err @@ -343,7 +349,7 @@ func createPool(poolName string, dataFile, metadataFile *osFile) error { return nil } -func reloadPool(poolName string, dataFile, metadataFile *osFile) error { +func reloadPool(poolName string, dataFile, metadataFile *os.File) error { task, err := createTask(DeviceReload, poolName) if task == nil { return err @@ -464,23 +470,33 @@ func resumeDevice(name string) error { return nil } -func createDevice(poolName string, deviceId int) error { - utils.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, deviceId) - task, err := createTask(DeviceTargetMsg, poolName) - if task == nil { - return err - } +func createDevice(poolName string, deviceId *int) error { + utils.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) - if err := task.SetSector(0); err != nil { - return fmt.Errorf("Can't set sector") - } + for { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } - if err := task.SetMessage(fmt.Sprintf("create_thin %d", deviceId)); err != nil { - return fmt.Errorf("Can't set message") - } + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector") + } - if err := task.Run(); err != nil { - return fmt.Errorf("Error running createDevice") + if err := task.SetMessage(fmt.Sprintf("create_thin %d", *deviceId)); err != nil { + return fmt.Errorf("Can't set message") + } + + dmSawExist = false + if err := task.Run(); err != nil { + if dmSawExist { + // Already exists, try next id + *deviceId++ + continue + } + return 
fmt.Errorf("Error running createDevice") + } + break } return nil } @@ -512,7 +528,11 @@ func removeDevice(name string) error { if task == nil { return err } + dmSawBusy = false if err = task.Run(); err != nil { + if dmSawBusy { + return ErrBusy + } return fmt.Errorf("Error running removeDevice") } return nil @@ -546,7 +566,7 @@ func activateDevice(poolName string, name string, deviceId int, size uint64) err return nil } -func (devices *DeviceSet) createSnapDevice(poolName string, deviceId int, baseName string, baseDeviceId int) error { +func createSnapDevice(poolName string, deviceId *int, baseName string, baseDeviceId int) error { devinfo, _ := getInfo(baseName) doSuspend := devinfo != nil && devinfo.Exists != 0 @@ -556,33 +576,44 @@ func (devices *DeviceSet) createSnapDevice(poolName string, deviceId int, baseNa } } - task, err := createTask(DeviceTargetMsg, poolName) - if task == nil { - if doSuspend { - resumeDevice(baseName) + for { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + if doSuspend { + resumeDevice(baseName) + } + return err } - return err - } - if err := task.SetSector(0); err != nil { - if doSuspend { - resumeDevice(baseName) + if err := task.SetSector(0); err != nil { + if doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Can't set sector") } - return fmt.Errorf("Can't set sector") - } - if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", deviceId, baseDeviceId)); err != nil { - if doSuspend { - resumeDevice(baseName) + if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", *deviceId, baseDeviceId)); err != nil { + if doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Can't set message") } - return fmt.Errorf("Can't set message") - } - if err := task.Run(); err != nil { - if doSuspend { - resumeDevice(baseName) + dmSawExist = false + if err := task.Run(); err != nil { + if dmSawExist { + // Already exists, try next id + *deviceId++ + continue + } + + if doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Error running DeviceCreate (createSnapDevice)") } - return fmt.Errorf("Error running DeviceCreate (createSnapDevice)") + + break } if doSuspend { diff --git a/daemon/graphdriver/devmapper/devmapper_log.go b/daemon/graphdriver/devmapper/devmapper_log.go index 18dde7cca5..cdeaed2525 100644 --- a/daemon/graphdriver/devmapper/devmapper_log.go +++ b/daemon/graphdriver/devmapper/devmapper_log.go @@ -4,12 +4,27 @@ package devmapper import "C" +import ( + "strings" +) + // Due to the way cgo works this has to be in a separate file, as devmapper.go has // definitions in the cgo block, which is incompatible with using "//export" //export DevmapperLogCallback func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_class C.int, message *C.char) { + msg := C.GoString(message) + if level < 7 { + if strings.Contains(msg, "busy") { + dmSawBusy = true + } + + if strings.Contains(msg, "File exists") { + dmSawExist = true + } + } + if dmLogger != nil { - dmLogger.log(int(level), C.GoString(file), int(line), int(dm_errno_or_class), C.GoString(message)) + dmLogger.log(int(level), C.GoString(file), int(line), int(dm_errno_or_class), msg) } } diff --git a/daemon/graphdriver/devmapper/devmapper_test.go b/daemon/graphdriver/devmapper/devmapper_test.go index 3ffa163ceb..7c97d6bb04 100644 --- a/daemon/graphdriver/devmapper/devmapper_test.go +++ b/daemon/graphdriver/devmapper/devmapper_test.go @@ -3,285 +3,35 @@ package devmapper import ( + "github.com/dotcloud/docker/daemon/graphdriver/graphtest" "testing" 
) -func TestTaskCreate(t *testing.T) { - t.Skip("FIXME: not a unit test") - // Test success - taskCreate(t, DeviceInfo) - - // Test Failure - DmTaskCreate = dmTaskCreateFail - defer func() { DmTaskCreate = dmTaskCreateFct }() - if task := TaskCreate(-1); task != nil { - t.Fatalf("An error should have occured while creating an invalid task.") - } +func init() { + // Reduce the size of the base fs and loopback for the tests + DefaultDataLoopbackSize = 300 * 1024 * 1024 + DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 + DefaultBaseFsSize = 300 * 1024 * 1024 } -func TestTaskRun(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - // Perform the RUN - if err := task.Run(); err != nil { - t.Fatal(err) - } - // Make sure we don't have error with GetInfo - if _, err := task.GetInfo(); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskRun = dmTaskRunFail - defer func() { DmTaskRun = dmTaskRunFct }() - - task = taskCreate(t, DeviceInfo) - // Perform the RUN - if err := task.Run(); err != ErrTaskRun { - t.Fatalf("An error should have occured while running task.") - } - // Make sure GetInfo also fails - if _, err := task.GetInfo(); err != ErrTaskGetInfo { - t.Fatalf("GetInfo should fail if task.Run() failed.") - } +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown +func TestDevmapperSetup(t *testing.T) { + graphtest.GetDriver(t, "devicemapper") } -func TestTaskSetName(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetName("test"); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskSetName = dmTaskSetNameFail - defer func() { DmTaskSetName = dmTaskSetNameFct }() - - if err := task.SetName("test"); err != ErrTaskSetName { - t.Fatalf("An error should have occured while runnign SetName.") - } +func TestDevmapperCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "devicemapper") } -func TestTaskSetMessage(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetMessage("test"); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskSetMessage = dmTaskSetMessageFail - defer func() { DmTaskSetMessage = dmTaskSetMessageFct }() - - if err := task.SetMessage("test"); err != ErrTaskSetMessage { - t.Fatalf("An error should have occured while runnign SetMessage.") - } +func TestDevmapperCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "devicemapper") } -func TestTaskSetSector(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetSector(128); err != nil { - t.Fatal(err) - } - - DmTaskSetSector = dmTaskSetSectorFail - defer func() { DmTaskSetSector = dmTaskSetSectorFct }() - - // Test failure - if err := task.SetSector(0); err != ErrTaskSetSector { - t.Fatalf("An error should have occured while running SetSector.") - } +func TestDevmapperCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "devicemapper") } -func TestTaskSetCookie(t *testing.T) { - t.Skip("FIXME: not a unit test") - var ( - cookie uint = 0 - task = taskCreate(t, DeviceInfo) - ) - - // Test success - if err := task.SetCookie(&cookie, 0); err != nil { - t.Fatal(err) - } - - // Test failure - if err := task.SetCookie(nil, 0); err != ErrNilCookie { - t.Fatalf("An error should have occured while running SetCookie with
nil cookie.") - } - - DmTaskSetCookie = dmTaskSetCookieFail - defer func() { DmTaskSetCookie = dmTaskSetCookieFct }() - - if err := task.SetCookie(&cookie, 0); err != ErrTaskSetCookie { - t.Fatalf("An error should have occured while running SetCookie.") - } -} - -func TestTaskSetAddNode(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetAddNode(0); err != nil { - t.Fatal(err) - } - - // Test failure - if err := task.SetAddNode(-1); err != ErrInvalidAddNode { - t.Fatalf("An error should have occured running SetAddNode with wrong node.") - } - - DmTaskSetAddNode = dmTaskSetAddNodeFail - defer func() { DmTaskSetAddNode = dmTaskSetAddNodeFct }() - - if err := task.SetAddNode(0); err != ErrTaskSetAddNode { - t.Fatalf("An error should have occured running SetAddNode.") - } -} - -func TestTaskSetRo(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetRo(); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskSetRo = dmTaskSetRoFail - defer func() { DmTaskSetRo = dmTaskSetRoFct }() - - if err := task.SetRo(); err != ErrTaskSetRo { - t.Fatalf("An error should have occured running SetRo.") - } -} - -func TestTaskAddTarget(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.AddTarget(0, 128, "thinp", ""); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskAddTarget = dmTaskAddTargetFail - defer func() { DmTaskAddTarget = dmTaskAddTargetFct }() - - if err := task.AddTarget(0, 128, "thinp", ""); err != ErrTaskAddTarget { - t.Fatalf("An error should have occured running AddTarget.") - } -} - -// func TestTaskGetInfo(t *testing.T) { -// task := taskCreate(t, DeviceInfo) - -// // Test success -// if _, err := task.GetInfo(); err != nil { -// t.Fatal(err) -// } - -// // Test failure -// DmTaskGetInfo = dmTaskGetInfoFail -// defer func() { DmTaskGetInfo = dmTaskGetInfoFct }() - -// if _, err := task.GetInfo(); err != ErrTaskGetInfo { -// t.Fatalf("An error should have occured running GetInfo.") -// } -// } - -// func TestTaskGetNextTarget(t *testing.T) { -// task := taskCreate(t, DeviceInfo) - -// if next, _, _, _, _ := task.GetNextTarget(0); next == 0 { -// t.Fatalf("The next target should not be 0.") -// } -// } - -/// Utils -func taskCreate(t *testing.T, taskType TaskType) *Task { - task := TaskCreate(taskType) - if task == nil { - t.Fatalf("Error creating task") - } - return task -} - -/// Failure function replacement -func dmTaskCreateFail(t int) *CDmTask { - return nil -} - -func dmTaskRunFail(task *CDmTask) int { - return -1 -} - -func dmTaskSetNameFail(task *CDmTask, name string) int { - return -1 -} - -func dmTaskSetMessageFail(task *CDmTask, message string) int { - return -1 -} - -func dmTaskSetSectorFail(task *CDmTask, sector uint64) int { - return -1 -} - -func dmTaskSetCookieFail(task *CDmTask, cookie *uint, flags uint16) int { - return -1 -} - -func dmTaskSetAddNodeFail(task *CDmTask, addNode AddNodeType) int { - return -1 -} - -func dmTaskSetRoFail(task *CDmTask) int { - return -1 -} - -func dmTaskAddTargetFail(task *CDmTask, - start, size uint64, ttype, params string) int { - return -1 -} - -func dmTaskGetInfoFail(task *CDmTask, info *Info) int { - return -1 -} - -func dmGetNextTargetFail(task *CDmTask, next uintptr, start, length *uint64, - target, params *string) uintptr { - return 0 -} - -func dmAttachLoopDeviceFail(filename string, fd *int) string { - 
return "" -} - -func sysGetBlockSizeFail(fd uintptr, size *uint64) sysErrno { - return 1 -} - -func dmUdevWaitFail(cookie uint) int { - return -1 -} - -func dmSetDevDirFail(dir string) int { - return -1 -} - -func dmGetLibraryVersionFail(version *string) int { - return -1 +func TestDevmapperTeardown(t *testing.T) { + graphtest.PutDriver(t) } diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go index 9f240d96e0..609971cda1 100644 --- a/daemon/graphdriver/devmapper/driver.go +++ b/daemon/graphdriver/devmapper/driver.go @@ -26,7 +26,7 @@ type Driver struct { home string } -var Init = func(home string) (graphdriver.Driver, error) { +func Init(home string) (graphdriver.Driver, error) { deviceSet, err := NewDeviceSet(home, true) if err != nil { return nil, err @@ -94,7 +94,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { mp := path.Join(d.home, "mnt", id) // Create the target directories if they don't exist - if err := osMkdirAll(mp, 0755); err != nil && !osIsExist(err) { + if err := os.MkdirAll(mp, 0755); err != nil && !os.IsExist(err) { return "", err } @@ -104,13 +104,13 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { } rootFs := path.Join(mp, "rootfs") - if err := osMkdirAll(rootFs, 0755); err != nil && !osIsExist(err) { + if err := os.MkdirAll(rootFs, 0755); err != nil && !os.IsExist(err) { d.DeviceSet.UnmountDevice(id) return "", err } idFile := path.Join(mp, "id") - if _, err := osStat(idFile); err != nil && osIsNotExist(err) { + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { // Create an "id" file with the container/image id in it to help reconscruct this in case // of later problems if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { diff --git a/daemon/graphdriver/devmapper/driver_test.go b/daemon/graphdriver/devmapper/driver_test.go deleted file mode 100644 index 913add7c8b..0000000000 --- a/daemon/graphdriver/devmapper/driver_test.go +++ /dev/null @@ -1,880 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "fmt" - "github.com/dotcloud/docker/daemon/graphdriver" - "io/ioutil" - "path" - "runtime" - "strings" - "syscall" - "testing" -) - -func init() { - // Reduce the size the the base fs and loopback for the tests - DefaultDataLoopbackSize = 300 * 1024 * 1024 - DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 - DefaultBaseFsSize = 300 * 1024 * 1024 -} - -// denyAllDevmapper mocks all calls to libdevmapper in the unit tests, and denies them by default -func denyAllDevmapper() { - // Hijack all calls to libdevmapper with default panics. - // Authorized calls are selectively hijacked in each tests. 
- DmTaskCreate = func(t int) *CDmTask { - panic("DmTaskCreate: this method should not be called here") - } - DmTaskRun = func(task *CDmTask) int { - panic("DmTaskRun: this method should not be called here") - } - DmTaskSetName = func(task *CDmTask, name string) int { - panic("DmTaskSetName: this method should not be called here") - } - DmTaskSetMessage = func(task *CDmTask, message string) int { - panic("DmTaskSetMessage: this method should not be called here") - } - DmTaskSetSector = func(task *CDmTask, sector uint64) int { - panic("DmTaskSetSector: this method should not be called here") - } - DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { - panic("DmTaskSetCookie: this method should not be called here") - } - DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { - panic("DmTaskSetAddNode: this method should not be called here") - } - DmTaskSetRo = func(task *CDmTask) int { - panic("DmTaskSetRo: this method should not be called here") - } - DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { - panic("DmTaskAddTarget: this method should not be called here") - } - DmTaskGetInfo = func(task *CDmTask, info *Info) int { - panic("DmTaskGetInfo: this method should not be called here") - } - DmGetNextTarget = func(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { - panic("DmGetNextTarget: this method should not be called here") - } - DmUdevWait = func(cookie uint) int { - panic("DmUdevWait: this method should not be called here") - } - DmSetDevDir = func(dir string) int { - panic("DmSetDevDir: this method should not be called here") - } - DmGetLibraryVersion = func(version *string) int { - panic("DmGetLibraryVersion: this method should not be called here") - } - DmLogInitVerbose = func(level int) { - panic("DmLogInitVerbose: this method should not be called here") - } - DmTaskDestroy = func(task *CDmTask) { - panic("DmTaskDestroy: this method should not be called here") - } - LogWithErrnoInit = func() { - panic("LogWithErrnoInit: this method should not be called here") - } -} - -func denyAllSyscall() { - sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { - panic("sysMount: this method should not be called here") - } - sysUnmount = func(target string, flags int) (err error) { - panic("sysUnmount: this method should not be called here") - } - sysCloseOnExec = func(fd int) { - panic("sysCloseOnExec: this method should not be called here") - } - sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - panic("sysSyscall: this method should not be called here") - } - // Not a syscall, but forbidding it here anyway - Mounted = func(mnt string) (bool, error) { - panic("devmapper.Mounted: this method should not be called here") - } - // osOpenFile = os.OpenFile - // osNewFile = os.NewFile - // osCreate = os.Create - // osStat = os.Stat - // osIsNotExist = os.IsNotExist - // osIsExist = os.IsExist - // osMkdirAll = os.MkdirAll - // osRemoveAll = os.RemoveAll - // osRename = os.Rename - // osReadlink = os.Readlink - - // execRun = func(name string, args ...string) error { - // return exec.Command(name, args...).Run() - // } -} - -func mkTestDirectory(t *testing.T) string { - dir, err := ioutil.TempDir("", "docker-test-devmapper-") - if err != nil { - t.Fatal(err) - } - return dir -} - -func newDriver(t *testing.T) *Driver { - home := mkTestDirectory(t) - d, err := Init(home) - if err != nil { - t.Fatal(err) - } - return d.(*Driver) -} - 
-func cleanup(d *Driver) { - d.Cleanup() - osRemoveAll(d.home) -} - -type Set map[string]bool - -func (r Set) Assert(t *testing.T, names ...string) { - for _, key := range names { - required := true - if strings.HasPrefix(key, "?") { - key = key[1:] - required = false - } - if _, exists := r[key]; !exists && required { - t.Fatalf("Key not set: %s", key) - } - delete(r, key) - } - if len(r) != 0 { - t.Fatalf("Unexpected keys: %v", r) - } -} - -func TestInit(t *testing.T) { - var ( - calls = make(Set) - taskMessages = make(Set) - taskTypes = make(Set) - home = mkTestDirectory(t) - ) - defer osRemoveAll(home) - - func() { - denyAllDevmapper() - DmSetDevDir = func(dir string) int { - calls["DmSetDevDir"] = true - expectedDir := "/dev" - if dir != expectedDir { - t.Fatalf("Wrong libdevmapper call\nExpected: DmSetDevDir(%v)\nReceived: DmSetDevDir(%v)\n", expectedDir, dir) - } - return 0 - } - LogWithErrnoInit = func() { - calls["DmLogWithErrnoInit"] = true - } - var task1 CDmTask - DmTaskCreate = func(taskType int) *CDmTask { - calls["DmTaskCreate"] = true - taskTypes[fmt.Sprintf("%d", taskType)] = true - return &task1 - } - DmTaskSetName = func(task *CDmTask, name string) int { - calls["DmTaskSetName"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", expectedTask, task) - } - // FIXME: use Set.AssertRegexp() - if !strings.HasPrefix(name, "docker-") && !strings.HasPrefix(name, "/dev/mapper/docker-") || - !strings.HasSuffix(name, "-pool") && !strings.HasSuffix(name, "-base") { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", "docker-...-pool", name) - } - return 1 - } - DmTaskRun = func(task *CDmTask) int { - calls["DmTaskRun"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskRun(%v)\nReceived: DmTaskRun(%v)\n", expectedTask, task) - } - return 1 - } - DmTaskGetInfo = func(task *CDmTask, info *Info) int { - calls["DmTaskGetInfo"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskGetInfo(%v)\nReceived: DmTaskGetInfo(%v)\n", expectedTask, task) - } - // This will crash if info is not dereferenceable - info.Exists = 0 - return 1 - } - DmTaskSetSector = func(task *CDmTask, sector uint64) int { - calls["DmTaskSetSector"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task) - } - if expectedSector := uint64(0); sector != expectedSector { - t.Fatalf("Wrong libdevmapper call to DmTaskSetSector\nExpected: %v\nReceived: %v\n", expectedSector, sector) - } - return 1 - } - DmTaskSetMessage = func(task *CDmTask, message string) int { - calls["DmTaskSetMessage"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task) - } - taskMessages[message] = true - return 1 - } - DmTaskDestroy = func(task *CDmTask) { - calls["DmTaskDestroy"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) - } - } - DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { - calls["DmTaskSetTarget"] = true - expectedTask := &task1 - if task != 
expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) - } - if start != 0 { - t.Fatalf("Wrong start: %d != %d", start, 0) - } - if ttype != "thin" && ttype != "thin-pool" { - t.Fatalf("Wrong ttype: %s", ttype) - } - // Quick smoke test - if params == "" { - t.Fatalf("Params should not be empty") - } - return 1 - } - fakeCookie := uint(4321) - DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { - calls["DmTaskSetCookie"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) - } - if flags != 0 { - t.Fatalf("Cookie flags should be 0 (not %x)", flags) - } - *cookie = fakeCookie - return 1 - } - DmUdevWait = func(cookie uint) int { - calls["DmUdevWait"] = true - if cookie != fakeCookie { - t.Fatalf("Wrong cookie: %d != %d", cookie, fakeCookie) - } - return 1 - } - DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { - if addNode != AddNodeOnCreate { - t.Fatalf("Wrong AddNoteType: %v (expected %v)", addNode, AddNodeOnCreate) - } - calls["DmTaskSetAddNode"] = true - return 1 - } - execRun = func(name string, args ...string) error { - calls["execRun"] = true - if name != "mkfs.ext4" { - t.Fatalf("Expected %s to be executed, not %s", "mkfs.ext4", name) - } - return nil - } - driver, err := Init(home) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := driver.Cleanup(); err != nil { - t.Fatal(err) - } - }() - }() - // Put all tests in a function to make sure the garbage collection will - // occur. - - // Call GC to cleanup runtime.Finalizers - runtime.GC() - - calls.Assert(t, - "DmSetDevDir", - "DmLogWithErrnoInit", - "DmTaskSetName", - "DmTaskRun", - "DmTaskGetInfo", - "DmTaskDestroy", - "execRun", - "DmTaskCreate", - "DmTaskSetTarget", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetSector", - "DmTaskSetMessage", - "DmTaskSetAddNode", - ) - taskTypes.Assert(t, "0", "6", "17") - taskMessages.Assert(t, "create_thin 0", "set_transaction_id 0 1") -} - -func fakeInit() func(home string) (graphdriver.Driver, error) { - oldInit := Init - Init = func(home string) (graphdriver.Driver, error) { - return &Driver{ - home: home, - }, nil - } - return oldInit -} - -func restoreInit(init func(home string) (graphdriver.Driver, error)) { - Init = init -} - -func mockAllDevmapper(calls Set) { - DmSetDevDir = func(dir string) int { - calls["DmSetDevDir"] = true - return 0 - } - LogWithErrnoInit = func() { - calls["DmLogWithErrnoInit"] = true - } - DmTaskCreate = func(taskType int) *CDmTask { - calls["DmTaskCreate"] = true - return &CDmTask{} - } - DmTaskSetName = func(task *CDmTask, name string) int { - calls["DmTaskSetName"] = true - return 1 - } - DmTaskRun = func(task *CDmTask) int { - calls["DmTaskRun"] = true - return 1 - } - DmTaskGetInfo = func(task *CDmTask, info *Info) int { - calls["DmTaskGetInfo"] = true - return 1 - } - DmTaskSetSector = func(task *CDmTask, sector uint64) int { - calls["DmTaskSetSector"] = true - return 1 - } - DmTaskSetMessage = func(task *CDmTask, message string) int { - calls["DmTaskSetMessage"] = true - return 1 - } - DmTaskDestroy = func(task *CDmTask) { - calls["DmTaskDestroy"] = true - } - DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { - calls["DmTaskSetTarget"] = true - return 1 - } - DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { - calls["DmTaskSetCookie"] = true - 
return 1 - } - DmUdevWait = func(cookie uint) int { - calls["DmUdevWait"] = true - return 1 - } - DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { - calls["DmTaskSetAddNode"] = true - return 1 - } - execRun = func(name string, args ...string) error { - calls["execRun"] = true - return nil - } -} - -func TestDriverName(t *testing.T) { - denyAllDevmapper() - defer denyAllDevmapper() - - oldInit := fakeInit() - defer restoreInit(oldInit) - - d := newDriver(t) - if d.String() != "devicemapper" { - t.Fatalf("Expected driver name to be devicemapper got %s", d.String()) - } -} - -func TestDriverCreate(t *testing.T) { - denyAllDevmapper() - denyAllSyscall() - defer denyAllSyscall() - defer denyAllDevmapper() - - calls := make(Set) - mockAllDevmapper(calls) - - sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { - calls["sysMount"] = true - // FIXME: compare the exact source and target strings (inodes + devname) - if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source) - } - if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) - } - if expectedFstype := "ext4"; fstype != expectedFstype { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype) - } - if expectedFlags := uintptr(3236757504); flags != expectedFlags { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags) - } - return nil - } - - sysUnmount = func(target string, flag int) error { - //calls["sysUnmount"] = true - - return nil - } - - Mounted = func(mnt string) (bool, error) { - calls["Mounted"] = true - if !strings.HasPrefix(mnt, "/tmp/docker-test-devmapper-") || !strings.HasSuffix(mnt, "/mnt/1") { - t.Fatalf("Wrong mounted call\nExpected: Mounted(%v)\nReceived: Mounted(%v)\n", "/tmp/docker-test-devmapper-.../mnt/1", mnt) - } - return false, nil - } - - sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - calls["sysSyscall"] = true - if trap != sysSysIoctl { - t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap) - } - switch a2 { - case LoopSetFd: - calls["ioctl.loopsetfd"] = true - case LoopCtlGetFree: - calls["ioctl.loopctlgetfree"] = true - case LoopGetStatus64: - calls["ioctl.loopgetstatus"] = true - case LoopSetStatus64: - calls["ioctl.loopsetstatus"] = true - case LoopClrFd: - calls["ioctl.loopclrfd"] = true - case LoopSetCapacity: - calls["ioctl.loopsetcapacity"] = true - case BlkGetSize64: - calls["ioctl.blkgetsize"] = true - default: - t.Fatalf("Unexpected IOCTL. 
Received %d", a2) - } - return 0, 0, 0 - } - - func() { - d := newDriver(t) - - calls.Assert(t, - "DmSetDevDir", - "DmLogWithErrnoInit", - "DmTaskSetName", - "DmTaskRun", - "DmTaskGetInfo", - "execRun", - "DmTaskCreate", - "DmTaskSetTarget", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetSector", - "DmTaskSetMessage", - "DmTaskSetAddNode", - "sysSyscall", - "ioctl.blkgetsize", - "ioctl.loopsetfd", - "ioctl.loopsetstatus", - "?ioctl.loopctlgetfree", - ) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - calls.Assert(t, - "DmTaskCreate", - "DmTaskGetInfo", - "DmTaskRun", - "DmTaskSetSector", - "DmTaskSetName", - "DmTaskSetMessage", - ) - - }() - - runtime.GC() - - calls.Assert(t, - "DmTaskDestroy", - ) -} - -func TestDriverRemove(t *testing.T) { - denyAllDevmapper() - denyAllSyscall() - defer denyAllSyscall() - defer denyAllDevmapper() - - calls := make(Set) - mockAllDevmapper(calls) - - sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { - calls["sysMount"] = true - // FIXME: compare the exact source and target strings (inodes + devname) - if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source) - } - if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) - } - if expectedFstype := "ext4"; fstype != expectedFstype { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype) - } - if expectedFlags := uintptr(3236757504); flags != expectedFlags { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags) - } - return nil - } - sysUnmount = func(target string, flags int) (err error) { - // FIXME: compare the exact source and target strings (inodes + devname) - if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) - } - if expectedFlags := 0; flags != expectedFlags { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags) - } - return nil - } - Mounted = func(mnt string) (bool, error) { - calls["Mounted"] = true - return false, nil - } - - sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - calls["sysSyscall"] = true - if trap != sysSysIoctl { - t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap) - } - switch a2 { - case LoopSetFd: - calls["ioctl.loopsetfd"] = true - case LoopCtlGetFree: - calls["ioctl.loopctlgetfree"] = true - case LoopGetStatus64: - calls["ioctl.loopgetstatus"] = true - case LoopSetStatus64: - calls["ioctl.loopsetstatus"] = true - case LoopClrFd: - calls["ioctl.loopclrfd"] = true - case LoopSetCapacity: - calls["ioctl.loopsetcapacity"] = true - case BlkGetSize64: - calls["ioctl.blkgetsize"] = true - default: - t.Fatalf("Unexpected IOCTL. 
Received %d", a2) - } - return 0, 0, 0 - } - - func() { - d := newDriver(t) - - calls.Assert(t, - "DmSetDevDir", - "DmLogWithErrnoInit", - "DmTaskSetName", - "DmTaskRun", - "DmTaskGetInfo", - "execRun", - "DmTaskCreate", - "DmTaskSetTarget", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetSector", - "DmTaskSetMessage", - "DmTaskSetAddNode", - "sysSyscall", - "ioctl.blkgetsize", - "ioctl.loopsetfd", - "ioctl.loopsetstatus", - "?ioctl.loopctlgetfree", - ) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - calls.Assert(t, - "DmTaskCreate", - "DmTaskGetInfo", - "DmTaskRun", - "DmTaskSetSector", - "DmTaskSetName", - "DmTaskSetMessage", - ) - - Mounted = func(mnt string) (bool, error) { - calls["Mounted"] = true - return true, nil - } - - if err := d.Remove("1"); err != nil { - t.Fatal(err) - } - - calls.Assert(t, - "DmTaskRun", - "DmTaskSetSector", - "DmTaskSetName", - "DmTaskSetMessage", - "DmTaskCreate", - "DmTaskGetInfo", - "DmTaskSetCookie", - "DmTaskSetTarget", - "DmTaskSetAddNode", - "DmUdevWait", - ) - }() - runtime.GC() - - calls.Assert(t, - "DmTaskDestroy", - ) -} - -func TestCleanup(t *testing.T) { - t.Skip("FIXME: not a unit test") - t.Skip("Unimplemented") - d := newDriver(t) - defer osRemoveAll(d.home) - - mountPoints := make([]string, 2) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - // Mount the id - p, err := d.Get("1", "") - if err != nil { - t.Fatal(err) - } - mountPoints[0] = p - - if err := d.Create("2", "1"); err != nil { - t.Fatal(err) - } - - p, err = d.Get("2", "") - if err != nil { - t.Fatal(err) - } - mountPoints[1] = p - - // Ensure that all the mount points are currently mounted - for _, p := range mountPoints { - if mounted, err := Mounted(p); err != nil { - t.Fatal(err) - } else if !mounted { - t.Fatalf("Expected %s to be mounted", p) - } - } - - // Ensure that devices are active - for _, p := range []string{"1", "2"} { - if !d.HasActivatedDevice(p) { - t.Fatalf("Expected %s to have an active device", p) - } - } - - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - - // Ensure that all the mount points are no longer mounted - for _, p := range mountPoints { - if mounted, err := Mounted(p); err != nil { - t.Fatal(err) - } else if mounted { - t.Fatalf("Expected %s to not be mounted", p) - } - } - - // Ensure that devices are no longer activated - for _, p := range []string{"1", "2"} { - if d.HasActivatedDevice(p) { - t.Fatalf("Expected %s not be an active device", p) - } - } -} - -func TestNotMounted(t *testing.T) { - t.Skip("FIXME: not a unit test") - t.Skip("Not implemented") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - mounted, err := Mounted(path.Join(d.home, "mnt", "1")) - if err != nil { - t.Fatal(err) - } - if mounted { - t.Fatal("Id 1 should not be mounted") - } -} - -func TestMounted(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if _, err := d.Get("1", ""); err != nil { - t.Fatal(err) - } - - mounted, err := Mounted(path.Join(d.home, "mnt", "1")) - if err != nil { - t.Fatal(err) - } - if !mounted { - t.Fatal("Id 1 should be mounted") - } -} - -func TestInitCleanedDriver(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if _, err := d.Get("1", ""); err != nil { - t.Fatal(err) - } - - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - - driver, err := 
Init(d.home) - if err != nil { - t.Fatal(err) - } - d = driver.(*Driver) - defer cleanup(d) - - if _, err := d.Get("1", ""); err != nil { - t.Fatal(err) - } -} - -func TestMountMountedDriver(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - // Perform get on same id to ensure that it will - // not be mounted twice - if _, err := d.Get("1", ""); err != nil { - t.Fatal(err) - } - if _, err := d.Get("1", ""); err != nil { - t.Fatal(err) - } -} - -func TestGetReturnsValidDevice(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - if !d.HasDevice("1") { - t.Fatalf("Expected id 1 to be in device set") - } - - if _, err := d.Get("1", ""); err != nil { - t.Fatal(err) - } - - if !d.HasActivatedDevice("1") { - t.Fatalf("Expected id 1 to be activated") - } - - if !d.HasInitializedDevice("1") { - t.Fatalf("Expected id 1 to be initialized") - } -} - -func TestDriverGetSize(t *testing.T) { - t.Skip("FIXME: not a unit test") - t.Skipf("Size is currently not implemented") - - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - mountPoint, err := d.Get("1", "") - if err != nil { - t.Fatal(err) - } - - size := int64(1024) - - f, err := osCreate(path.Join(mountPoint, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - f.Close() - - // diffSize, err := d.DiffSize("1") - // if err != nil { - // t.Fatal(err) - // } - // if diffSize != size { - // t.Fatalf("Expected size %d got %d", size, diffSize) - // } -} - -func assertMap(t *testing.T, m map[string]bool, keys ...string) { - for _, key := range keys { - if _, exists := m[key]; !exists { - t.Fatalf("Key not set: %s", key) - } - delete(m, key) - } - if len(m) != 0 { - t.Fatalf("Unexpected keys: %v", m) - } -} diff --git a/daemon/graphdriver/devmapper/ioctl.go b/daemon/graphdriver/devmapper/ioctl.go index 30bafff943..8f403da2b0 100644 --- a/daemon/graphdriver/devmapper/ioctl.go +++ b/daemon/graphdriver/devmapper/ioctl.go @@ -3,11 +3,12 @@ package devmapper import ( + "syscall" "unsafe" ) func ioctlLoopCtlGetFree(fd uintptr) (int, error) { - index, _, err := sysSyscall(sysSysIoctl, fd, LoopCtlGetFree, 0) + index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) if err != 0 { return 0, err } @@ -15,21 +16,21 @@ func ioctlLoopCtlGetFree(fd uintptr) (int, error) { } func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetFd, sparseFd); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { return err } return nil } func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *LoopInfo64) error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { return err } return nil } func ioctlLoopClrFd(loopFd uintptr) error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopClrFd, 0); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { return err } return nil @@ -38,14 +39,14 @@ func ioctlLoopClrFd(loopFd uintptr) error { func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) { loopInfo := 
&LoopInfo64{} - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { return nil, err } return loopInfo, nil } func ioctlLoopSetCapacity(loopFd uintptr, value int) error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { return err } return nil @@ -53,7 +54,7 @@ func ioctlLoopSetCapacity(loopFd uintptr, value int) error { func ioctlBlkGetSize64(fd uintptr) (int64, error) { var size int64 - if _, _, err := sysSyscall(sysSysIoctl, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { return 0, err } return size, nil @@ -64,7 +65,7 @@ func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { r[0] = offset r[1] = length - if _, _, err := sysSyscall(sysSysIoctl, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { return err } return nil diff --git a/daemon/graphdriver/devmapper/mount.go b/daemon/graphdriver/devmapper/mount.go index 4f19109bf8..6de9e46c8c 100644 --- a/daemon/graphdriver/devmapper/mount.go +++ b/daemon/graphdriver/devmapper/mount.go @@ -3,25 +3,27 @@ package devmapper import ( + "os" "path/filepath" + "syscall" ) // FIXME: this is copy-pasted from the aufs driver. // It should be moved into the core. -var Mounted = func(mountpoint string) (bool, error) { - mntpoint, err := osStat(mountpoint) +func Mounted(mountpoint string) (bool, error) { + mntpoint, err := os.Stat(mountpoint) if err != nil { - if osIsNotExist(err) { + if os.IsNotExist(err) { return false, nil } return false, err } - parent, err := osStat(filepath.Join(mountpoint, "..")) + parent, err := os.Stat(filepath.Join(mountpoint, "..")) if err != nil { return false, err } - mntpointSt := toSysStatT(mntpoint.Sys()) - parentSt := toSysStatT(parent.Sys()) + mntpointSt := mntpoint.Sys().(*syscall.Stat_t) + parentSt := parent.Sys().(*syscall.Stat_t) return mntpointSt.Dev != parentSt.Dev, nil } diff --git a/daemon/graphdriver/devmapper/sys.go b/daemon/graphdriver/devmapper/sys.go deleted file mode 100644 index 5a9ab4d74b..0000000000 --- a/daemon/graphdriver/devmapper/sys.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "os" - "os/exec" - "syscall" -) - -type ( - sysStatT syscall.Stat_t - sysErrno syscall.Errno - - osFile struct{ *os.File } -) - -var ( - sysMount = syscall.Mount - sysUnmount = syscall.Unmount - sysCloseOnExec = syscall.CloseOnExec - sysSyscall = syscall.Syscall - - osOpenFile = func(name string, flag int, perm os.FileMode) (*osFile, error) { - f, err := os.OpenFile(name, flag, perm) - return &osFile{File: f}, err - } - osOpen = func(name string) (*osFile, error) { f, err := os.Open(name); return &osFile{File: f}, err } - osNewFile = os.NewFile - osCreate = os.Create - osStat = os.Stat - osIsNotExist = os.IsNotExist - osIsExist = os.IsExist - osMkdirAll = os.MkdirAll - osRemoveAll = os.RemoveAll - osRename = os.Rename - osReadlink = os.Readlink - - execRun = func(name string, args ...string) error { return exec.Command(name, args...).Run() } -) - -const ( - sysMsMgcVal = syscall.MS_MGC_VAL - 
sysMsRdOnly = syscall.MS_RDONLY - sysEInval = syscall.EINVAL - sysSysIoctl = syscall.SYS_IOCTL - sysEBusy = syscall.EBUSY - - osORdOnly = os.O_RDONLY - osORdWr = os.O_RDWR - osOCreate = os.O_CREATE - osModeDevice = os.ModeDevice -) - -func toSysStatT(i interface{}) *sysStatT { - return (*sysStatT)(i.(*syscall.Stat_t)) -} diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go index 80bf8a0143..96f8d3ab3e 100644 --- a/daemon/graphdriver/driver.go +++ b/daemon/graphdriver/driver.go @@ -1,9 +1,9 @@ package graphdriver import ( + "errors" "fmt" "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/utils" "os" "path" ) @@ -43,6 +43,8 @@ var ( "devicemapper", "vfs", } + + ErrNotSupported = errors.New("driver not supported") ) func init() { @@ -62,7 +64,7 @@ func GetDriver(name, home string) (Driver, error) { if initFunc, exists := drivers[name]; exists { return initFunc(path.Join(home, name)) } - return nil, fmt.Errorf("No such driver: %s", name) + return nil, ErrNotSupported } func New(root string) (driver Driver, err error) { @@ -74,9 +76,12 @@ func New(root string) (driver Driver, err error) { // Check for priority drivers first for _, name := range priority { - if driver, err = GetDriver(name, root); err != nil { - utils.Debugf("Error loading driver %s: %s", name, err) - continue + driver, err = GetDriver(name, root) + if err != nil { + if err == ErrNotSupported { + continue + } + return nil, err } return driver, nil } @@ -84,9 +89,12 @@ func New(root string) (driver Driver, err error) { // Check all registered drivers if no priority driver is found for _, initFunc := range drivers { if driver, err = initFunc(root); err != nil { - continue + if err == ErrNotSupported { + continue + } + return nil, err } return driver, nil } - return nil, err + return nil, fmt.Errorf("No supported storage backend found") } diff --git a/daemon/graphdriver/graphtest/graphtest.go b/daemon/graphdriver/graphtest/graphtest.go new file mode 100644 index 0000000000..f8ccb2ef33 --- /dev/null +++ b/daemon/graphdriver/graphtest/graphtest.go @@ -0,0 +1,228 @@ +package graphtest + +import ( + "github.com/dotcloud/docker/daemon/graphdriver" + "io/ioutil" + "os" + "path" + "syscall" + "testing" +) + +var ( + drv *Driver +) + +type Driver struct { + graphdriver.Driver + root string + refCount int +} + +func newDriver(t *testing.T, name string) *Driver { + root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-") + if err != nil { + t.Fatal(err) + } + + if err := os.MkdirAll(root, 0755); err != nil { + t.Fatal(err) + } + + d, err := graphdriver.GetDriver(name, root) + if err != nil { + if err == graphdriver.ErrNotSupported { + t.Skipf("Driver %s not supported", name) + } + t.Fatal(err) + } + return &Driver{d, root, 1} +} + +func cleanup(t *testing.T, d *Driver) { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + os.RemoveAll(d.root) +} + +func GetDriver(t *testing.T, name string) graphdriver.Driver { + if drv == nil { + drv = newDriver(t, name) + } else { + drv.refCount++ + } + return drv +} + +func PutDriver(t *testing.T) { + if drv == nil { + t.Skip("No driver to put!") + } + drv.refCount-- + if drv.refCount == 0 { + cleanup(t, drv) + drv = nil + } +} + +func verifyFile(t *testing.T, path string, mode os.FileMode, uid, gid uint32) { + fi, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + + if fi.Mode()&os.ModeType != mode&os.ModeType { + t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) + } + + if fi.Mode()&os.ModePerm !=
mode&os.ModePerm { + t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) + } + + if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { + t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) + } + + if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { + t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) + } + + if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { + t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) + } + + if stat, ok := fi.Sys().(*syscall.Stat_t); ok { + if stat.Uid != uid { + t.Fatalf("%s not owned by uid %d", path, uid) + } + if stat.Gid != gid { + t.Fatalf("%s not owned by gid %d", path, gid) + } + } + +} + +// Creates a new image and verifies it is empty and has the right metadata +func DriverTestCreateEmpty(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + if err := driver.Create("empty", ""); err != nil { + t.Fatal(err) + } + + if !driver.Exists("empty") { + t.Fatal("Newly created image doesn't exist") + } + + dir, err := driver.Get("empty", "") + if err != nil { + t.Fatal(err) + } + + verifyFile(t, dir, 0755|os.ModeDir, 0, 0) + + // Verify that the directory is empty + fis, err := ioutil.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 0 { + t.Fatal("New directory not empty") + } + + driver.Put("empty") + + if err := driver.Remove("empty"); err != nil { + t.Fatal(err) + } + +} + +func createBase(t *testing.T, driver graphdriver.Driver, name string) { + // We need to be able to set any perms + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) + + if err := driver.Create(name, ""); err != nil { + t.Fatal(err) + } + + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { + t.Fatal(err) + } + if err := os.Chown(subdir, 1, 2); err != nil { + t.Fatal(err) + } + + file := path.Join(dir, "a file") + if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil { + t.Fatal(err) + } +} + +func verifyBase(t *testing.T, driver graphdriver.Driver, name string) { + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) + + file := path.Join(dir, "a file") + verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) + + fis, err := ioutil.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 2 { + t.Fatal("Unexpected files in base image") + } + +} + +func DriverTestCreateBase(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + verifyBase(t, driver, "Base") + + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } +} + +func DriverTestCreateSnap(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + + if err := driver.Create("Snap", "Base"); err != nil { + t.Fatal(err) + } + + verifyBase(t, driver, "Snap") + + if err := driver.Remove("Snap"); err != nil { + t.Fatal(err) + } + + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } +} diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go index 765b21cded..7473aa659d 100644 ---
a/daemon/graphdriver/vfs/driver.go +++ b/daemon/graphdriver/vfs/driver.go @@ -47,7 +47,7 @@ func (d *Driver) Create(id, parent string) error { if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { return err } - if err := os.Mkdir(dir, 0700); err != nil { + if err := os.Mkdir(dir, 0755); err != nil { return err } if parent == "" { diff --git a/daemon/graphdriver/vfs/vfs_test.go b/daemon/graphdriver/vfs/vfs_test.go new file mode 100644 index 0000000000..e79f93c91d --- /dev/null +++ b/daemon/graphdriver/vfs/vfs_test.go @@ -0,0 +1,28 @@ +package vfs + +import ( + "github.com/dotcloud/docker/daemon/graphdriver/graphtest" + "testing" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestVfsSetup and TestVfsTeardown +func TestVfsSetup(t *testing.T) { + graphtest.GetDriver(t, "vfs") +} + +func TestVfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "vfs") +} + +func TestVfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "vfs") +} + +func TestVfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "vfs") +} + +func TestVfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/daemon/history.go b/daemon/history.go index 57a00a2090..0b125ad2b3 100644 --- a/daemon/history.go +++ b/daemon/history.go @@ -26,5 +26,8 @@ func (history *History) Swap(i, j int) { func (history *History) Add(container *Container) { *history = append(*history, container) +} + +func (history *History) Sort() { sort.Sort(history) } diff --git a/daemon/inspect.go b/daemon/inspect.go new file mode 100644 index 0000000000..0f771a3ca2 --- /dev/null +++ b/daemon/inspect.go @@ -0,0 +1,27 @@ +package daemon + +import ( + "encoding/json" + + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/runconfig" +) + +func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + if container := daemon.Get(name); container != nil { + b, err := json.Marshal(&struct { + *Container + HostConfig *runconfig.HostConfig + }{container, container.HostConfig()}) + if err != nil { + return job.Error(err) + } + job.Stdout.Write(b) + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} diff --git a/daemon/network_settings.go b/daemon/network_settings.go index 762270362b..a5c750acfe 100644 --- a/daemon/network_settings.go +++ b/daemon/network_settings.go @@ -23,7 +23,7 @@ func (settings *NetworkSettings) PortMappingAPI() *engine.Table { p, _ := nat.ParsePort(port.Port()) if len(bindings) == 0 { out := &engine.Env{} - out.SetInt("PublicPort", p) + out.SetInt("PrivatePort", p) out.Set("Type", port.Proto()) outs.Add(out) continue diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index c64aa423d1..a14941a8f3 100644 --- a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -380,7 +380,7 @@ func AllocatePort(job *engine.Job) engine.Status { ip = defaultBindingIP id = job.Args[0] hostIP = job.Getenv("HostIP") - hostPort = job.GetenvInt("HostPort") + origHostPort = job.GetenvInt("HostPort") containerPort = job.GetenvInt("ContainerPort") proto = job.Getenv("Proto") network = currentInterfaces[id] @@ -390,29 +390,45 @@ func AllocatePort(job *engine.Job) engine.Status { ip = net.ParseIP(hostIP) } - // host ip, proto, and host port - hostPort, err = portallocator.RequestPort(ip, proto, hostPort) - if err != nil { - return 
job.Error(err) - } - var ( + hostPort int container net.Addr host net.Addr ) - if proto == "tcp" { - host = &net.TCPAddr{IP: ip, Port: hostPort} - container = &net.TCPAddr{IP: network.IP, Port: containerPort} - } else { - host = &net.UDPAddr{IP: ip, Port: hostPort} - container = &net.UDPAddr{IP: network.IP, Port: containerPort} + /* + Try up to 10 times to get a port that's not already allocated. + + In the event of failure to bind, return the error that portmapper.Map + yields. + */ + for i := 0; i < 10; i++ { + // host ip, proto, and host port + hostPort, err = portallocator.RequestPort(ip, proto, origHostPort) + + if err != nil { + return job.Error(err) + } + + if proto == "tcp" { + host = &net.TCPAddr{IP: ip, Port: hostPort} + container = &net.TCPAddr{IP: network.IP, Port: containerPort} + } else { + host = &net.UDPAddr{IP: ip, Port: hostPort} + container = &net.UDPAddr{IP: network.IP, Port: containerPort} + } + + if err = portmapper.Map(container, ip, hostPort); err == nil { + break + } + + job.Logf("Failed to bind %s:%d for container address %s:%d. Trying another port.", ip.String(), hostPort, network.IP.String(), containerPort) } - if err := portmapper.Map(container, ip, hostPort); err != nil { - portallocator.ReleasePort(ip, proto, hostPort) + if err != nil { return job.Error(err) } + network.PortMappings = append(network.PortMappings, host) out := engine.Env{} diff --git a/daemon/networkdriver/ipallocator/allocator.go b/daemon/networkdriver/ipallocator/allocator.go index 914df34942..4bcce65174 100644 --- a/daemon/networkdriver/ipallocator/allocator.go +++ b/daemon/networkdriver/ipallocator/allocator.go @@ -7,9 +7,19 @@ import ( "github.com/dotcloud/docker/pkg/collections" "net" "sync" + "sync/atomic" ) -type networkSet map[string]*collections.OrderedIntSet +type allocatedMap struct { + *collections.OrderedIntSet + last int32 +} + +func newAllocatedMap() *allocatedMap { + return &allocatedMap{OrderedIntSet: collections.NewOrderedIntSet()} +} + +type networkSet map[string]*allocatedMap var ( ErrNoAvailableIPs = errors.New("no available ip addresses on network") @@ -19,7 +29,6 @@ var ( var ( lock = sync.Mutex{} allocatedIPs = networkSet{} - availableIPS = networkSet{} ) // RequestIP requests an available ip from the given network. 
It @@ -55,13 +64,11 @@ func ReleaseIP(address *net.IPNet, ip *net.IP) error { checkAddress(address) var ( - existing = allocatedIPs[address.String()] - available = availableIPS[address.String()] + allocated = allocatedIPs[address.String()] pos = getPosition(address, ip) ) - existing.Remove(int(pos)) - available.Push(int(pos)) + allocated.Remove(int(pos)) return nil } @@ -82,29 +89,19 @@ func getPosition(address *net.IPNet, ip *net.IP) int32 { func getNextIp(address *net.IPNet) (*net.IP, error) { var ( ownIP = ipToInt(&address.IP) - available = availableIPS[address.String()] allocated = allocatedIPs[address.String()] first, _ = networkdriver.NetworkRange(address) base = ipToInt(&first) size = int(networkdriver.NetworkSize(address.Mask)) max = int32(size - 2) // size -1 for the broadcast address, -1 for the gateway address - pos = int32(available.Pop()) + pos = atomic.LoadInt32(&allocated.last) ) - // We pop and push the position not the ip - if pos != 0 { - ip := intToIP(int32(base + pos)) - allocated.Push(int(pos)) - - return ip, nil - } - var ( firstNetIP = address.IP.To4().Mask(address.Mask) firstAsInt = ipToInt(&firstNetIP) + 1 ) - pos = int32(allocated.PullBack()) for i := int32(0); i < max; i++ { pos = pos%max + 1 next := int32(base + pos) @@ -116,6 +113,7 @@ func getNextIp(address *net.IPNet) (*net.IP, error) { if !allocated.Exists(int(pos)) { ip := intToIP(next) allocated.Push(int(pos)) + atomic.StoreInt32(&allocated.last, pos) return ip, nil } } @@ -124,15 +122,14 @@ func getNextIp(address *net.IPNet) (*net.IP, error) { func registerIP(address *net.IPNet, ip *net.IP) error { var ( - existing = allocatedIPs[address.String()] - available = availableIPS[address.String()] + allocated = allocatedIPs[address.String()] pos = getPosition(address, ip) ) - if existing.Exists(int(pos)) { + if allocated.Exists(int(pos)) { return ErrIPAlreadyAllocated } - available.Remove(int(pos)) + atomic.StoreInt32(&allocated.last, pos) return nil } @@ -153,7 +150,6 @@ func intToIP(n int32) *net.IP { func checkAddress(address *net.IPNet) { key := address.String() if _, exists := allocatedIPs[key]; !exists { - allocatedIPs[key] = collections.NewOrderedIntSet() - availableIPS[key] = collections.NewOrderedIntSet() + allocatedIPs[key] = newAllocatedMap() } } diff --git a/daemon/networkdriver/ipallocator/allocator_test.go b/daemon/networkdriver/ipallocator/allocator_test.go index 5e9fcfc983..2b63d9a03c 100644 --- a/daemon/networkdriver/ipallocator/allocator_test.go +++ b/daemon/networkdriver/ipallocator/allocator_test.go @@ -8,7 +8,6 @@ import ( func reset() { allocatedIPs = networkSet{} - availableIPS = networkSet{} } func TestRequestNewIps(t *testing.T) { @@ -18,8 +17,10 @@ func TestRequestNewIps(t *testing.T) { Mask: []byte{255, 255, 255, 0}, } + var ip *net.IP + var err error for i := 2; i < 10; i++ { - ip, err := RequestIP(network, nil) + ip, err = RequestIP(network, nil) if err != nil { t.Fatal(err) } @@ -28,6 +29,17 @@ func TestRequestNewIps(t *testing.T) { t.Fatalf("Expected ip %s got %s", expected, ip.String()) } } + value := intToIP(ipToInt(ip) + 1).String() + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } + ip, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + if ip.String() != value { + t.Fatalf("Expected to receive the next ip %s got %s", value, ip.String()) + } } func TestReleaseIp(t *testing.T) { @@ -64,6 +76,17 @@ func TestGetReleasedIp(t *testing.T) { t.Fatal(err) } + for i := 0; i < 252; i++ { + _, err = RequestIP(network, nil) + if err != nil { + 
t.Fatal(err) + } + err = ReleaseIP(network, ip) + if err != nil { + t.Fatal(err) + } + } + ip, err = RequestIP(network, nil) if err != nil { t.Fatal(err) @@ -185,24 +208,6 @@ func TestIPAllocator(t *testing.T) { newIPs[i] = ip } - // Before loop begin - // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) - // ↑ - - // After i = 0 - // 2(u) - 3(u) - 4(f) - 5(u) - 6(f) - // ↑ - - // After i = 1 - // 2(u) - 3(u) - 4(f) - 5(u) - 6(u) - // ↑ - - // After i = 2 - // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) - // ↑ - - // Reordered these because the new set will always return the - // lowest ips first and not in the order that they were released assertIPEquals(t, &expectedIPs[2], newIPs[0]) assertIPEquals(t, &expectedIPs[3], newIPs[1]) assertIPEquals(t, &expectedIPs[4], newIPs[2]) @@ -234,6 +239,86 @@ func TestAllocateFirstIP(t *testing.T) { } } +func TestAllocateAllIps(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + + var ( + current, first *net.IP + err error + isFirst = true + ) + + for err == nil { + current, err = RequestIP(network, nil) + if isFirst { + first = current + isFirst = false + } + } + + if err != ErrNoAvailableIPs { + t.Fatal(err) + } + + if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs { + t.Fatal(err) + } + + if err := ReleaseIP(network, first); err != nil { + t.Fatal(err) + } + + again, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + assertIPEquals(t, first, again) +} + +func TestAllocateDifferentSubnets(t *testing.T) { + defer reset() + network1 := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + network2 := &net.IPNet{ + IP: []byte{127, 0, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + expectedIPs := []net.IP{ + 0: net.IPv4(192, 168, 0, 2), + 1: net.IPv4(192, 168, 0, 3), + 2: net.IPv4(127, 0, 0, 2), + 3: net.IPv4(127, 0, 0, 3), + } + + ip11, err := RequestIP(network1, nil) + if err != nil { + t.Fatal(err) + } + ip12, err := RequestIP(network1, nil) + if err != nil { + t.Fatal(err) + } + ip21, err := RequestIP(network2, nil) + if err != nil { + t.Fatal(err) + } + ip22, err := RequestIP(network2, nil) + if err != nil { + t.Fatal(err) + } + assertIPEquals(t, &expectedIPs[0], ip11) + assertIPEquals(t, &expectedIPs[1], ip12) + assertIPEquals(t, &expectedIPs[2], ip21) + assertIPEquals(t, &expectedIPs[3], ip22) +} + func assertIPEquals(t *testing.T, ip1, ip2 *net.IP) { if !ip1.Equal(*ip2) { t.Fatalf("Expected IP %s, got %s", ip1, ip2) diff --git a/daemon/networkdriver/portallocator/portallocator.go b/daemon/networkdriver/portallocator/portallocator.go index 9ecd447116..251ab94473 100644 --- a/daemon/networkdriver/portallocator/portallocator.go +++ b/daemon/networkdriver/portallocator/portallocator.go @@ -2,21 +2,21 @@ package portallocator import ( "errors" - "github.com/dotcloud/docker/pkg/collections" "net" "sync" ) +type ( + portMap map[int]bool + protocolMap map[string]portMap + ipMapping map[string]protocolMap +) + const ( BeginPortRange = 49153 EndPortRange = 65535 ) -type ( - portMappings map[string]*collections.OrderedIntSet - ipMapping map[string]portMappings -) - var ( ErrAllPortsAllocated = errors.New("all ports are allocated") ErrPortAlreadyAllocated = errors.New("port has already been allocated") @@ -24,165 +24,106 @@ var ( ) var ( - currentDynamicPort = map[string]int{ - "tcp": BeginPortRange - 1, - "udp": BeginPortRange - 1, - } - defaultIP = net.ParseIP("0.0.0.0") - defaultAllocatedPorts = portMappings{} - otherAllocatedPorts = ipMapping{} - 
lock = sync.Mutex{} + mutex sync.Mutex + + defaultIP = net.ParseIP("0.0.0.0") + globalMap = ipMapping{} ) -func init() { - defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet() - defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet() -} - -// RequestPort returns an available port if the port is 0 -// If the provided port is not 0 then it will be checked if -// it is available for allocation func RequestPort(ip net.IP, proto string, port int) (int, error) { - lock.Lock() - defer lock.Unlock() + mutex.Lock() + defer mutex.Unlock() - if err := validateProtocol(proto); err != nil { + if err := validateProto(proto); err != nil { return 0, err } - // If the user requested a specific port to be allocated + ip = getDefault(ip) + + mapping := getOrCreate(ip) + if port > 0 { - if err := registerSetPort(ip, proto, port); err != nil { + if !mapping[proto][port] { + mapping[proto][port] = true + return port, nil + } else { + return 0, ErrPortAlreadyAllocated + } + } else { + port, err := findPort(ip, proto) + + if err != nil { return 0, err } + return port, nil } - return registerDynamicPort(ip, proto) } -// ReleasePort will return the provided port back into the -// pool for reuse func ReleasePort(ip net.IP, proto string, port int) error { - lock.Lock() - defer lock.Unlock() + mutex.Lock() + defer mutex.Unlock() - if err := validateProtocol(proto); err != nil { - return err - } + ip = getDefault(ip) - allocated := defaultAllocatedPorts[proto] - allocated.Remove(port) + mapping := getOrCreate(ip) + delete(mapping[proto], port) - if !equalsDefault(ip) { - registerIP(ip) - - // Remove the port for the specific ip address - allocated = otherAllocatedPorts[ip.String()][proto] - allocated.Remove(port) - } return nil } func ReleaseAll() error { - lock.Lock() - defer lock.Unlock() + mutex.Lock() + defer mutex.Unlock() - currentDynamicPort["tcp"] = BeginPortRange - 1 - currentDynamicPort["udp"] = BeginPortRange - 1 - - defaultAllocatedPorts = portMappings{} - defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet() - defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet() - - otherAllocatedPorts = ipMapping{} + globalMap = ipMapping{} return nil } -func registerDynamicPort(ip net.IP, proto string) (int, error) { +func getOrCreate(ip net.IP) protocolMap { + ipstr := ip.String() - if !equalsDefault(ip) { - registerIP(ip) - - ipAllocated := otherAllocatedPorts[ip.String()][proto] - - port, err := findNextPort(proto, ipAllocated) - if err != nil { - return 0, err + if _, ok := globalMap[ipstr]; !ok { + globalMap[ipstr] = protocolMap{ + "tcp": portMap{}, + "udp": portMap{}, } - ipAllocated.Push(port) - return port, nil - - } else { - - allocated := defaultAllocatedPorts[proto] - - port, err := findNextPort(proto, allocated) - if err != nil { - return 0, err - } - allocated.Push(port) - return port, nil - } -} - -func registerSetPort(ip net.IP, proto string, port int) error { - allocated := defaultAllocatedPorts[proto] - if allocated.Exists(port) { - return ErrPortAlreadyAllocated } - if !equalsDefault(ip) { - registerIP(ip) - - ipAllocated := otherAllocatedPorts[ip.String()][proto] - if ipAllocated.Exists(port) { - return ErrPortAlreadyAllocated - } - ipAllocated.Push(port) - } else { - allocated.Push(port) - } - return nil + return globalMap[ipstr] } -func equalsDefault(ip net.IP) bool { - return ip == nil || ip.Equal(defaultIP) -} +func findPort(ip net.IP, proto string) (int, error) { + port := BeginPortRange -func findNextPort(proto string, allocated *collections.OrderedIntSet) (int, 
error) { - port := nextPort(proto) - startSearchPort := port - for allocated.Exists(port) { - port = nextPort(proto) - if startSearchPort == port { + mapping := getOrCreate(ip) + + for mapping[proto][port] { + port++ + + if port > EndPortRange { return 0, ErrAllPortsAllocated } } + + mapping[proto][port] = true + return port, nil } -func nextPort(proto string) int { - c := currentDynamicPort[proto] + 1 - if c > EndPortRange { - c = BeginPortRange +func getDefault(ip net.IP) net.IP { + if ip == nil { + return defaultIP } - currentDynamicPort[proto] = c - return c + + return ip } -func registerIP(ip net.IP) { - if _, exists := otherAllocatedPorts[ip.String()]; !exists { - otherAllocatedPorts[ip.String()] = portMappings{ - "tcp": collections.NewOrderedIntSet(), - "udp": collections.NewOrderedIntSet(), - } - } -} - -func validateProtocol(proto string) error { - if _, exists := defaultAllocatedPorts[proto]; !exists { +func validateProto(proto string) error { + if proto != "tcp" && proto != "udp" { return ErrUnknownProtocol } + return nil } diff --git a/daemon/state.go b/daemon/state.go index 562929c87a..c0ed9516e3 100644 --- a/daemon/state.go +++ b/daemon/state.go @@ -2,9 +2,10 @@ package daemon import ( "fmt" - "github.com/dotcloud/docker/utils" "sync" "time" + + "github.com/dotcloud/docker/pkg/units" ) type State struct { @@ -22,12 +23,12 @@ func (s *State) String() string { defer s.RUnlock() if s.Running { - return fmt.Sprintf("Up %s", utils.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) } if s.FinishedAt.IsZero() { return "" } - return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, utils.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) } func (s *State) IsRunning() bool { diff --git a/daemon/volumes.go b/daemon/volumes.go index a15e3084b2..c3b003d0ea 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -10,7 +10,7 @@ import ( "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/utils" + "github.com/dotcloud/docker/pkg/symlink" ) type BindMap struct { @@ -40,8 +40,11 @@ func setupMountsForContainer(container *Container) error { {container.ResolvConfPath, "/etc/resolv.conf", false, true}, } - if container.HostnamePath != "" && container.HostsPath != "" { + if container.HostnamePath != "" { mounts = append(mounts, execdriver.Mount{container.HostnamePath, "/etc/hostname", false, true}) + } + + if container.HostsPath != "" { mounts = append(mounts, execdriver.Mount{container.HostsPath, "/etc/hosts", false, true}) } @@ -94,11 +97,11 @@ func applyVolumesFrom(container *Container) error { if _, exists := container.Volumes[volPath]; exists { continue } - stat, err := os.Stat(filepath.Join(c.basefs, volPath)) + stat, err := os.Stat(c.getResourcePath(volPath)) if err != nil { return err } - if err := createIfNotExists(filepath.Join(container.basefs, volPath), stat.IsDir()); err != nil { + if err := createIfNotExists(container.getResourcePath(volPath), stat.IsDir()); err != nil { return err } container.Volumes[volPath] = id @@ -162,115 +165,17 @@ func createVolumes(container *Container) error { return err } - volumesDriver := container.daemon.volumes.Driver() // Create the requested volumes if they don't exist for volPath := range container.Config.Volumes { - volPath = filepath.Clean(volPath) - volIsDir := true - // Skip existing volumes - 
if _, exists := container.Volumes[volPath]; exists { - continue - } - var srcPath string - var isBindMount bool - srcRW := false - // If an external bind is defined for this volume, use that as a source - if bindMap, exists := binds[volPath]; exists { - isBindMount = true - srcPath = bindMap.SrcPath - if !filepath.IsAbs(srcPath) { - return fmt.Errorf("%s must be an absolute path", srcPath) - } - if strings.ToLower(bindMap.Mode) == "rw" { - srcRW = true - } - if stat, err := os.Stat(bindMap.SrcPath); err != nil { - return err - } else { - volIsDir = stat.IsDir() - } - // Otherwise create an directory in $ROOT/volumes/ and use that - } else { - - // Do not pass a container as the parameter for the volume creation. - // The graph driver using the container's information ( Image ) to - // create the parent. - c, err := container.daemon.volumes.Create(nil, "", "", "", "", nil, nil) - if err != nil { - return err - } - srcPath, err = volumesDriver.Get(c.ID, "") - if err != nil { - return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err) - } - srcRW = true // RW by default - } - - if p, err := filepath.EvalSymlinks(srcPath); err != nil { - return err - } else { - srcPath = p - } - - // Create the mountpoint - rootVolPath, err := utils.FollowSymlinkInScope(filepath.Join(container.basefs, volPath), container.basefs) - if err != nil { + if err := initializeVolume(container, volPath, binds); err != nil { return err } + } - newVolPath, err := filepath.Rel(container.basefs, rootVolPath) - if err != nil { + for volPath := range binds { + if err := initializeVolume(container, volPath, binds); err != nil { return err } - newVolPath = "/" + newVolPath - - if volPath != newVolPath { - delete(container.Volumes, volPath) - delete(container.VolumesRW, volPath) - } - - container.Volumes[newVolPath] = srcPath - container.VolumesRW[newVolPath] = srcRW - - if err := createIfNotExists(rootVolPath, volIsDir); err != nil { - return err - } - - // Do not copy or change permissions if we are mounting from the host - if srcRW && !isBindMount { - volList, err := ioutil.ReadDir(rootVolPath) - if err != nil { - return err - } - if len(volList) > 0 { - srcList, err := ioutil.ReadDir(srcPath) - if err != nil { - return err - } - if len(srcList) == 0 { - // If the source volume is empty copy files from the root into the volume - if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil { - return err - } - } - } - - var stat syscall.Stat_t - if err := syscall.Stat(rootVolPath, &stat); err != nil { - return err - } - var srcStat syscall.Stat_t - if err := syscall.Stat(srcPath, &srcStat); err != nil { - return err - } - // Change the source volume's ownership if it differs from the root - // files that were just copied - if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid { - if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil { - return err - } - } - } } return nil } @@ -296,3 +201,130 @@ func createIfNotExists(path string, isDir bool) error { } return nil } + +func initializeVolume(container *Container, volPath string, binds map[string]BindMap) error { + volumesDriver := container.daemon.volumes.Driver() + volPath = filepath.Clean(volPath) + // Skip existing volumes + if _, exists := container.Volumes[volPath]; exists { + return nil + } + + var ( + srcPath string + isBindMount bool + volIsDir = true + + srcRW = false + ) + + // If an external bind is defined for this volume, use that as a source + if bindMap, exists := binds[volPath]; exists { + isBindMount = true + 
srcPath = bindMap.SrcPath + if !filepath.IsAbs(srcPath) { + return fmt.Errorf("%s must be an absolute path", srcPath) + } + if strings.ToLower(bindMap.Mode) == "rw" { + srcRW = true + } + if stat, err := os.Stat(bindMap.SrcPath); err != nil { + return err + } else { + volIsDir = stat.IsDir() + } + // Otherwise create a directory in $ROOT/volumes/ and use that + } else { + // Do not pass a container as the parameter for the volume creation. + // The graph driver uses the container's information (Image) to + // create the parent. + c, err := container.daemon.volumes.Create(nil, "", "", "", "", nil, nil) + if err != nil { + return err + } + srcPath, err = volumesDriver.Get(c.ID, "") + if err != nil { + return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err) + } + srcRW = true // RW by default + } + + if p, err := filepath.EvalSymlinks(srcPath); err != nil { + return err + } else { + srcPath = p + } + + // Create the mountpoint + rootVolPath, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, volPath), container.basefs) + if err != nil { + return err + } + + newVolPath, err := filepath.Rel(container.basefs, rootVolPath) + if err != nil { + return err + } + newVolPath = "/" + newVolPath + + if volPath != newVolPath { + delete(container.Volumes, volPath) + delete(container.VolumesRW, volPath) + } + + container.Volumes[newVolPath] = srcPath + container.VolumesRW[newVolPath] = srcRW + + if err := createIfNotExists(rootVolPath, volIsDir); err != nil { + return err + } + + // Do not copy or change permissions if we are mounting from the host + if srcRW && !isBindMount { + if err := copyExistingContents(rootVolPath, srcPath); err != nil { + return err + } + } + return nil +} + +func copyExistingContents(rootVolPath, srcPath string) error { + volList, err := ioutil.ReadDir(rootVolPath) + if err != nil { + return err + } + + if len(volList) > 0 { + srcList, err := ioutil.ReadDir(srcPath) + if err != nil { + return err + } + + if len(srcList) == 0 { + // If the source volume is empty, copy files from the root into the volume + if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil { + return err + } + } + } + + var ( + stat syscall.Stat_t + srcStat syscall.Stat_t + ) + + if err := syscall.Stat(rootVolPath, &stat); err != nil { + return err + } + if err := syscall.Stat(srcPath, &srcStat); err != nil { + return err + } + // Change the source volume's ownership if it differs from the root + // files that were just copied + if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid { + if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + } + return nil +} diff --git a/daemonconfig/README.md b/daemonconfig/README.md new file mode 100644 index 0000000000..488e7c7cac --- /dev/null +++ b/daemonconfig/README.md @@ -0,0 +1,3 @@ +This directory contains code pertaining to the configuration of the Docker daemon. + +These are the configuration settings that you pass to the Docker daemon when you launch it, for example: `docker -d -e lxc` diff --git a/docker/docker.go b/docker/docker.go index 60f34a1f14..4215ed3a95 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -98,6 +98,9 @@ func main() { } if *flDaemon { + if runtime.GOOS != "linux" { + log.Fatalf("The Docker daemon is only supported on linux") + } if os.Geteuid() != 0 { log.Fatalf("The Docker daemon needs to be run as root") } @@ -185,6 +188,7 @@ func main() { job.Setenv("TlsCa", *flCa) job.Setenv("TlsCert", *flCert) job.Setenv("TlsKey", *flKey) +
job.SetenvBool("BufferRequests", true) if err := job.Run(); err != nil { log.Fatal(err) } diff --git a/docs/Dockerfile b/docs/Dockerfile index a907072ddf..694729d89b 100644 --- a/docs/Dockerfile +++ b/docs/Dockerfile @@ -4,7 +4,7 @@ FROM debian:jessie MAINTAINER Sven Dowideit (@SvenDowideit) -RUN apt-get update && apt-get install -yq make python-pip python-setuptools vim-tiny git pandoc +RUN apt-get update && apt-get install -yq make python-pip python-setuptools vim-tiny git gettext RUN pip install mkdocs diff --git a/docs/README.md b/docs/README.md index 47b390bda4..fa3c501087 100755 --- a/docs/README.md +++ b/docs/README.md @@ -1,37 +1,35 @@ # Docker Documentation -The source for Docker documentation is here under `sources/` and uses -extended Markdown, as implemented by [mkdocs](http://mkdocs.org). +The source for Docker documentation is here under `sources/` and uses extended +Markdown, as implemented by [MkDocs](http://mkdocs.org). -The HTML files are built and hosted on `https://docs.docker.io`, and -update automatically after each change to the master or release branch -of [Docker on GitHub](https://github.com/dotcloud/docker) -thanks to post-commit hooks. The "docs" branch maps to the "latest" -documentation and the "master" (unreleased development) branch maps to -the "master" documentation. +The HTML files are built and hosted on `https://docs.docker.io`, and update +automatically after each change to the master or release branch of [Docker on +GitHub](https://github.com/dotcloud/docker) thanks to post-commit hooks. The +`docs` branch maps to the "latest" documentation and the `master` (unreleased +development) branch maps to the "master" documentation. ## Branches -**There are two branches related to editing docs**: `master` and a -`docs` branch. You should always edit documentation on a local branch -of the `master` branch, and send a PR against `master`. +**There are two branches related to editing docs**: `master` and a `docs` +branch. You should always edit documentation on a local branch of the `master` +branch, and send a PR against `master`. -That way your fixes will automatically get included in later releases, -and docs maintainers can easily cherry-pick your changes into the -`docs` release branch. In the rare case where your change is not -forward-compatible, you may need to base your changes on the `docs` -branch. +That way your fixes will automatically get included in later releases, and docs +maintainers can easily cherry-pick your changes into the `docs` release branch. +In the rare case where your change is not forward-compatible, you may need to +base your changes on the `docs` branch. Also, now that we have a `docs` branch, we can keep the -[http://docs.docker.io](http://docs.docker.io) docs up to date with any -bugs found between `docker` code releases. +[http://docs.docker.io](http://docs.docker.io) docs up to date with any bugs +found between Docker code releases. **Warning**: When *reading* the docs, the -[http://beta-docs.docker.io](http://beta-docs.docker.io) documentation -may include features not yet part of any official docker release. The -`beta-docs` site should be used only for understanding bleeding-edge -development and `docs.docker.io` (which points to the `docs` -branch`) should be used for the latest official release. +[http://beta-docs.docker.io](http://beta-docs.docker.io) documentation may +include features not yet part of any official Docker release. 
The `beta-docs` +site should be used only for understanding bleeding-edge development and +`docs.docker.io` (which points to the `docs` branch) should be used for the +latest official release. ## Contributing @@ -41,59 +39,61 @@ branch`) should be used for the latest official release. ## Getting Started -Docker documentation builds are done in a Docker container, which -installs all the required tools, adds the local `docs/` directory and -builds the HTML docs. It then starts a HTTP server on port 8000 so that -you can connect and see your changes. +Docker documentation builds are done in a Docker container, which installs all +the required tools, adds the local `docs/` directory and builds the HTML docs. +It then starts an HTTP server on port 8000 so that you can connect and see your +changes. In the root of the `docker` source directory: make docs -If you have any issues you need to debug, you can use `make docs-shell` and -then run `mkdocs serve` +If you have any issues you need to debug, you can use `make docs-shell` and then +run `mkdocs serve`. + +## Style guide + +The documentation is written with paragraphs wrapped at 80 columns to make +it easier to read in a terminal. ### Examples -When writing examples give the user hints by making them resemble what -they see in their shell: +When writing examples, give the user hints by making them resemble what they see +in their shell: - Indent shell examples by 4 spaces so they get rendered as code. - Start typed commands with `$ ` (dollar space), so that they are easily -differentiated from program output. + differentiated from program output. - Program output has no prefix. - Comments begin with `# ` (hash space). - In-container shell commands begin with `$$ ` (dollar dollar space). ### Images -When you need to add images, try to make them as small as possible -(e.g. as gifs). Usually images should go in the same directory as the -`.md` file which references them, or in a subdirectory if one already -exists. +When you need to add images, try to make them as small as possible (e.g. as +gifs). Usually images should go in the same directory as the `.md` file which +references them, or in a subdirectory if one already exists. ## Working using GitHub's file editor -Alternatively, for small changes and typos you might want to use -GitHub's built in file editor. It allows you to preview your changes -right on-line (though there can be some differences between GitHub -Markdown and [MkDocs Markdown](http://www.mkdocs.org/user-guide/writing-your-docs/)). -Just be careful not to create many commits. And you must still -[sign your work!](../CONTRIBUTING.md#sign-your-work) +Alternatively, for small changes and typos you might want to use GitHub's built +in file editor. It allows you to preview your changes right on-line (though +there can be some differences between GitHub Markdown and [MkDocs +Markdown](http://www.mkdocs.org/user-guide/writing-your-docs/)). Just be +careful not to create many commits. And you must still [sign your +work!](../CONTRIBUTING.md#sign-your-work) ## Publishing Documentation -To publish a copy of the documentation you need a `docs/awsconfig` -file containing AWS settings to deploy to. The release script will +To publish a copy of the documentation you need a `docs/awsconfig` file +containing AWS settings to deploy to. The release script will create an S3 bucket if needed, and will then push the files to it. - [profile dowideit-docs] - aws_access_key_id = IHOIUAHSIDH234rwf....
- aws_secret_access_key = OIUYSADJHLKUHQWIUHE...... - region = ap-southeast-2 + [profile dowideit-docs] + aws_access_key_id = IHOIUAHSIDH234rwf.... + aws_secret_access_key = OIUYSADJHLKUHQWIUHE...... + region = ap-southeast-2 -The `profile` name must be the same as the name of the bucket you are -deploying to - which you call from the `docker` directory: +The `profile` name must be the same as the name of the bucket you are deploying +to - which you call from the `docker` directory: make AWS_S3_BUCKET=dowideit-docs docs-release diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index c16436e892..e3f9e28196 100755 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -28,15 +28,14 @@ pages: - ['index.md', 'About', 'Docker'] - ['introduction/index.md', '**HIDDEN**'] - ['introduction/understanding-docker.md', 'About', 'Understanding Docker'] -- ['introduction/technology.md', 'About', 'The Technology'] - ['introduction/working-with-docker.md', 'About', 'Working with Docker'] -- ['introduction/get-docker.md', 'About', 'Get Docker'] # Installation: - ['installation/index.md', '**HIDDEN**'] - ['installation/mac.md', 'Installation', 'Mac OS X'] - ['installation/ubuntulinux.md', 'Installation', 'Ubuntu'] - ['installation/rhel.md', 'Installation', 'Red Hat Enterprise Linux'] +- ['installation/debian.md', 'Installation', 'Debian'] - ['installation/gentoolinux.md', 'Installation', 'Gentoo'] - ['installation/google.md', 'Installation', 'Google Cloud Platform'] - ['installation/rackspace.md', 'Installation', 'Rackspace Cloud'] @@ -57,7 +56,7 @@ pages: - ['examples/hello_world.md', 'Examples', 'Hello World'] - ['examples/nodejs_web_app.md', 'Examples', 'Node.js web application'] - ['examples/python_web_app.md', 'Examples', 'Python web application'] -- ['examples/mongodb.md', 'Examples', 'MongoDB service'] +- ['examples/mongodb.md', 'Examples', 'Dockerizing MongoDB'] - ['examples/running_redis_service.md', 'Examples', 'Redis service'] - ['examples/postgresql_service.md', 'Examples', 'PostgreSQL service'] - ['examples/running_riak_service.md', 'Examples', 'Running a Riak service'] @@ -94,6 +93,7 @@ pages: - ['reference/commandline/index.md', '**HIDDEN**'] - ['reference/commandline/cli.md', 'Reference', 'Command line'] - ['reference/builder.md', 'Reference', 'Dockerfile'] +- ['faq.md', 'Reference', 'FAQ'] - ['reference/run.md', 'Reference', 'Run Reference'] - ['articles/index.md', '**HIDDEN**'] - ['articles/runmetrics.md', 'Reference', 'Runtime metrics'] diff --git a/docs/release.sh b/docs/release.sh index 323887f594..1be6268d70 100755 --- a/docs/release.sh +++ b/docs/release.sh @@ -19,7 +19,7 @@ EOF [ "$AWS_S3_BUCKET" ] || usage #VERSION=$(cat VERSION) -BUCKET=$AWS_S3_BUCKET +export BUCKET=$AWS_S3_BUCKET export AWS_CONFIG_FILE=$(pwd)/awsconfig [ -e "$AWS_CONFIG_FILE" ] || usage @@ -37,7 +37,10 @@ setup_s3() { # Make the bucket accessible through website endpoints.
echo "make $BUCKET accessible as a website" #aws s3 website s3://$BUCKET --index-document index.html --error-document jsearch/index.html - s3conf=$(cat s3_website.json) + s3conf=$(cat s3_website.json | envsubst) + echo + echo $s3conf + echo aws s3api put-bucket-website --bucket $BUCKET --website-configuration "$s3conf" } @@ -54,7 +57,7 @@ upload_current_documentation() { echo " to $dst" echo #s3cmd --recursive --follow-symlinks --preserve --acl-public sync "$src" "$dst" - aws s3 sync --acl public-read --exclude "*.rej" --exclude "*.rst" --exclude "*.orig" --exclude "*.py" "$src" "$dst" + aws s3 sync --cache-control "max-age=3600" --acl public-read --exclude "*.rej" --exclude "*.rst" --exclude "*.orig" --exclude "*.py" "$src" "$dst" } setup_s3 diff --git a/docs/s3_website.json b/docs/s3_website.json index fb14628ce6..2d158cf9de 100644 --- a/docs/s3_website.json +++ b/docs/s3_website.json @@ -6,12 +6,12 @@ "Suffix": "index.html" }, "RoutingRules": [ - { "Condition": { "KeyPrefixEquals": "en/latest/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } }, - { "Condition": { "KeyPrefixEquals": "en/master/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } }, - { "Condition": { "KeyPrefixEquals": "en/v0.6.3/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } }, - { "Condition": { "KeyPrefixEquals": "jsearch/index.html" }, "Redirect": { "ReplaceKeyPrefixWith": "jsearch/" } }, - { "Condition": { "KeyPrefixEquals": "index/" }, "Redirect": { "ReplaceKeyPrefixWith": "docker-io/" } }, - { "Condition": { "KeyPrefixEquals": "reference/api/index_api/" }, "Redirect": { "ReplaceKeyPrefixWith": "reference/api/docker-io_api/" } } + { "Condition": { "KeyPrefixEquals": "en/latest/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "" } }, + { "Condition": { "KeyPrefixEquals": "en/master/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "" } }, + { "Condition": { "KeyPrefixEquals": "en/v0.6.3/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "" } }, + { "Condition": { "KeyPrefixEquals": "jsearch/index.html" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "jsearch/" } }, + { "Condition": { "KeyPrefixEquals": "index/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-io/" } }, + { "Condition": { "KeyPrefixEquals": "reference/api/index_api/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "reference/api/docker-io_api/" } } ] } diff --git a/docs/sources/articles/runmetrics.md b/docs/sources/articles/runmetrics.md index 50d46047c0..bf4fe21c4e 100644 --- a/docs/sources/articles/runmetrics.md +++ b/docs/sources/articles/runmetrics.md @@ -50,7 +50,7 @@ For Docker containers using cgroups, the container name will be the full ID or long ID of the container. If a container shows up as ae836c95b4c3 in `docker ps`, its long ID might be something like `ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79`. You can -look it up with `docker inspect` or `docker ps -notrunc`. +look it up with `docker inspect` or `docker ps --no-trunc`. Putting everything together to look at the memory metrics for a Docker container, take a look at `/sys/fs/cgroup/memory/lxc//`. @@ -310,8 +310,8 @@ layer; you will also have to add traffic going through the userland proxy. Then, you will need to check those counters on a regular basis. If you -happen to use `collectd`, there is a nice plugin to -automate iptables counters collection. 
+happen to use `collectd`, there is a [nice plugin](https://collectd.org/wiki/index.php/Plugin:IPTables) +to automate iptables counters collection. ### Interface-level counters diff --git a/docs/sources/docker-io/builds.md b/docs/sources/docker-io/builds.md index 0ca058663a..1f6e002208 100644 --- a/docs/sources/docker-io/builds.md +++ b/docs/sources/docker-io/builds.md @@ -7,20 +7,25 @@ page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker.io, ## Trusted Builds *Trusted Builds* is a special feature allowing you to specify a source -repository with a *Dockerfile* to be built by the Docker build clusters. The -system will clone your repository and build the Dockerfile using the repository -as the context. The resulting image will then be uploaded to the registry and -marked as a `Trusted Build`. +repository with a `Dockerfile` to be built by the +[Docker.io](https://index.docker.io) build clusters. The system will +clone your repository and build the `Dockerfile` using the repository as +the context. The resulting image will then be uploaded to the registry +and marked as a *Trusted Build*. Trusted Builds have a number of advantages. For example, users of *your* Trusted Build can be certain that the resulting image was built exactly how it claims to be. -Furthermore, the Dockerfile will be available to anyone browsing your repository +Furthermore, the `Dockerfile` will be available to anyone browsing your repository on the registry. Another advantage of the Trusted Builds feature is automated builds, which make sure that your repository is always up to date. -### Linking with a GitHub account +Trusted Builds are supported for both public and private repositories on +both [GitHub](http://github.com) and +[BitBucket](https://bitbucket.org/). + +### Setting up Trusted Builds with GitHub In order to set up a Trusted Build, you need to first link your [Docker.io]( https://index.docker.io) account with a GitHub one. This will allow the registry @@ -30,23 +35,28 @@ to see your repositories. > https://index.docker.io) needs to set up a GitHub service hook. Although nothing > else is done with your account, this is how GitHub manages permissions, sorry! -### Creating a Trusted Build +Click on the [Trusted Builds tab](https://index.docker.io/builds/) to +get started and then select [+ Add +New](https://index.docker.io/builds/add/). + +Select the [GitHub +service](https://index.docker.io/associate/github/). + +Then follow the instructions to authorize and link your GitHub account +to Docker.io. + +#### Creating a Trusted Build You can [create a Trusted Build](https://index.docker.io/builds/github/select/) -from any of your public GitHub repositories with a Dockerfile. +from any of your public or private GitHub repositories with a `Dockerfile`. -> **Note:** We currently only support public repositories. To have more than -> one Docker image from the same GitHub repository, you will need to set up one -> Trusted Build per Dockerfile, each using a different image name. This rule -> applies to building multiple branches on the same GitHub repository as well. - -### GitHub organizations +#### GitHub organizations GitHub organizations appear once your membership to that organization is made public on GitHub. To verify, you can look at the members tab for your organization on GitHub.
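Since the build clusters simply clone your repository and build the `Dockerfile` with the repository as the context, you can approximate a Trusted Build locally before linking anything. A rough sketch, where the repository URL and image name are illustrative placeholders rather than anything prescribed above:

    # hypothetical repository and image name
    $ git clone git://github.com/username/reponame.git
    $ cd reponame
    $ sudo docker build -t username/reponame .

If the local build succeeds, a Trusted Build of the same repository and branch should produce an equivalent image.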
-### GitHub service hooks +#### GitHub service hooks You can follow the steps below to configure the GitHub service hooks for your Trusted Build: @@ -74,9 +84,32 @@ Trusted Build: +### Setting up Trusted Builds with BitBucket + +In order to set up a Trusted Build, you need to first link your +[Docker.io]( https://index.docker.io) account with a BitBucket one. This +will allow the registry to see your repositories. + +Click on the [Trusted Builds tab](https://index.docker.io/builds/) to +get started and then select [+ Add +New](https://index.docker.io/builds/add/). + +Select the [BitBucket +service](https://index.docker.io/associate/bitbucket/). + +Then follow the instructions to authorize and link your BitBucket account +to Docker.io. + +#### Creating a Trusted Build + +You can [create a Trusted +Build](https://index.docker.io/builds/bitbucket/select/) +from any of your public or private BitBucket repositories with a +`Dockerfile`. + ### The Dockerfile and Trusted Builds -During the build process, we copy the contents of your Dockerfile. We also +During the build process, we copy the contents of your `Dockerfile`. We also add it to the [Docker.io](https://index.docker.io) for the Docker community to see on the repository page. @@ -89,14 +122,18 @@ repository's full description. > If you change the full description after a build, it will be > rewritten the next time the Trusted Build has been built. To make changes, > modify the README.md from the Git repository. We will look for a README.md -> in the same directory as your Dockerfile. +> in the same directory as your `Dockerfile`. ### Build triggers -If you need another way to trigger your Trusted Builds outside of GitHub, you -can setup a build trigger. When you turn on the build trigger for a Trusted -Build, it will give you a URL to which you can send POST requests. This will -trigger the Trusted Build process, which is similar to GitHub webhooks. +If you need another way to trigger your Trusted Builds outside of GitHub +or BitBucket, you can set up a build trigger. When you turn on the build +trigger for a Trusted Build, it will give you a URL to which you can +send POST requests. This will trigger the Trusted Build process, which +is similar to GitHub webhooks. + +Build Triggers are available under the Settings tab of each Trusted +Build. > **Note:** > You can only trigger one build at a time and no more than one @@ -105,6 +142,52 @@ trigger the Trusted Build process, which is similar to GitHub webhooks. > You can find the logs of the last 10 triggers on the settings page to verify > if everything is working correctly. +### Webhooks + +Also available for Trusted Builds are webhooks. Webhooks can be called +after a successful repository push is made.
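Since a build trigger, as described above, is just a URL that accepts POST requests, firing one by hand can be sketched with `curl`. The trigger path below is an invented placeholder; the real URL is issued on the Settings tab:

    # placeholder trigger URL; substitute the one from your Settings tab
    $ curl -X POST https://index.docker.io/api/build-trigger/your-token-here/

Webhooks flow in the opposite direction: after a successful push-driven build, Docker.io makes the HTTP call to you.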
+ +The webhook call will generate an HTTP POST with the following JSON +payload: + +``` +{ + "push_data":{ + "pushed_at":1385141110, + "images":[ + "imagehash1", + "imagehash2", + "imagehash3" + ], + "pusher":"username" + }, + "repository":{ + "status":"Active", + "description":"my docker repo that does cool things", + "is_trusted":false, + "full_description":"This is my full description", + "repo_url":"https://index.docker.io/u/username/reponame/", + "owner":"username", + "is_official":false, + "is_private":false, + "name":"reponame", + "namespace":"username", + "star_count":1, + "comment_count":1, + "date_created":1370174400, + "dockerfile":"my full dockerfile is listed here", + "repo_name":"username/reponame" + } +} +``` + +Webhooks are available under the Settings tab of each Trusted +Build. + +> **Note:** If you want to test your webhook out, we recommend using +> a tool like [requestb.in](http://requestb.in/). + + ### Repository links Repository links are a way to associate one Trusted Build with another. If one diff --git a/docs/sources/examples/couchdb_data_volumes.md b/docs/sources/examples/couchdb_data_volumes.md index 17490487aa..ec1a0d9476 100644 --- a/docs/sources/examples/couchdb_data_volumes.md +++ b/docs/sources/examples/couchdb_data_volumes.md @@ -28,7 +28,7 @@ We're assuming your Docker host is reachable at `localhost`. If not, replace `localhost` with the public IP of your Docker host. $ HOST=localhost - $ URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -Po '\d+$')/_utils/" + $ URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -o '[1-9][0-9]*$')/_utils/" $ echo "Navigate to $URL in your browser, and use the couch interface to add data" ## Create second database @@ -40,7 +40,7 @@ This time, we're requesting shared access to `$COUCH1`'s volumes. ## Browse data on the second database $ HOST=localhost - $ URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -Po '\d+$')/_utils/" + $ URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -o '[1-9][0-9]*$')/_utils/" $ echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!' Congratulations, you are now running two CouchDB containers, completely diff --git a/docs/sources/examples/mongodb.md b/docs/sources/examples/mongodb.md index 4b5f95d023..e397cb0e58 100644 --- a/docs/sources/examples/mongodb.md +++ b/docs/sources/examples/mongodb.md @@ -1,89 +1,164 @@ -page_title: Building a Docker Image with MongoDB -page_description: How to build a Docker image with MongoDB pre-installed -page_keywords: docker, example, package installation, networking, mongodb +page_title: Dockerizing MongoDB +page_description: Creating a Docker image with MongoDB pre-installed using a Dockerfile and sharing the image on Docker.io +page_keywords: docker, dockerize, dockerizing, article, example, docker.io, platform, package, installation, networking, mongodb, containers, images, image, sharing, dockerfile, build, auto-building, virtualization, framework -# Building an Image with MongoDB +# Dockerizing MongoDB -> **Note**: +## Introduction + +In this example, we are going to learn how to build a Docker image +with MongoDB pre-installed. +We'll also see how to `push` that image to the [Docker.io registry]( +https://index.docker.io) and share it with others!
+ +Using Docker and containers for deploying [MongoDB](https://www.mongodb.org/) +instances will bring several benefits, such as: + + - Easy to maintain, highly configurable MongoDB instances; + - Ready to run and start working within milliseconds; + - Based on globally accessible and shareable images. + +> **Note:** > -> - This example assumes you have Docker running in daemon mode. For -> more information please see [*Check your Docker -> install*](../hello_world/#running-examples). -> - **If you don't like sudo** then see [*Giving non-root -> access*](/installation/binaries/#dockergroup) +> This example assumes you have Docker running in daemon mode. To verify, +> try running `sudo docker info`. +> For more information, please see: [*Check your Docker installation*]( +> /examples/hello_world/#running-examples). -The goal of this example is to show how you can build your own Docker -images with MongoDB pre-installed. We will do that by constructing a -Dockerfile that downloads a base image, adds an -apt source and installs the database software on Ubuntu. +> **Note:** +> +> If you do **_not_** like `sudo`, you might want to check out: +> [*Giving non-root access*](installation/binaries/#giving-non-root-access). -## Creating a Dockerfile +## Creating a Dockerfile for MongoDB -Create an empty file called Dockerfile: +Let's create our `Dockerfile` and start building it: - $ touch Dockerfile + $ nano Dockerfile -Next, define the parent image you want to use to build your own image on -top of. Here, we'll use [Ubuntu](https://index.docker.io/_/ubuntu/) -(tag: `latest`) available on the [docker -index](http://index.docker.io): +Although optional, it is handy to have comments at the beginning of a +`Dockerfile` explaining its purpose: - FROM ubuntu:latest + # Dockerizing MongoDB: Dockerfile for building MongoDB images + # Based on ubuntu:latest, installs MongoDB following the instructions from: + # http://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/ -Since we want to be running the latest version of MongoDB we'll need to -add the 10gen repo to our apt sources list. +> **Tip:** `Dockerfile`s are flexible. However, they need to follow a certain +> format. The first item to be defined is the name of an image, which becomes +> the *parent* of your *Dockerized MongoDB* image. - # Add 10gen official apt source to the sources list +We will build our image using the latest version of Ubuntu from the +[Docker.io Ubuntu](https://index.docker.io/_/ubuntu/) repository. + + # Format: FROM repository[:version] + FROM ubuntu:latest + +Continuing, we will declare the `MAINTAINER` of the `Dockerfile`: + + # Format: MAINTAINER Name <email@addr.com> + MAINTAINER M.Y. Name <myname@addr.com> + +> **Note:** Although Ubuntu systems have MongoDB packages, they are likely to +> be outdated. Therefore, in this example, we will use the official MongoDB +> packages. + +We will begin by importing the MongoDB public GPG key. We will also create +a MongoDB repository file for the package manager. + + # Installation: + # Import MongoDB public GPG key AND create a MongoDB list file RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list -Then, we don't want Ubuntu to complain about init not being available so -we'll divert `/sbin/initctl` to -`/bin/true` so it thinks everything is working. +After this initial preparation we can update our packages and install MongoDB.
+ - # Hack for initctl not being available in Ubuntu - RUN dpkg-divert --local --rename --add /sbin/initctl - RUN ln -s /bin/true /sbin/initctl - -Afterwards we'll be able to update our apt repositories and install -MongoDB - - # Install MongoDB + # Update apt-get sources AND install MongoDB RUN apt-get update - RUN apt-get install mongodb-10gen + RUN apt-get install -y -q mongodb-org -To run MongoDB we'll have to create the default data directory (because -we want it to run without needing to provide a special configuration -file) +> **Tip:** You can install a specific version of MongoDB by using a list +> of required packages with versions, e.g.: +> +> RUN apt-get install -y -q mongodb-org=2.6.1 mongodb-org-server=2.6.1 mongodb-org-shell=2.6.1 mongodb-org-mongos=2.6.1 mongodb-org-tools=2.6.1 + +MongoDB requires a data directory. Let's create it as the final step of our +installation instructions. # Create the MongoDB data directory RUN mkdir -p /data/db -Finally, we'll expose the standard port that MongoDB runs on, 27107, as -well as define an `ENTRYPOINT` instruction for the -container. +Lastly, we set the `ENTRYPOINT` which will tell Docker to run `mongod` inside +the containers launched from our MongoDB image. And for ports, we will use +the `EXPOSE` instruction. + # Expose port 27017 from the container to the host EXPOSE 27017 - ENTRYPOINT ["usr/bin/mongod"] -Now, lets build the image which will go through the -Dockerfile we made and run all of the commands. + # Set /usr/bin/mongod as the dockerized entry-point application + ENTRYPOINT /usr/bin/mongod - $ sudo docker build -t <yourname>/mongodb . +Now save the file and let's build our image. -Now you should be able to run `mongod` as a daemon -and be able to connect on the local port! +> **Note:** +> +> The full version of this `Dockerfile` can be found +> [here](/examples/mongodb/Dockerfile). - # Regular style - $ MONGO_ID=$(sudo docker run -d <yourname>/mongodb) +## Building the MongoDB Docker image - # Lean and mean - $ MONGO_ID=$(sudo docker run -d <yourname>/mongodb --noprealloc --smallfiles) +With our `Dockerfile`, we can now build the MongoDB image using Docker. Unless +experimenting, it is always a good practice to tag Docker images by passing the +`--tag` option to the `docker build` command. - # Check the logs out - $ sudo docker logs $MONGO_ID + # Format: sudo docker build --tag/-t <username>/<repository> . + # Example: + $ sudo docker build --tag my/repo . - # Connect and play around - $ mongo --port <port> +Once this command is issued, Docker will go through the `Dockerfile` and build +the image. The final image will be tagged `my/repo`. -Sweet! +## Pushing the MongoDB image to Docker.io + +All Docker image repositories can be hosted and shared on +[Docker.io](https://index.docker.io) with the `docker push` command. For this, +you need to be logged in. + + # Log-in + $ sudo docker login + Username: + .. + + # Push the image + # Format: sudo docker push <username>/<repository> + $ sudo docker push my/repo + The push refers to a repository [my/repo] (len: 1) + Sending image list + Pushing repository my/repo (1 tags) + .. + +## Using the MongoDB image + +Using the MongoDB image we created, we can run one or more MongoDB instances +as daemon process(es). + + # Basic way + # Usage: sudo docker run --name <name> -d <user>/<repository> + $ sudo docker run --name mongo_instance_001 -d my/repo + + # Dockerized MongoDB, lean and mean!
+ # Usage: sudo docker run --name <name> -d <user>/<repository> --noprealloc --smallfiles + $ sudo docker run --name mongo_instance_001 -d my/repo --noprealloc --smallfiles + + # Checking out the logs of a MongoDB container + # Usage: sudo docker logs <name> + $ sudo docker logs mongo_instance_001 + + # Playing with MongoDB + # Usage: mongo --port <port> + $ mongo --port 12345 + +## Learn more + + - [Linking containers](/use/working_with_links_names/) + - [Cross-host linking containers](/use/ambassador_pattern_linking/) + - [Creating a Trusted Build](/docker-io/builds/#trusted-builds) diff --git a/docs/sources/examples/mongodb/Dockerfile b/docs/sources/examples/mongodb/Dockerfile new file mode 100644 index 0000000000..e7acc0fd85 --- /dev/null +++ b/docs/sources/examples/mongodb/Dockerfile @@ -0,0 +1,24 @@ +# Dockerizing MongoDB: Dockerfile for building MongoDB images +# Based on ubuntu:latest, installs MongoDB following the instructions from: +# http://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/ + +FROM ubuntu:latest +MAINTAINER Docker + +# Installation: +# Import MongoDB public GPG key AND create a MongoDB list file +RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 +RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list + +# Update apt-get sources AND install MongoDB +RUN apt-get update +RUN apt-get install -y -q mongodb-org + +# Create the MongoDB data directory +RUN mkdir -p /data/db + +# Expose port 27017 from the container to the host +EXPOSE 27017 + +# Set /usr/bin/mongod as the dockerized entry-point application +ENTRYPOINT /usr/bin/mongod diff --git a/docs/sources/examples/postgresql_service.md b/docs/sources/examples/postgresql_service.md index 14d9e647a3..6f0a3e6bb1 100644 --- a/docs/sources/examples/postgresql_service.md +++ b/docs/sources/examples/postgresql_service.md @@ -84,7 +84,7 @@ Build an image from the Dockerfile assign it a name. And run the PostgreSQL server container (in the foreground): - $ sudo docker run -rm -P -name pg_test eg_postgresql + $ sudo docker run --rm -P --name pg_test eg_postgresql There are 2 ways to connect to the PostgreSQL server. We can use [*Link Containers*](/use/working_with_links_names/#working-with-links-names), @@ -101,7 +101,7 @@ Containers can be linked to another container's ports directly using `docker run`. This will set a number of environment variables that can then be used to connect: - $ sudo docker run -rm -t -i -link pg_test:pg eg_postgresql bash + $ sudo docker run --rm -t -i --link pg_test:pg eg_postgresql bash postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password @@ -143,7 +143,7 @@ prompt, you can create a table and populate it. You can use the defined volumes to inspect the PostgreSQL log files and to backup your configuration and data: - $ docker run -rm --volumes-from pg_test -t -i busybox sh + $ docker run --rm --volumes-from pg_test -t -i busybox sh / # ls bin etc lib linuxrc mnt proc run sys usr diff --git a/docs/sources/examples/python_web_app.md b/docs/sources/examples/python_web_app.md index e761003a9e..f4b76d061d 100644 --- a/docs/sources/examples/python_web_app.md +++ b/docs/sources/examples/python_web_app.md @@ -51,7 +51,7 @@ the `$URL` variable. The container is given a name While this example is simple, you could run any number of interactive commands, try things out, and then exit when you're done.
- $ sudo docker run -i -t -name pybuilder_run shykes/pybuilder bash + $ sudo docker run -i -t --name pybuilder_run shykes/pybuilder bash $$ URL=http://github.com/shykes/helloflask/archive/master.tar.gz $$ /usr/local/bin/buildapp $URL diff --git a/docs/sources/examples/running_ssh_service.md b/docs/sources/examples/running_ssh_service.md index 864d10c726..534e22e1b3 100644 --- a/docs/sources/examples/running_ssh_service.md +++ b/docs/sources/examples/running_ssh_service.md @@ -35,12 +35,12 @@ quick access to a test container. Build the image using: - $ sudo docker build -rm -t eg_sshd . + $ sudo docker build --rm -t eg_sshd . Then run it. You can then use `docker port` to find out what host port the container's port 22 is mapped to: - $ sudo docker run -d -P -name test_sshd eg_sshd + $ sudo docker run -d -P --name test_sshd eg_sshd $ sudo docker port test_sshd 22 0.0.0.0:49154 diff --git a/docs/sources/examples/using_supervisord.md b/docs/sources/examples/using_supervisord.md index 29d2fa4525..6fc47b0c03 100644 --- a/docs/sources/examples/using_supervisord.md +++ b/docs/sources/examples/using_supervisord.md @@ -30,7 +30,7 @@ install and manage both an SSH daemon and an Apache daemon. Let's start by creating a basic `Dockerfile` for our new image. - FROM ubuntu:latest + FROM ubuntu:13.04 MAINTAINER examples@docker.io RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list RUN apt-get update diff --git a/docs/sources/index.md b/docs/sources/index.md index d582321563..16f29f5708 100644 --- a/docs/sources/index.md +++ b/docs/sources/index.md @@ -1,82 +1,99 @@ page_title: About Docker -page_description: Docker introduction home page +page_description: Introduction to Docker. page_keywords: docker, introduction, documentation, about, technology, understanding, Dockerfile # About Docker -*Secure And Portable Containers Made Easy* +**Develop, Ship and Run Any Application, Anywhere** ## Introduction -[**Docker**](https://www.docker.io) is a container based virtualization -framework. Unlike traditional virtualization Docker is fast, lightweight -and easy to use. Docker allows you to create containers holding -all the dependencies for an application. Each container is kept isolated -from any other, and nothing gets shared. +[**Docker**](https://www.docker.io) is a platform for developers and +sysadmins to develop, ship, and run applications. Docker consists of: -## Docker highlights +* The Docker Engine - our lightweight and powerful open source container + virtualization technology combined with a workflow to help you build + and containerize your applications. +* [Docker.io](https://index.docker.io) - our SaaS service that helps you + share and manage your application stacks. - - **Containers provide sand-boxing:** - Applications run securely without outside access. - - **Docker allows simple portability:** - Containers are directories, they can be zipped and transported. - - **It all works fast:** - Starting a container is a very fast single process. - - **Docker is easy on the system resources (unlike VMs):** - No more than what each application needs. - - **Agnostic in its _essence_:** - Free of framework, language or platform dependencies. +Docker enables applications to be quickly assembled from components and +eliminates the friction when shipping code. We want to help you get code +from your desktop, tested and deployed into production as fast as +possible. -And most importantly: +## Why Docker?
- - **Docker reduces complexity:** - Docker accepts commands *in plain English*, e.g. `docker run [..]`. +- **Faster delivery of your applications** + * We want to help your environment work better. Docker containers, + and the workflow that comes with them, help your developers, + sysadmins, QA folks, and release engineers work together to get code + into production and do something useful. We've created a standard + container format that allows developers to care about their applications + inside containers and sysadmins and operators to care about running the + container. This creates a separation of duties that makes managing and + deploying code much easier and much more streamlined. + * We make it easy to build new containers, enable rapid iteration of + your applications and increase the visibility of changes. This + helps everyone in your organization understand how an application works + and how it is built. + * Docker containers are lightweight and fast! Containers have + sub-second launch times! With containers you can reduce the cycle + time in development, testing and deployment. + +- **Deploy and scale more easily** + * Docker containers run (almost!) everywhere. You can deploy your + containers on desktops, physical servers, virtual machines, into + data centers and to public and private clouds. + * As Docker runs on so many platforms it makes it easy to move your + applications around. You can easily move an application from a + testing environment into the cloud and back whenever you need. + * The lightweight containers Docker creates also make scaling up and + down really fast and easy. If you need more containers you can + quickly launch them and then shut them down when you don't need them + anymore. + +- **Get higher density and run more workloads** + * Docker containers don't need a hypervisor so you can pack more of + them onto your hosts. This means you get more value out of every + server and can potentially reduce the money you spend on equipment and + licenses! + +- **Faster deployment makes for easier management** + * As Docker speeds up your workflow it makes it easier to make lots + of little changes instead of huge, big bang updates. Smaller + changes mean smaller risks and mean more uptime! ## About this guide -In this introduction we will take you on a tour and show you what -makes Docker tick. +First we'll show you [what makes Docker tick in our Understanding Docker +section](introduction/understanding-docker.md): -On the [**first page**](introduction/understanding-docker.md), which is -**_informative_**: - - - You will find information on Docker; - - And discover Docker's features. - - We will also compare Docker to virtual machines; + - You will see how Docker works at a high level; + - The architecture of Docker; + - Discover Docker's features; + - See how Docker compares to virtual machines; - And see some common use cases. -> [Click here to go to Understanding Docker](introduction/understanding-docker.md). +> [Click here to go to the Understanding +> Docker section](introduction/understanding-docker.md). -The [**second page**](introduction/technology.md) has **_technical_** information on: +Next we get [**practical** with the Working with Docker +section](introduction/working-with-docker.md) and you can learn about: - - The architecture of Docker; - - The underlying technology, and; - - *How* Docker works.
+  - Docker on the command line;
+  - Get introduced to your first Docker commands;
+  - Get to know your way around the basics of Docker operation.

-> [Click here to go to Understanding the Technology](introduction/technology.md).
+> [Click here to go to the Working with
+> Docker section](introduction/working-with-docker.md).

-On the [**third page**](introduction/working-with-docker.md) we get **_practical_**.
-There you can:
-
-  - Learn about Docker's components (i.e. Containers, Images and the
-    Dockerfile);
-  - And get started working with them straight away.
-
-> [Click here to go to Working with Docker](introduction/working-with-docker.md).
-
-Finally, on the [**fourth**](introduction/get-docker.md) page, we go **_hands on_**
-and see:
-
-  - The installation instructions, and;
-  - How Docker makes some hard problems much, much easier.
-
-> [Click here to go to Get Docker](introduction/get-docker.md).
+If you want to see how to install Docker you can jump to the
+[installation](/installation/#installation) section.

> **Note**:
-> We know how valuable your time is. Therefore, the documentation is prepared
-> in a way to allow anyone to start from any section need. Although we strongly
-> recommend that you visit [Understanding Docker](
-> introduction/understanding-docker.md) to see how Docker is different, if you
-> already have some knowledge and want to quickly get started with Docker,
-> don't hesitate to jump to [Working with Docker](
-> introduction/working-with-docker.md).
+> We know how valuable your time is so if you want to get started
+> with Docker straight away don't hesitate to jump to [Working with
+> Docker](introduction/working-with-docker.md). For a fuller
+> understanding of Docker, though, we do recommend you read [Understanding
+> Docker](introduction/understanding-docker.md).
diff --git a/docs/sources/installation.md b/docs/sources/installation.md
index 66b28b2b3c..1c3c726594 100644
--- a/docs/sources/installation.md
+++ b/docs/sources/installation.md
@@ -12,6 +12,7 @@ techniques for installing Docker all the time.

 - [Ubuntu](ubuntulinux/)
 - [Red Hat Enterprise Linux](rhel/)
 - [Fedora](fedora/)
+ - [Debian](debian/)
 - [Arch Linux](archlinux/)
 - [CRUX Linux](cruxlinux/)
 - [Gentoo](gentoolinux/)
@@ -22,4 +23,4 @@ techniques for installing Docker all the time.

 - [Amazon EC2](amazon/)
 - [Rackspace Cloud](rackspace/)
 - [Google Cloud Platform](google/)
- - [Binaries](binaries/)
\ No newline at end of file
+ - [Binaries](binaries/)
diff --git a/docs/sources/installation/cruxlinux.md b/docs/sources/installation/cruxlinux.md
index d1a4de7367..6f0bfff74a 100644
--- a/docs/sources/installation/cruxlinux.md
+++ b/docs/sources/installation/cruxlinux.md
@@ -17,50 +17,24 @@ page_keywords: crux linux, virtualization, Docker, documentation, installation
> some binaries to be updated and published.

Installing on CRUX Linux can be handled via the ports from [James
-Mills](http://prologic.shortcircuit.net.au/):
+Mills](http://prologic.shortcircuit.net.au/), which are included in the
+official [contrib](http://crux.nu/portdb/?a=repo&q=contrib) ports:

-- [docker](https://bitbucket.org/prologic/ports/src/tip/docker/)
-- [docker-bin](https://bitbucket.org/prologic/ports/src/tip/docker-bin/)
-- [docker-git](https://bitbucket.org/prologic/ports/src/tip/docker-git/)
+- docker
+- docker-bin

The `docker` port will install the
latest tagged version of Docker.
The `docker-bin` port will
-install the latest tagged versin of Docker from upstream built binaries.
-The `docker-git` package will build from the current
-master branch.
+install the latest tagged version of Docker from upstream built binaries.

## Installation

-For the time being (*until the CRUX Docker port(s) get into the official
-contrib repository*) you will need to install [James
-Mills`](https://bitbucket.org/prologic/ports) ports repository. You can
-do so via:
+Assuming you have contrib enabled, update your ports tree and install docker (*as root*):

-Download the `httpup` file to
-`/etc/ports/`:
+    # prt-get depinst docker

-    $ curl -q -o - http://crux.nu/portdb/?a=getup&q=prologic > /etc/ports/prologic.httpup
+You can install `docker-bin` instead if you wish to avoid compilation time.

-Add `prtdir /usr/ports/prologic` to
-`/etc/prt-get.conf`:
-
-    $ vim /etc/prt-get.conf
-
-    # or:
-    $ echo "prtdir /usr/ports/prologic" >> /etc/prt-get.conf
-
-Update ports and prt-get cache:
-
-    $ ports -u
-    $ prt-get cache
-
-To install (*and its dependencies*):
-
-    $ prt-get depinst docker
-
-Use `docker-bin` for the upstream binary or
-`docker-git` to build and install from the master
-branch from git.

## Kernel Requirements

@@ -68,24 +42,34 @@ To have a working **CRUX+Docker** Host you must ensure your Kernel
has the necessary modules enabled for LXC containers to function
correctly and Docker Daemon to work properly.

-Please read the `README.rst`:
+Please read the `README`:

    $ prt-get readme docker

-There is a `test_kernel_config.sh` script in the
-above ports which you can use to test your Kernel configuration:
+The `docker` and `docker-bin` ports install the `contrib/check-config.sh`
+script provided by the Docker contributors for checking your kernel
+configuration as a suitable Docker Host.

-    $ cd /usr/ports/prologic/docker
-    $ ./test_kernel_config.sh /usr/src/linux/.config
+    $ /usr/share/docker/check-config.sh

## Starting Docker

-There is a rc script created for Docker. To start the Docker service:
+There is an rc script created for Docker. To start the Docker service (*as root*):

-    $ sudo su -
-    $ /etc/rc.d/docker start
+    # /etc/rc.d/docker start

To start on system boot:

 - Edit `/etc/rc.conf`
 - Put `docker` into the `SERVICES=(...)` array after `net`.
+
+## Issues
+
+If you have any issues please file a bug with the
+[CRUX Bug Tracker](http://crux.nu/bugs/).
+
+## Support
+
+For support, contact the [CRUX Mailing List](http://crux.nu/Main/MailingLists)
+or join CRUX's [IRC Channels](http://crux.nu/Main/IrcChannels) on the
+[FreeNode](http://freenode.net/) IRC Network.
diff --git a/docs/sources/installation/debian.md b/docs/sources/installation/debian.md
new file mode 100644
index 0000000000..8f4f31c29d
--- /dev/null
+++ b/docs/sources/installation/debian.md
@@ -0,0 +1,78 @@
+page_title: Installation on Debian
+page_description: Instructions for installing Docker on Debian
+page_keywords: Docker, Docker documentation, installation, debian
+
+# Debian
+
+> **Note**:
+> Docker is still under heavy development! We don't recommend using it in
+> production yet, but we're getting closer with each release. Please see
+> our blog post, [Getting to Docker 1.0](
+> http://blog.docker.io/2013/08/getting-to-docker-1-0/)
+
+Docker is supported on the following versions of Debian:
+
+ - [*Debian 8.0 Jessie (64-bit)*](#debian-jessie-8-64-bit)
+
+## Debian Jessie 8.0 (64-bit)
+
+Debian 8 comes with a 3.14.0 Linux kernel, and a `docker.io` package which
+installs all its prerequisites from Debian's repository.
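+
+If you want to confirm the kernel your host is running before you
+install, you can check with `uname` (the version string below is only an
+example; yours may differ):
+
+    $ uname -r
+    3.14-1-amd64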
+
+> **Note**:
+> Debian contains a much older KDE3/GNOME2 package called ``docker``, so the
+> package and the executable are called ``docker.io``.
+
+### Installation
+
+To install the latest Debian package (may not be the latest Docker release):
+
+    $ sudo apt-get update
+    $ sudo apt-get install docker.io
+    $ sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker
+    $ sudo sed -i '$acomplete -F _docker docker' /etc/bash_completion.d/docker.io
+
+To verify that everything has worked as expected:
+
+    $ sudo docker run -i -t ubuntu /bin/bash
+
+Which should download the `ubuntu` image, and then start `bash` in a container.
+
+> **Note**:
+> If you want to enable memory and swap accounting see
+> [this](/installation/ubuntulinux/#memory-and-swap-accounting).
+
+### Giving non-root access
+
+The `docker` daemon always runs as the `root` user, and since Docker
+version 0.5.2, the `docker` daemon binds to a Unix socket instead of a
+TCP port. By default that Unix socket is owned by the user `root`, and
+so, by default, you can access it with `sudo`.
+
+Starting in version 0.5.3, if you (or your Docker installer) create a
+Unix group called `docker` and add users to it, then the `docker` daemon
+will make the ownership of the Unix socket read/writable by the `docker`
+group when the daemon starts. The `docker` daemon must always run as the
+root user, but if you run the `docker` client as a user in the `docker`
+group then you don't need to add `sudo` to all the client commands. From
+Docker 0.9.0 you can use the `-G` flag to specify an alternative group.
+
+> **Warning**:
+> The `docker` group (or the group specified with the `-G` flag) is
+> `root`-equivalent; see [*Docker Daemon Attack Surface*](
+> /articles/security/#dockersecurity-daemon) for details.
+
+**Example:**
+
+    # Add the docker group if it doesn't already exist.
+    $ sudo groupadd docker
+
+    # Add the connected user "${USER}" to the docker group.
+    # Change the user name to match your preferred user.
+    # You may have to logout and log back in again for
+    # this to take effect.
+    $ sudo gpasswd -a ${USER} docker
+
+    # Restart the Docker daemon.
+    $ sudo service docker restart
+
diff --git a/docs/sources/installation/fedora.md b/docs/sources/installation/fedora.md
index 93b5b05b13..20ab4477d8 100644
--- a/docs/sources/installation/fedora.md
+++ b/docs/sources/installation/fedora.md
@@ -32,7 +32,7 @@ it. To proceed with `docker-io` installation on Fedora 19, please remove

    $ sudo yum -y remove docker

-For Fedora 20 and later, the `wmdocker` package will
+For Fedora 21 and later, the `wmdocker` package will
provide the same functionality as `docker` and will also not
conflict with `docker-io`.

diff --git a/docs/sources/installation/google.md b/docs/sources/installation/google.md
index 4c22808dcb..29ffe0d73f 100644
--- a/docs/sources/installation/google.md
+++ b/docs/sources/installation/google.md
@@ -19,17 +19,16 @@ page_keywords: Docker, Docker documentation, installation, google, Google Comput
    https://developers.google.com/cloud/sdk/) to use your project
    with the following commands:

-
-
+    ```
    $ curl https://dl.google.com/dl/cloudsdk/release/install_google_cloud_sdk.bash | bash
    $ gcloud auth login
    Enter a cloud project id (or leave blank to not set):
+    ```

 3. Start a new instance, select a zone close to you and the desired
    instance size:

-
-
+    ```
    $ gcutil addinstance docker-playground --image=backports-debian-7
    1: europe-west1-a
    ...
@@ -39,25 +38,26 @@ page_keywords: Docker, Docker documentation, installation, google, Google Comput
    ...
12: machineTypes/g1-small >>> + ``` 4. Connect to the instance using SSH: - - + ``` $ gcutil ssh docker-playground $ docker-playground:~$ + ``` 5. Install the latest Docker release and configure it to start when the instance boots: - - + ``` $ docker-playground:~$ curl get.docker.io | bash $ docker-playground:~$ sudo update-rc.d docker defaults + ``` 6. Start a new container: - - + ``` $ docker-playground:~$ sudo docker run busybox echo 'docker on GCE \o/' $ docker on GCE \o/ + ``` diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md index c30e0b6440..45e67e8d01 100644 --- a/docs/sources/installation/mac.md +++ b/docs/sources/installation/mac.md @@ -41,9 +41,9 @@ and install VirtualBox. > Do not simply copy the package without running the > installer. -## Installing boot2docker +## Installing boot2docker manually -### Installing manually +### Downloading the boot2docker script [boot2docker](https://github.com/boot2docker/boot2docker) provides a handy script to manage the VM running the Docker daemon. It also takes @@ -153,7 +153,7 @@ option, ports 49000-49900, and run the following command. If you feel the need to connect to the VM, you can simply run: - $ ./boot2docker ssh + $ boot2docker ssh # User: docker # Pwd: tcuser diff --git a/docs/sources/installation/ubuntulinux.md b/docs/sources/installation/ubuntulinux.md index d40e17b646..876eb5d290 100644 --- a/docs/sources/installation/ubuntulinux.md +++ b/docs/sources/installation/ubuntulinux.md @@ -1,6 +1,6 @@ page_title: Installation on Ubuntu -page_description: Please note this project is currently under heavy development. It should not be used in production. -page_keywords: Docker, Docker documentation, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux +page_description: Instructions for installing Docker on Ubuntu. +page_keywords: Docker, Docker documentation, requirements, virtualbox, installation, ubuntu # Ubuntu @@ -36,6 +36,7 @@ To install the latest Ubuntu package (may not be the latest Docker release): $ sudo apt-get update $ sudo apt-get install docker.io $ sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker + $ sudo sed -i '$acomplete -F _docker docker' /etc/bash_completion.d/docker.io To verify that everything has worked as expected: @@ -169,26 +170,23 @@ World*](/examples/hello_world/#hello-world) example. ### Giving non-root access -The `docker` daemon always runs as the root user, -and since Docker version 0.5.2, the `docker` daemon -binds to a Unix socket instead of a TCP port. By default that Unix -socket is owned by the user *root*, and so, by default, you can access -it with `sudo`. +The `docker` daemon always runs as the `root` user, and since Docker +version 0.5.2, the `docker` daemon binds to a Unix socket instead of a +TCP port. By default that Unix socket is owned by the user `root`, and +so, by default, you can access it with `sudo`. Starting in version 0.5.3, if you (or your Docker installer) create a -Unix group called *docker* and add users to it, then the -`docker` daemon will make the ownership of the Unix -socket read/writable by the *docker* group when the daemon starts. The -`docker` daemon must always run as the root user, -but if you run the `docker` client as a user in the -*docker* group then you don't need to add `sudo` to -all the client commands. As of 0.9.0, you can specify that a group other -than `docker` should own the Unix socket with the -`-G` option. 
+Unix group called `docker` and add users to it, then the `docker` daemon
+will make the ownership of the Unix socket read/writable by the `docker`
+group when the daemon starts. The `docker` daemon must always run as the
+`root` user, but if you run the `docker` client as a user in the
+`docker` group then you don't need to add `sudo` to all the client
+commands. From Docker 0.9.0 you can use the `-G` flag to specify an
+alternative group.

> **Warning**:
-> The *docker* group (or the group specified with `-G`) is
-> root-equivalent; see [*Docker Daemon Attack Surface*](
+> The `docker` group (or the group specified with the `-G` flag) is
+> `root`-equivalent; see [*Docker Daemon Attack Surface*](
> /articles/security/#dockersecurity-daemon) for details.

**Example:**

@@ -203,6 +201,7 @@ than `docker` should own the Unix socket with the
    $ sudo gpasswd -a ${USER} docker

    # Restart the Docker daemon.
+    # If you are on Ubuntu 14.04, use docker.io instead of docker
    $ sudo service docker restart

### Upgrade
diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md
index ec633508c4..189be00748 100644
--- a/docs/sources/installation/windows.md
+++ b/docs/sources/installation/windows.md
@@ -59,10 +59,34 @@ Let's try the “hello world” example. Run

This will download the small busybox image and print hello world.

-## Observations
+## Persistent storage

-### Persistent storage
+1. Add a virtual hard drive to the VM created in Installation
+2. Start the VM
+3. Create an empty partition on the attached virtual hard drive

-The virtual machine created above lacks any persistent data storage. All
-images and containers will be lost when shutting down or rebooting the
-VM.
+    ```sh
+    sudo fdisk /dev/sda
+    n (new partition)
+    p (primary partition)
+    1 (partition 1)
+    w (write changes to disk)
+    ```
+
+4. Format the partition using ext4
+
+    ```sh
+    mkfs.ext4 -L boot2docker-data /dev/sda1
+    ```
+
+5. Reboot
+
+    ```sh
+    sudo reboot
+    ```
+
+6. boot2docker should now automatically mount the partition and persist data there. (/var/lib/docker linking to /mnt/sda1/var/lib/docker)
+
+    ```sh
+    ls -l /var/lib
+    ```
diff --git a/docs/sources/introduction/get-docker.md b/docs/sources/introduction/get-docker.md
deleted file mode 100644
index e0d6f16654..0000000000
--- a/docs/sources/introduction/get-docker.md
+++ /dev/null
@@ -1,77 +0,0 @@
-page_title: Getting Docker
-page_description: Getting Docker and installation tutorials
-page_keywords: docker, introduction, documentation, about, technology, understanding, Dockerfile
-
-# Getting Docker
-
-*How to install Docker?*
-
-## Introductions
-
-Once you are comfortable with your level of knowledge of Docker, and
-feel like actually trying the product, you can download and start using
-it by following the links listed below. There, you will find
-installation instructions, specifically tailored for your platform of choice.
- -## Installation Instructions - -### Linux (Native) - - - **Arch Linux:** - [Installation on Arch Linux](../installation/archlinux.md) - - **Fedora:** - [Installation on Fedora](../installation/fedora.md) - - **FrugalWare:** - [Installation on FrugalWare](../installation/frugalware.md) - - **Gentoo:** - [Installation on Gentoo](../installation/gentoolinux.md) - - **Red Hat Enterprise Linux:** - [Installation on Red Hat Enterprise Linux](../installation/rhel.md) - - **Ubuntu:** - [Installation on Ubuntu](../installation/ubuntulinux.md) - - **openSUSE:** - [Installation on openSUSE](../installation/openSUSE.md) - -### Mac OS X (Using Boot2Docker) - -In order to work, Docker makes use of some Linux Kernel features which -are not supported by Mac OS X. To run Docker on OS X we install and run -a lightweight virtual machine and run Docker on that. - - - **Mac OS X :** - [Installation on Mac OS X](../installation/mac.md) - -### Windows (Using Boot2Docker) - -Docker can also run on Windows using a virtual machine. You then run -Linux and Docker inside that virtual machine. - - - **Windows:** - [Installation on Windows](../installation/windows.md) - -### Infrastructure-as-a-Service - - - **Amazon EC2:** - [Installation on Amazon EC2](../installation/amazon.md) - - **Google Cloud Platform:** - [Installation on Google Cloud Platform](../installation/google.md) - - **Rackspace Cloud:** - [Installation on Rackspace Cloud](../installation/rackspace.md) - -## Where to go from here - -### Understanding Docker - -Visit [Understanding Docker](understanding-docker.md) in our Getting Started manual. - -### Learn about parts of Docker and the underlying technology - -Visit [Understanding the Technology](technology.md) in our Getting Started manual. - -### Get practical and learn how to use Docker straight away - -Visit [Working with Docker](working-with-docker.md) in our Getting Started manual. - -### Get the whole story - -[https://www.docker.io/the_whole_story/](https://www.docker.io/the_whole_story/) diff --git a/docs/sources/introduction/technology.md b/docs/sources/introduction/technology.md deleted file mode 100644 index a724e4aae6..0000000000 --- a/docs/sources/introduction/technology.md +++ /dev/null @@ -1,268 +0,0 @@ -page_title: Understanding the Technology -page_description: Technology of Docker explained in depth -page_keywords: docker, introduction, documentation, about, technology, understanding, Dockerfile - -# Understanding the Technology - -*What is the architecture of Docker? What is its underlying technology?* - -## Introduction - -When it comes to understanding Docker and its underlying technology -there is no *magic* involved. Everything is based on tried and tested -features of the *Linux kernel*. Docker either makes use of those -features directly or builds upon them to provide new functionality. - -Aside from the technology, one of the major factors that make Docker -great is the way it is built. The project's core is very lightweight and -as much of Docker as possible is designed to be pluggable. Docker is -also built with integration in mind and has a fully featured API that -allows you to access all of the power of Docker from inside your own -applications. - -## The Architecture of Docker - -Docker is designed for developers and sysadmins. It's built to help you -build applications and services and then deploy them quickly and -efficiently: from development to production. - -Let's take a look. - --- Docker is a client-server application. 
--- Both the Docker client and the daemon *can* run on the same system, or; --- You can connect a Docker client with a remote Docker daemon. --- They communicate via sockets or through a RESTful API. --- Users interact with the client to command the daemon, e.g. to create, run, and stop containers. --- The daemon, receiving those commands, does the job, e.g. run a container, stop a container. - -![Docker Architecture Diagram](/article-img/architecture.svg) - -## The components of Docker - -Docker's main components are: - - - Docker *daemon*; - - Docker *client*, and; - - [Docker.io](https://index.docker.io) registry. - -### The Docker daemon - -As shown on the diagram above, the Docker daemon runs on a host machine. -The user does not directly interact with the daemon, but instead through -an intermediary: the Docker client. - -### Docker client - -The Docker client is the primary user interface to Docker. It is tasked -with accepting commands from the user and communicating back and forth -with a Docker daemon to manage the container lifecycle on any host. - -### Docker.io registry - -[Docker.io](https://index.docker.io) is the global archive (and -directory) of user supplied Docker container images. It currently hosts -a large – in fact, rapidly growing – number of projects where you -can find almost any popular application or deployment stack readily -available to download and run with a single command. - -As a social community project, Docker tries to provide all necessary -tools for everyone to grow with other *Dockers*. By issuing a single -command through the Docker client you can start sharing your own -creations with the rest of the world. - -However, knowing that not everything can be shared the [Docker.io]( -https://index.docker.io) also offers private repositories. In order to see -the available plans, you can click [here](https://index.docker.io/plans). - -Using [*docker-registry*](https://github.com/dotcloud/docker-registry), it is -also possible to run your own private Docker image registry service on your own -servers. - -> **Note:** To learn more about the [*Docker.io*](http://index.docker.io) -> registry (for public *and* private repositories), check out the [Registry & -> Index Spec](http://docs.docker.io/api/registry_index_spec/). - -### Summary - - - **When you install Docker, you get all the components:** - The daemon, the client and access to the [Docker.io](http://index.docker.io) registry. - - **You can run these components together or distributed:** - Servers with the Docker daemon running, controlled by the Docker client. - - **You can benefit form the public registry:** - Download and build upon images created by the community. - - **You can start a private repository for proprietary use.** - Sign up for a [plan](https://index.docker.io/plans) or host your own [docker-registry]( -https://github.com/dotcloud/docker-registry). - -## Elements of Docker - -The basic elements of Docker are: - - - **Containers, which allow:** - The run portion of Docker. Your applications run inside of containers. - - **Images, which provide:** - The build portion of Docker. Your containers are built from images. - - **The Dockerfile, which automates:** - A file that contains simple instructions that build Docker images. - -To get practical and learn what they are, and **_how to work_** with -them, continue to [Working with Docker](working-with-docker.md). If you would like to -understand **_how they work_**, stay here and continue reading. 
- -## The underlying technology - -The power of Docker comes from the underlying technology it is built -from. A series of operating system features are carefully glued together -to provide Docker's features and provide an easy to use interface to -those features. In this section, we will see the main operating system -features that Docker uses to make easy containerization happen. - -### Namespaces - -Docker takes advantage of a technology called `namespaces` to provide -an isolated workspace we call a *container*. When you run a container, -Docker creates a set of *namespaces* for that container. - -This provides a layer of isolation: each process runs in its own -namespace and does not have access outside it. - -Some of the namespaces Docker uses are: - - - **The `pid` namespace:** - Used for process numbering (PID: Process ID) - - **The `net` namespace:** - Used for managing network interfaces (NET: Networking) - - **The `ipc` namespace:** - Used for managing access to IPC resources (IPC: InterProcess Communication) - - **The `mnt` namespace:** - Used for managing mount-points (MNT: Mount) - - **The `uts` namespace:** - Used for isolating kernel / version identifiers. (UTS: Unix Timesharing System) - -### Control groups - -Docker also makes use of another technology called `cgroups` or control -groups. A key need to run applications in isolation is to have them -contained, not just in terms of related filesystem and/or dependencies, -but also, resources. Control groups allow Docker to fairly -share available hardware resources to containers and if asked, set up to -limits and constraints, for example limiting the memory to a maximum of 128 -MBs. - -### UnionFS - -UnionFS or union filesystems are filesystems that operate by creating -layers, making them very lightweight and fast. Docker uses union -filesystems to provide the building blocks for containers. We'll see -more about this below. - -### Containers - -Docker combines these components to build a container format we call -`libcontainer`. Docker also supports traditional Linux containers like -[LXC](https://linuxcontainers.org/) which also make use of these -components. - -## How does everything work - -A lot happens when Docker creates a container. - -Let's see how it works! - -### How does a container work? - -A container consists of an operating system, user added files and -meta-data. Each container is built from an image. That image tells -Docker what the container holds, what process to run when the container -is launched and a variety of other configuration data. The Docker image -is read-only. When Docker runs a container from an image it adds a -read-write layer on top of the image (using the UnionFS technology we -saw earlier) to run inside the container. - -### What happens when you run a container? - -The Docker client (or the API!) tells the Docker daemon to run a -container. Let's take a look at a simple `Hello world` example. - - $ docker run -i -t ubuntu /bin/bash - -Let's break down this command. The Docker client is launched using the -`docker` binary. The bare minimum the Docker client needs to tell the -Docker daemon is: - -* What Docker image to build the container from; -* The command you want to run inside the container when it is launched. - -So what happens under the covers when we run this command? 
- -Docker begins with: - - - **Pulling the `ubuntu` image:** - Docker checks for the presence of the `ubuntu` image and if it doesn't - exist locally on the host, then Docker downloads it from [Docker.io](https://index.docker.io) - - **Creates a new container:** - Once Docker has the image it creates a container from it. - - **Allocates a filesystem and mounts a read-write _layer_:** - The container is created in the filesystem and a read-write layer is added to the image. - - **Allocates a network / bridge interface:** - Creates a network interface that allows the Docker container to talk to the local host. - - **Sets up an IP address:** - Intelligently finds and attaches an available IP address from a pool. - - **Executes _a_ process that you specify:** - Runs your application, and; - - **Captures and provides application output:** - Connects and logs standard input, outputs and errors for you to see how your application is running. - -### How does a Docker Image work? - -We've already seen that Docker images are read-only templates that -Docker containers are launched from. When you launch that container it -creates a read-write layer on top of that image that your application is -run in. - -Docker images are built using a simple descriptive set of steps we -call *instructions*. Instructions are stored in a file called a -`Dockerfile`. Each instruction writes a new layer to an image using the -UnionFS technology we saw earlier. - -Every image starts from a base image, for example `ubuntu` a base Ubuntu -image or `fedora` a base Fedora image. Docker builds and provides these -base images via [Docker.io](http://index.docker.io). - -### How does a Docker registry work? - -The Docker registry is a store for your Docker images. Once you build a -Docker image you can *push* it to a public or private repository on [Docker.io]( -http://index.docker.io) or to your own registry running behind your firewall. - -Using the Docker client, you can search for already published images and -then pull them down to your Docker host to build containers from them -(or even build on these images). - -[Docker.io](http://index.docker.io) provides both public and -private storage for images. Public storage is searchable and can be -downloaded by anyone. Private repositories are excluded from search -results and only you and your users can pull them down and use them to -build containers. You can [sign up for a plan here](https://index.docker.io/plans). - -To learn more, check out the [Working with Repositories]( -http://docs.docker.io/use/workingwithrepository) section from the -[Docker documentation](http://docs.docker.io). - -## Where to go from here - -### Understanding Docker - -Visit [Understanding Docker](understanding-docker.md) in our Getting Started manual. - -### Get practical and learn how to use Docker straight away - -Visit [Working with Docker](working-with-docker.md) in our Getting Started manual. - -### Get the product and go hands-on - -Visit [Get Docker](get-docker.md) in our Getting Started manual. 
-
-### Get the whole story
-
-[https://www.docker.io/the_whole_story/](https://www.docker.io/the_whole_story/)
diff --git a/docs/sources/introduction/understanding-docker.md b/docs/sources/introduction/understanding-docker.md
index 53f5e43179..1d99be7046 100644
--- a/docs/sources/introduction/understanding-docker.md
+++ b/docs/sources/introduction/understanding-docker.md
@@ -1,38 +1,129 @@
page_title: Understanding Docker
page_description: Docker explained in depth
-page_keywords: docker, introduction, documentation, about, technology, understanding, Dockerfile
+page_keywords: docker, introduction, documentation, about, technology, understanding

# Understanding Docker

-*What is Docker? What makes it great?*
+**What is Docker?**

-Building development lifecycles, pipelines and deployment tooling is
-hard. It's not easy to create portable applications and services.
-There's often high friction getting code from your development
-environment to production. It's also hard to ensure those applications
-and services are consistent, up-to-date and managed.
+Docker is a platform for developing, shipping, and running applications.
+Docker is designed to deliver your applications faster. With Docker you
+can separate your applications from your infrastructure AND treat your
+infrastructure like a managed application. We want to help you ship code
+faster, test faster, deploy faster and shorten the cycle between writing
+code and running code.

-Docker is designed to solve these problem for both developers and
-sysadmins. It is a lightweight framework (with a powerful API) that
-provides a lifecycle for building and deploying applications into
-containers.
+Docker does this by combining a lightweight container virtualization
+platform with workflow and tooling that help you manage and deploy your
+applications.

-Docker provides a way to run almost any application securely isolated
-into a container. The isolation and security allows you to run many
-containers simultaneously on your host. The lightweight nature of
+At its core Docker provides a way to run almost any application securely
+isolated into a container. The isolation and security allows you to run
+many containers simultaneously on your host. The lightweight nature of
containers, which run without the extra overload of a hypervisor, means
you can get more out of your hardware.

-**Note:** Docker itself is *shipped* with the Apache 2.0 license and it
-is completely open-source — *the pun? very much intended*.
+Surrounding the container virtualization, we provide tooling and a
+platform to help you get your applications (and their supporting
+components) into Docker containers, to distribute and ship those
+containers to your teams to develop and test on them and then to deploy
+those applications to your production environment, whether it be in a
+local data center or the Cloud.

-### What are the Docker basics I need to know?
+## What can I use Docker for?

-Docker has three major components:
+* Faster delivery of your applications
+
+Docker is perfect for helping you with the development lifecycle. Docker
+can allow your developers to develop on local containers that contain
+your applications and services. It can integrate into a continuous
+integration and deployment workflow.
+
+Your developers write code locally and share their development stack via
+Docker with their colleagues. When they are ready they can push their
+code and the stack they are developing on to a test environment and
+execute any required tests.
From the testing environment you can then
+push your Docker images into production and deploy your code.
+
+* Deploy and scale more easily
+
+Docker's container platform allows you to have highly portable
+workloads. Docker containers can run on a developer's local host, on
+physical or virtual machines in a data center or in the Cloud.
+
+Docker's portability and lightweight nature also make managing
+workloads dynamically easy. You can use Docker to build and scale out
+applications and services. Docker's speed means that scaling can be near
+real time.
+
+* Get higher density and run more workloads
+
+Docker is lightweight and fast. It provides a viable (and
+cost-effective!) alternative to hypervisor-based virtual machines. This
+is especially useful in high density environments, for example building
+your own Cloud or Platform-as-a-Service. But it is also useful
+for small and medium deployments where you want to get more out of the
+resources you have.
+
+## What are the major Docker components?
+
+Docker has two major components:
+
+* Docker: the open source container virtualization platform.
+* [Docker.io](https://index.docker.io): our Software-as-a-Service
+  platform for sharing and managing Docker containers.
+
+**Note:** Docker is licensed with the open source Apache 2.0 license.
+
+## What is the architecture of Docker?
+
+Docker has a client-server architecture. The Docker *client* talks to
+the Docker *daemon* which does the heavy lifting of building, running
+and distributing your Docker containers. Both the Docker client and the
+daemon *can* run on the same system, or you can connect a Docker client
+with a remote Docker daemon. The Docker client and daemon can
+communicate via sockets or through a RESTful API.
+
+![Docker Architecture Diagram](/article-img/architecture.svg)
+
+### The Docker daemon
+
+As shown on the diagram above, the Docker daemon runs on a host machine.
+The user does not directly interact with the daemon, but instead through
+the Docker client.
+
+### The Docker client
+
+The Docker client, in the form of the `docker` binary, is the primary user
+interface to Docker. It is tasked with accepting commands from the user
+and communicating back and forth with a Docker daemon.
+
+### Inside Docker
+
+Inside Docker there are three concepts we’ll need to understand:

-* Docker containers.
 * Docker images.
 * Docker registries.
+* Docker containers.
+
+#### Docker images
+
+The Docker image is a read-only template, for example an Ubuntu operating system
+with Apache and your web application installed. Docker containers are
+created from images. You can download Docker images that other people
+have created, and Docker provides a simple way to build new images or
+update existing images. You can consider Docker images to be the **build**
+portion of Docker.
+
+#### Docker Registries
+
+Docker registries hold images. These are public (or private!) stores
+that you can upload or download images to and from. The public Docker
+registry is called [Docker.io](http://index.docker.io). It provides a
+huge collection of existing images that you can use. These images can be
+ones you create yourself, or you can make use of images that others
+have previously created. You can consider Docker registries the
+**distribution** portion of Docker.

#### Docker containers

@@ -40,233 +131,201 @@ Docker containers are like a directory. A Docker container holds
everything that is needed for an application to run. Each container is
created from a Docker image.
Docker containers can be run, started, stopped, moved and deleted. Each container is an isolated and secure -application platform. You can consider Docker containers the *run* -portion of the Docker framework. +application platform. You can consider Docker containers the **run** +portion of Docker. -#### Docker images +## So how does Docker work? -The Docker image is a template, for example an Ubuntu -operating system with Apache and your web application installed. Docker -containers are launched from images. Docker provides a simple way to -build new images or update existing images. You can consider Docker -images to be the *build* portion of the Docker framework. +We've learned so far that: -#### Docker Registries - -Docker registries hold images. These are public (or private!) stores -that you can upload or download images to and from. These images can be -images you create yourself or you can make use of images that others -have previously created. Docker registries allow you to build simple and -powerful development and deployment work flows. You can consider Docker -registries the *share* portion of the Docker framework. - -### How does Docker work? - -Docker is a client-server framework. The Docker *client* commands the Docker -*daemon*, which in turn creates, builds and manages containers. - -The Docker daemon takes advantage of some neat Linux kernel and -operating system features, like `namespaces` and `cgroups`, to build -isolated container. Docker provides a simple abstraction layer to these -technologies. - -> **Note:** If you would like to learn more about the underlying technology, -> why not jump to [Understanding the Technology](technology.md) where we talk about them? You can -> always come back here to continue learning about features of Docker and what -> makes it different. - -## Features of Docker - -In order to get a good grasp of the capabilities of Docker you should -read the [User's Manual](http://docs.docker.io). Let's look at a summary -of Docker's features to give you an idea of how Docker might be useful -to you. - -### User centric and simple to use - -*Docker is made for humans.* - -It's easy to get started and easy to build and deploy applications with -Docker: or as we say "*dockerize*" them! As much of Docker as possible -uses plain English for commands and tries to be as lightweight and -transparent as possible. We want to get out of the way so you can build -and deploy your applications. - -### Docker is Portable - -*Dockerize And Go!* - -Docker containers are highly portable. Docker provides a standard -container format to hold your applications: - -* You take care of your applications inside the container, and; -* Docker takes care of managing the container. - -Any machine, be it bare-metal or virtualized, can run any Docker -container. The sole requirement is to have Docker installed. - -**This translates to:** - - - Reliability; - - Freeing your applications out of the dependency-hell; - - A natural guarantee that things will work, anywhere. - -### Lightweight - -*No more resources waste.* - -Containers are lightweight, in fact, they are extremely lightweight. -Unlike traditional virtual machines, which have the overhead of a -hypervisor, Docker relies on operating system level features to provide -isolation and security. A Docker container does not need anything more -than what your application needs to run. 
- -This translates to: - - - Ability to deploy a large number of applications on a single system; - - Lightning fast start up times and reduced overhead. - -### Docker can run anything - -*An amazing host! (again, pun intended.)* - -Docker isn't prescriptive about what applications or services you can run -inside containers. We provide use cases and examples for running web -services, databases, applications - just about anything you can imagine -can run in a Docker container. - -**This translates to:** - - - Ability to run a wide range of applications; - - Ability to deploy reliably without repeating yourself. - -### Plays well with others - -*A wonderful guest.* - -Today, it is possible to install and use Docker almost anywhere. Even on -non-Linux systems such as Windows or Mac OS X thanks to a project called -[Boot2Docker](http://boot2docker.io). - -**This translates to running Docker (and Docker containers!) _anywhere_:** - - - **Linux:** - Ubuntu, CentOS / RHEL, Fedora, Gentoo, openSUSE and more. - - **Infrastructure-as-a-Service:** - Amazon AWS, Google GCE, Rackspace Cloud and probably, your favorite IaaS. - - **Microsoft Windows** - - **OS X** - -### Docker is Responsible - -*A tool that you can trust.* - -Docker does not just bring you a set of tools to isolate and run -applications. It also allows you to specify constraints and controls on -those resources. - -**This translates to:** - - - Fine tuning available resources for each application; - - Allocating memory or CPU intelligently to make most of your environment; - -Without dealing with complicated commands or third party applications. - -### Docker is Social - -*Docker knows that No One Is an Island.* - -Docker allows you to share the images you've built with the world. And -lots of people have already shared their own images. - -To facilitate this sharing Docker comes with a public registry called -[Docker.io](http://index.docker.io). If you don't want your images to be -public you can also use private images on [Docker.io](https://index.docker.io) -or even run your own registry behind your firewall. - -**This translates to:** - - - No more wasting time building everything from scratch; - - Easily and quickly save your application stack; - - Share and benefit from the depth of the Docker community. - -## Docker versus Virtual Machines - -> I suppose it is tempting, if the *only* tool you have is a hammer, to -> treat *everything* as if it were a nail. -> — **_Abraham Maslow_** - -**Docker containers are:** - - - Easy on the resources; - - Extremely light to deal with; - - Do not come with substantial overhead; - - Very easy to work with; - - Agnostic; - - Can work *on* virtual machines; - - Secure and isolated; - - *Artful*, *social*, *fun*, and; - - Powerful sand-boxes. - -**Docker containers are not:** - - - Hardware or OS emulators; - - Resource heavy; - - Platform, software or language dependent. - -## Docker Use Cases - -Docker is a framework. As a result it's flexible and powerful enough to -be used in a lot of different use cases. - -### For developers - - - **Developed with developers in mind:** - Build, test and ship applications with nothing but Docker and lean - containers. - - **Re-usable building blocks to create more:** - Docker images are easily updated building blocks. - - **Automatically build-able:** - It has never been this easy to build - *anything*. - - **Easy to integrate:** - A powerful, fully featured API allows you to integrate Docker into your tooling. 
- -### For sysadmins - - - **Efficient (and DevOps friendly!) lifecycle:** - Operations and developments are consistent, repeatable and reliable. - - **Balanced environments:** - Processes between development, testing and production are leveled. - - **Improvements on speed and integration:** - Containers are almost nothing more than isolated, secure processes. - - **Lowered costs of infrastructure:** - Containers are lightweight and heavy on resources compared to virtual machines. - - **Portable configurations:** - Issues and overheads with dealing with configurations and systems are eliminated. - -### For everyone - - - **Increased security without performance loss:** - Replacing VMs with containers provide security without additional - hardware (or software). - - **Portable:** - You can easily move applications and workloads from different operating - systems and platforms. - -## Where to go from here - -### Learn about Parts of Docker and the underlying technology - -Visit [Understanding the Technology](technology.md) in our Getting Started manual. - -### Get practical and learn how to use Docker straight away - -Visit [Working with Docker](working-with-docker.md) in our Getting Started manual. - -### Get the product and go hands-on - -Visit [Get Docker](get-docker.md) in our Getting Started manual. +1. You can build Docker images that hold your applications. +2. You can create Docker containers from those Docker images to run your + applications. +3. You can share those Docker images via + [Docker.io](https://index.docker.io) or your own registry. + +Let's look at how these elements combine together to make Docker work. + +### How does a Docker Image work? + +We've already seen that Docker images are read-only templates that +Docker containers are launched from. Each image consists of a series of +layers. Docker makes use of [union file +systems](http://en.wikipedia.org/wiki/UnionFS) to combine these layers +into a single image. Union file systems allow files and directories of +separate file systems, known as branches, to be transparently overlaid, +forming a single coherent file system. + +One of the reasons Docker is so lightweight is because of these layers. +When you change a Docker image, for example update an application to a +new version, this builds a new layer. Hence, rather than replacing the whole +image or entirely rebuilding, as you may do with a virtual machine, only +that layer is added or updated. Now you don't need to distribute a whole new image, +just the update, making distributing Docker images fast and simple. + +Every image starts from a base image, for example `ubuntu`, a base Ubuntu +image, or `fedora`, a base Fedora image. You can also use images of your +own as the basis for a new image, for example if you have a base Apache +image you could use this as the base of all your web application images. + +> **Note:** +> Docker usually gets these base images from [Docker.io](https://index.docker.io). + +Docker images are then built from these base images using a simple +descriptive set of steps we call *instructions*. Each instruction +creates a new layer in our image. Instructions include steps like: + +* Run a command. +* Add a file or directory. +* Create an environment variable. +* What process to run when launching a container from this image. + +These instructions are stored in a file called a `Dockerfile`. Docker +reads this `Dockerfile` when you request an image be built, executes the +instructions and returns a final image. 
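+
+As a rough sketch of how those instruction types map to `Dockerfile`
+syntax (the application paths and commands here are purely illustrative,
+not part of any real image):
+
+    # Start from a base image
+    FROM ubuntu
+
+    # Run a command
+    RUN apt-get update
+
+    # Add a file or directory (assumes a ./webapp directory next to the Dockerfile)
+    ADD ./webapp /opt/webapp
+
+    # Create an environment variable
+    ENV PORT 5000
+
+    # What process to run when launching a container from this image
+    CMD ["/opt/webapp/start.sh"]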
+
+### How does a Docker registry work?
+
+The Docker registry is the store for your Docker images. Once you build
+a Docker image you can *push* it to a public registry [Docker.io](
+https://index.docker.io) or to your own registry running behind your
+firewall.
+
+Using the Docker client, you can search for already published images and
+then pull them down to your Docker host to build containers from them.
+
+[Docker.io](https://index.docker.io) provides both public and
+private storage for images. Public storage is searchable and can be
+downloaded by anyone. Private storage is excluded from search
+results, and only you and your users can pull images from it and use
+them to build containers. You can [sign up for a plan
+here](https://index.docker.io/plans).
+
+### How does a container work?
+
+A container consists of an operating system, user added files and
+meta-data. As we've discovered, each container is built from an image. That image tells
+Docker what the container holds, what process to run when the container
+is launched and a variety of other configuration data. The Docker image
+is read-only. When Docker runs a container from an image it adds a
+read-write layer on top of the image (using a union file system as we
+saw earlier) in which your application is then run.
+
+### What happens when you run a container?
+
+The Docker client, using the `docker` binary or via the API, tells the
+Docker daemon to run a container. Let's take a look at what happens
+next.
+
+    $ docker run -i -t ubuntu /bin/bash
+
+Let's break down this command. The Docker client is launched using the
+`docker` binary with the `run` option telling it to launch a new
+container. The bare minimum the Docker client needs to tell the
+Docker daemon to run the container is:
+
+* What Docker image to build the container from, here `ubuntu`, a base
+  Ubuntu image;
+* The command you want to run inside the container when it is launched,
+  here `/bin/bash` to start the Bash shell inside the new container.
+
+So what happens under the covers when we run this command?
+
+Docker begins with:
+
+- **Pulling the `ubuntu` image:**
+  Docker checks for the presence of the `ubuntu` image and if it doesn't
+  exist locally on the host, then Docker downloads it from
+  [Docker.io](https://index.docker.io). If the image already exists then
+  Docker uses it for the new container.
+- **Creates a new container:**
+  Once Docker has the image it creates a container from it:
+  * **Allocates a filesystem and mounts a read-write _layer_:**
+    The container is created in the file system and a read-write layer is
+    added to the image.
+  * **Allocates a network / bridge interface:**
+    Creates a network interface that allows the Docker container to talk to
+    the local host.
+  * **Sets up an IP address:**
+    Finds and attaches an available IP address from a pool.
+- **Executes a process that you specify:**
+  Runs your application, and;
+- **Captures and provides application output:**
+  Connects and logs standard input, outputs and errors for you to see how
+  your application is running.
+
+Now you have a running container! From here you can manage your running
+container, interact with your application and then when finished stop
+and remove your container.
+
+## The underlying technology
+
+Docker is written in Go and makes use of several Linux kernel features to
+deliver the features we've seen.
+
+### Namespaces
+
+Docker takes advantage of a technology called `namespaces` to provide an
+isolated workspace we call a *container*.
When you run a container,
+Docker creates a set of *namespaces* for that container.
+
+This provides a layer of isolation: each aspect of a container runs in
+its own namespace and does not have access outside it.
+
+Some of the namespaces that Docker uses are:
+
+ - **The `pid` namespace:**
+ Used for process isolation (PID: Process ID).
+ - **The `net` namespace:**
+ Used for managing network interfaces (NET: Networking).
+ - **The `ipc` namespace:**
+ Used for managing access to IPC resources (IPC: InterProcess
+Communication).
+ - **The `mnt` namespace:**
+ Used for managing mount-points (MNT: Mount).
+ - **The `uts` namespace:**
+ Used for isolating kernel and version identifiers (UTS: Unix Timesharing
+System).
+
+### Control groups
+
+Docker also makes use of another technology called `cgroups` or control
+groups. A key need to run applications in isolation is to have them only
+use the resources you want. This ensures containers are good
+multi-tenant citizens on a host. Control groups allow Docker to
+share available hardware resources between containers and, if required,
+set up limits and constraints, for example limiting the memory available
+to a specific container.
+
+### Union file systems
+
+Union file systems or UnionFS are file systems that operate by creating
+layers, making them very lightweight and fast. Docker uses union file
+systems to provide the building blocks for containers. We learned about
+union file systems earlier in this document. Docker can make use of
+several union file system variants including: AUFS, btrfs, vfs, and
+DeviceMapper.
+
+### Container format
+
+Docker combines these components into a wrapper we call a container
+format. The default container format is called `libcontainer`. Docker
+also supports traditional Linux containers using
+[LXC](https://linuxcontainers.org/). In the future Docker may support other
+container formats, for example integration with BSD Jails or Solaris
+Zones.
+
+## Next steps
+
+### Learning how to use Docker
+
+Visit [Working with Docker](working-with-docker.md).
+
+### Installing Docker
+
+Visit the [installation](/installation/#installation) section.

### Get the whole story

[https://www.docker.io/the_whole_story/](https://www.docker.io/the_whole_story/)
+
diff --git a/docs/sources/introduction/working-with-docker.md b/docs/sources/introduction/working-with-docker.md
index 8d946e6846..1abee1ce34 100644
--- a/docs/sources/introduction/working-with-docker.md
+++ b/docs/sources/introduction/working-with-docker.md
@@ -1,80 +1,63 @@
-page_title: Working with Docker and the Dockerfile
-page_description: Working with Docker and The Dockerfile explained in depth
+page_title: Introduction to working with Docker
+page_description: Introduction to working with Docker and Docker commands.
page_keywords: docker, introduction, documentation, about, technology, understanding, Dockerfile

-# Working with Docker and the Dockerfile
+# An Introduction to working with Docker

-*How to use and work with Docker?*
+**Getting started with Docker**

-> **Warning! Don't let this long page bore you.**
-> If you prefer a summary and would like to see how a specific command
+> **Note:**
+> If you would like to see how a specific command
> works, check out the glossary of all available client
-> commands on our [User's Manual: Commands Reference](
-> http://docs.docker.io/reference/commandline/cli).
+> commands on our [Commands Reference](/reference/commandline/cli).
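+
+> **Tip:**
+> A quick way to check that your client can talk to a running daemon is
+> the `docker info` command, which prints a short status summary (the
+> exact output varies by host):
+>
+>     $ sudo docker info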
## Introduction -On the last page, [Understanding the Technology](technology.md), we covered the -components that make up Docker and learnt about the -underlying technology and *how* everything works. +In the [Understanding Docker](understanding-docker.md) section we +covered the components that make up Docker, learned about the underlying +technology and saw *how* everything works. -Now, it is time to get practical and see *how to work with* the Docker client, -Docker containers and images and the `Dockerfile`. +Now, let's get an introduction to the basics of interacting with Docker. -> **Note:** You are encouraged to take a good look at the container, -> image and `Dockerfile` explanations here to have a better understanding -> on what exactly they are and to get an overall idea on how to work with -> them. On the next page (i.e., [Get Docker](get-docker.md)), you will be -> able to find links for platform-centric installation instructions. +> **Note:** +> This page assumes you have a host with a running Docker +> daemon and access to a Docker client. To see how to install Docker on +> a variety of platforms see the [installation +> section](/installation/#installation). -## Elements of Docker - -As we mentioned on the, [Understanding the Technology](technology.md) page, the main -elements of Docker are: - - - Containers; - - Images, and; - - The `Dockerfile`. - -> **Note:** This page is more *practical* than *technical*. If you are -> interested in understanding how these tools work behind the scenes -> and do their job, you can always read more on -> [Understanding the Technology](technology.md). - -## Working with the Docker client - -In order to work with the Docker client, you need to have a host with -the Docker daemon installed and running. - -### How to use the client +## How to use the client The client provides you a command-line interface to Docker. It is accessed by running the `docker` binary. -> **Tip:** The below instructions can be considered a summary of our -> *interactive tutorial*. If you prefer a more hands-on approach without -> installing anything, why not give that a shot and check out the -> [Docker Interactive Tutorial](https://www.docker.io/gettingstarted). +> **Tip:** +> The below instructions can be considered a summary of our +> [interactive tutorial](https://www.docker.io/gettingstarted). If you +> prefer a more hands-on approach without installing anything, why not +> give that a shot and check out the +> [tutorial](https://www.docker.io/gettingstarted). -The `docker` client usage consists of passing a chain of arguments: +The `docker` client usage is pretty simple. Each action you can take +with Docker is a command and each command can take a series of +flags and arguments. - # Usage: [sudo] docker [option] [command] [arguments] .. + # Usage: [sudo] docker [flags] [command] [arguments] .. # Example: $ docker run -i -t ubuntu /bin/bash -### Our first Docker command +## Using the Docker client -Let's get started with our first Docker command by checking the -version of the currently installed Docker client using the `docker -version` command. +Let's get started with the Docker client by running our first Docker +command. We're going to use the `docker version` command to return +version information on the currently installed Docker client and daemon. 
# Usage: [sudo] docker version # Example: $ docker version -This command will not only provide you the version of Docker client you -are using, but also the version of Go (the programming language powering -Docker). +This command will not only provide you with the version of the Docker client and +daemon you are using, but also the version of Go (the programming +language powering Docker). Client version: 0.8.0 Go version (client): go1.2 @@ -87,19 +70,16 @@ Docker). Last stable version: 0.8.0 -### Finding out all available commands +### Seeing what the Docker client can do -The user-centric nature of Docker means providing you a constant stream -of helpful instructions. This begins with the client itself. - -In order to get a full list of available commands run the `docker` -binary: +We can see all of the commands available to us with the Docker client by +running the `docker` binary without any options. # Usage: [sudo] docker # Example: $ docker -You will get an output with all currently available commands. +You will see a list of all currently available commands. Commands: attach Attach to a running container @@ -107,23 +87,23 @@ You will get an output with all currently available commands. commit Create a new image from a container's changes . . . -### Command usage instructions +### Seeing Docker command usage -The same way used to learn all available commands can be repeated to find -out usage instructions for a specific command. +You can also zoom in and review the usage for specific Docker commands. -Try typing Docker followed with a `[command]` to see the instructions: +Try typing `docker` followed by a `[command]` to see the usage for that +command: # Usage: [sudo] docker [command] [--help] # Example: $ docker attach - Help outputs . . . + Help output . . . -Or you can pass the `--help` flag to the `docker` binary. +You can also pass the `--help` flag to the `docker` binary. $ docker images --help -You will get an output with all available options: +This will display the help text and all available flags: Usage: docker attach [OPTIONS] CONTAINER @@ -134,43 +114,47 @@ You will get an output with all available options: ## Working with images +Let's get started with Docker by working with Docker images, the +building blocks of Docker containers. + ### Docker Images As we've discovered, a Docker image is a read-only template that we build -containers from. Every Docker container is launched from an image and -you can use both images provided by others, for example we've discovered -the base `ubuntu` image provided by Docker, as well as images built by -others. For example we can build an image that runs Apache and our own -web application as a starting point to launch containers. +containers from. Every Docker container is launched from an image. You can +use both images provided by Docker, such as the base `ubuntu` image, +as well as images built by others. For example, we can build an image that +runs Apache and our own web application as a starting point to launch containers. ### Searching for images To search for a Docker image we use the `docker search` command. The `docker search` command returns a list of all images that match your -search criteria together with additional, useful information about that -image. This includes information such as social metrics like how many -other people like the image - we call these "likes" *stars*. We also -tell you if an image is *trusted*.
A *trusted* image is built from a -known source and allows you to introspect in greater detail how the -image is constructed. +search criteria, together with some useful information about that image. + +This information includes social metrics like how many other people like +the image: we call these "likes" *stars*. We also tell you if an image +is *trusted*. A *trusted* image is built from a known source and allows +you to introspect in greater detail how the image is constructed. # Usage: [sudo] docker search [image name] # Example: $ docker search nginx - NAME DESCRIPTION STARS OFFICIAL TRUSTED - $ dockerfile/nginx Trusted Nginx (http://nginx.org/) Build 6 [OK] - paintedfox/nginx-php5 A docker image for running Nginx with PHP5. 3 [OK] - $ dockerfiles/django-uwsgi-nginx dockerfile and configuration files to buil... 2 [OK] + NAME DESCRIPTION STARS OFFICIAL TRUSTED + dockerfile/nginx Trusted Nginx (http://nginx.org/) Build 6 [OK] + paintedfox/nginx-php5 A docker image for running Nginx with PHP5. 3 [OK] + dockerfiles/django-uwsgi-nginx Dockerfile and configuration files to buil... 2 [OK] . . . -> **Note:** To learn more about trusted builds, check out [this]( -http://blog.docker.io/2013/11/introducing-trusted-builds) blog post. +> **Note:** +> To learn more about trusted builds, check out +> [this](http://blog.docker.io/2013/11/introducing-trusted-builds) blog +> post. ### Downloading an image -Downloading a Docker image is called *pulling*. To do this we hence use the -`docker pull` command. +Once we find an image we'd like to download, we can pull it down from +[Docker.io](https://index.docker.io) using the `docker pull` command. # Usage: [sudo] docker pull [image name] # Example: @@ -183,13 +167,13 @@ Downloading a Docker image is called *pulling*. To do this we hence use the . . . As you can see, Docker will download, one by one, all the layers forming -the final image. This demonstrates the *building block* philosophy of -Docker. +the image. ### Listing available images -In order to get a full list of available images, you can use the -`docker images` command. +You may already have some images you've pulled down or built yourself, +and you can use the `docker images` command to see the images +available to you locally. # Usage: [sudo] docker images # Example: @@ -198,28 +182,41 @@ In order to get a full list of available images, you can use the REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE myUserName/nginx latest a0d6c70867d2 41 seconds ago 578.8 MB nginx latest 173c2dd28ab2 3 minutes ago 578.8 MB - $ dockerfile/nginx latest 0ade68db1d05 3 weeks ago 578.8 MB + dockerfile/nginx latest 0ade68db1d05 3 weeks ago 578.8 MB + +### Building our own images + +You can build your own images using a `Dockerfile` and the `docker +build` command. The `Dockerfile` is very flexible and provides a +powerful set of instructions for building applications into Docker +images. To learn more about the `Dockerfile` see the [`Dockerfile` +Reference](/reference/builder/) and [tutorial](https://www.docker.io/learn/dockerfile/). ## Working with containers ### Docker Containers -Docker containers are directories on your Docker host that are built -from Docker images. In order to create or start a container, you need an -image. This could be the base `ubuntu` image or an image built and -shared with you or an image you've built yourself. +Docker containers run your applications and are built from Docker +images. In order to create or start a container, you need an image.
This +could be the base `ubuntu` image or an image built and shared with you +or an image you've built yourself. ### Running a new container from an image -The easiest way to create a new container is to *run* one from an image. +The easiest way to create a new container is to *run* one from an image +using the `docker run` command. # Usage: [sudo] docker run [arguments] .. # Example: $ docker run -d --name nginx_web nginx /usr/sbin/nginx + 25137497b2749e226dd08f84a17e4b2be114ddf4ada04125f130ebfe0f1a03d3 This will create a new container from an image called `nginx` which will launch the command `/usr/sbin/nginx` when the container is run. We've -also given our container a name, `nginx_web`. +also given our container a name, `nginx_web`. When the container is run +Docker will return a container ID, a long string that uniquely +identifies our container. We can use the container's name or its ID +to work with it. Containers can be run in two modes: @@ -227,7 +224,8 @@ Containers can be run in two modes: * Daemonized; An interactive container runs in the foreground and you can connect to -it and interact with it. A daemonized container runs in the background. +it and interact with it, for example by signing into a shell on that +container. A daemonized container runs in the background. A container will run as long as the process you have launched inside it is running, for example if the `/usr/sbin/nginx` process stops running @@ -237,7 +235,7 @@ the container will also stop. We can see a list of all the containers on our host using the `docker ps` command. By default the `docker ps` command only shows running -containers. But we can also add the `-a` flag to show *all* containers: +containers. But we can also add the `-a` flag to show *all* containers: both running and stopped. # Usage: [sudo] docker ps [-a] @@ -249,8 +247,8 @@ both running and stopped. ### Stopping a container -You can use the `docker stop` command to stop an active container. This will gracefully -end the active process. +You can use the `docker stop` command to stop an active container. This +will gracefully end the active process. # Usage: [sudo] docker stop [container ID] # Example: @@ -260,6 +258,10 @@ end the active process. If the `docker stop` command succeeds it will return the name of the container it has stopped. +> **Note:** +> If you want to stop a container more aggressively you can use the +> `docker kill` command. + ### Starting a Container Stopped containers can be started again. @@ -272,136 +274,18 @@ Stopped containers can be started again. If the `docker start` command succeeds it will return the name of the freshly started container. -## Working with the Dockerfile +## Next steps -The `Dockerfile` holds the set of instructions Docker uses to build a Docker image. - -> **Tip:** Below is a short summary of our full Dockerfile tutorial. In -> order to get a better-grasp of how to work with these automation -> scripts, check out the [Dockerfile step-by-step -> tutorial](https://www.docker.io/learn/dockerfile). - -A `Dockerfile` contains instructions written in the following format: - - # Usage: Instruction [arguments / command] .. - # Example: - FROM ubuntu - -A `#` sign is used to provide a comment: - - # Comments .. - -> **Tip:** The `Dockerfile` is very flexible and provides a powerful set -> of instructions for building applications. To learn more about the -> `Dockerfile` and its instructions see the [Dockerfile -> Reference](http://docs.docker.io/reference/builder/).
- -### First steps with the Dockerfile - -It's a good idea to add some comments to the start of your `Dockerfile` -to provide explanation and exposition to any future consumers, for -example: - - # - # Dockerfile to install Nginx - # VERSION 2 - EDITION 1 - -The first instruction in any `Dockerfile` must be the `FROM` instruction. The `FROM` instruction specifies the image name that this new image is built from, it is often a base image like `ubuntu`. - - # Base image used is Ubuntu: - FROM ubuntu - -Next, we recommend you use the `MAINTAINER` instruction to tell people who manages this image. - - # Maintainer: O.S. Tezer (@ostezer) - MAINTAINER O.S. Tezer, ostezer@gmail.com - -After this we can add additional instructions that represent the steps -to build our actual image. - -### Our Dockerfile so far - -So far our `Dockerfile` will look like. - - # Dockerfile to install Nginx - # VERSION 2 - EDITION 1 - FROM ubuntu - MAINTAINER O.S. Tezer, ostezer@gmail.com - -Let's install a package and configure an application inside our image. To do this we use a new -instruction: `RUN`. The `RUN` instruction executes commands inside our -image, for example. The instruction is just like running a command on -the command line inside a container. - - RUN echo "deb http://archive.ubuntu.com/ubuntu/ raring main universe" >> /etc/apt/sources.list - RUN apt-get update - RUN apt-get install -y nginx - RUN echo "\ndaemon off;" >> /etc/nginx/nginx.conf - -We can see here that we've *run* four instructions. Each time we run an -instruction a new layer is added to our image. Here's we've added an -Ubuntu package repository, updated the packages, installed the `nginx` -package and then echo'ed some configuration to the default -`/etc/nginx/nginx.conf` configuration file. - -Let's specify another instruction, `CMD`, that tells Docker what command -to run when a container is created from this image. - - CMD /usr/sbin/nginx - -We can now save this file and use it build an image. - -### Using a Dockerfile - -Docker uses the `Dockerfile` to build images. The build process is initiated by the `docker build` command. - - # Use the Dockerfile at the current location - # Usage: [sudo] docker build . - # Example: - $ docker build -t="my_nginx_image" . - - Uploading context 25.09 kB - Uploading context - Step 0 : FROM ubuntu - ---> 9cd978db300e - Step 1 : MAINTAINER O.S. Tezer, ostezer@gmail.com - ---> Using cache - ---> 467542d0cdd3 - Step 2 : RUN echo "deb http://archive.ubuntu.com/ubuntu/ raring main universe" >> /etc/apt/sources.list - ---> Using cache - ---> 0a688bd2a48c - Step 3 : RUN apt-get update - ---> Running in de2937e8915a - . . . - Step 10 : CMD /usr/sbin/nginx - ---> Running in b4908b9b9868 - ---> 626e92c5fab1 - Successfully built 626e92c5fab1 - -Here we can see that Docker has executed each instruction in turn and -each instruction has created a new layer in turn and each layer identified -by a new ID. The `-t` flag allows us to specify a name for our new -image, here `my_nginx_image`. - -We can see our new image using the `docker images` command. - - $ docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE - my_nginx_img latest 626e92c5fab1 57 seconds ago 337.6 MB - -## Where to go from here +Here we've learned the basics of how to interact with Docker images and +how to run and work with our first container. ### Understanding Docker -Visit [Understanding Docker](understanding-docker.md) in our Getting Started manual. +Visit [Understanding Docker](understanding-docker.md). 
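Putting the commands covered on this page together, here is a sketch of a complete first session; it assumes the `nginx` image and the `nginx_web` container name used in the examples above:

    # Pull the image, run a named daemonized container from it,
    # list it, then stop and start it again.
    $ sudo docker pull nginx
    $ sudo docker run -d --name nginx_web nginx /usr/sbin/nginx
    $ sudo docker ps
    $ sudo docker stop nginx_web
    $ sudo docker start nginx_web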
-### Learn about parts of Docker and the underlying technology -Visit [Understanding the Technology](technology.md) in our Getting Started manual. - -### Get the product and go hands-on - -Visit [Get Docker](get-docker.md) in our Getting Started manual. +### Installing Docker -Visit the [installation](/installation/#installation) section. ### Get the whole story diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 47f4724b1a..84b56866ba 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -20,13 +20,23 @@ page_keywords: API, Docker, rcli, REST, documentation -The current version of the API is v1.11 +The current version of the API is v1.12 Calling /images//insert is the same as calling -/v1.11/images//insert +/v1.12/images//insert You can still call an old version of the API using -/v1.11/images//insert +/v1.12/images//insert + +## v1.12 + +### Full Documentation + +[*Docker Remote API v1.12*](/reference/api/docker_remote_api_v1.12/) + +### What's new + +`docker build` now has support for the `forcerm` parameter to always remove intermediate containers ## v1.11 diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md index 721244b49e..f743cb0b22 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.md +++ b/docs/sources/reference/api/docker_remote_api_v1.10.md @@ -1023,6 +1023,7 @@ Build an image from Dockerfile via stdin the resulting image in case of success - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image + - **rm** - remove intermediate containers after a successful build Request Headers: diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index 53e07b380c..8f4709ee69 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -1063,6 +1063,7 @@ Build an image from Dockerfile via stdin the resulting image in case of success - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image + - **rm** - remove intermediate containers after a successful build Request Headers: diff --git a/docs/sources/reference/api/docker_remote_api_v1.12.md b/docs/sources/reference/api/docker_remote_api_v1.12.md new file mode 100644 index 0000000000..188d4fe0a2 --- /dev/null +++ b/docs/sources/reference/api/docker_remote_api_v1.12.md @@ -0,0 +1,1363 @@ +page_title: Remote API v1.12 +page_description: API Documentation for Docker +page_keywords: API, Docker, rcli, REST, documentation + +# Docker Remote API v1.12 + +## 1. Brief introduction + + - The Remote API has replaced rcli + - The daemon listens on `unix:///var/run/docker.sock` but you can + [*Bind Docker to another host/port or a Unix socket*]( + /use/basics/#bind-docker). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` + and `stderr` + +# 2.
Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + + **Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "base:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "9cd87474be90", + "Image": "base:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "3176a2479c92", + "Image": "base:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Image": "base:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + } + ] + + Query Parameters: + +   + + - **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default + - **limit** – Show `limit` last created + containers, include non-running ones. + - **since** – Show only containers created since Id, include + non-running ones. + - **before** – Show only containers created before Id, include + non-running ones. + - **size** – 1/True/true or 0/False/false, Show the containers' + sizes + + Status Codes: + + - **200** – no error + - **400** – bad parameter + - **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + + **Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Dns":null, + "Image":"base", + "Volumes":{ + "/tmp": {} + }, + "VolumesFrom":"", + "WorkingDir":"", + "DisableNetwork": false, + "ExposedPorts":{ + "22/tcp": {} + } + } + + **Example response**: + + HTTP/1.1 201 OK + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + + Json Parameters: + +   + + - **config** – the container's configuration + + Query Parameters: + +   + + - **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`.
+ + Status Codes: + + - **201** – no error + - **404** – no such container + - **406** – impossible to attach (container not running) + - **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + + **Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "base", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir":"" + + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": null, + "PublishAllPorts": false + } + } + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + + **Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles":[ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes":[ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + + Query Parameters: + +   + + - **ps_args** – ps arguments to use (eg. aux) + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +### Get container logs + +`GET /containers/(id)/logs` + +Get stdout and stderr logs from the container ``id`` + + **Example request**: + + GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1 HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + + Query Parameters: + +   + + - **follow** – 1/True/true or 0/False/false, return stream. + Default false + - **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log. Default false + - **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log. Default false + - **timestamps** – 1/True/true or 0/False/false, if logs=true, print + timestamps for every log line. 
Default false + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + + **Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path":"/dev", + "Kind":0 + }, + { + "Path":"/dev/kmsg", + "Kind":1 + }, + { + "Path":"/test", + "Kind":1 + } + ] + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + + **Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ STREAM }} + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + + **Example request**: + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds":["/tmp:/tmp"], + "LxcConf":{"lxc.utsname":"docker"}, + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts":false, + "Privileged":false + } + + **Example response**: + + HTTP/1.1 204 No Content + Content-Type: text/plain + + Json Parameters: + +   + + - **hostConfig** – the container's host configuration (optional) + + Status Codes: + + - **204** – no error + - **404** – no such container + - **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + + **Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + + **Example response**: + + HTTP/1.1 204 OK + + Query Parameters: + +   + + - **t** – number of seconds to wait before killing the container + + Status Codes: + + - **204** – no error + - **404** – no such container + - **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + + **Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + + **Example response**: + + HTTP/1.1 204 OK + + Query Parameters: + +   + + - **t** – number of seconds to wait before killing the container + + Status Codes: + + - **204** – no error + - **404** – no such container + - **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + + **Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + + **Example response**: + + HTTP/1.1 204 OK + + Query Parameters + + - **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call will wait for the container to exit. + + Status Codes: + + - **204** – no error + - **404** – no such container + - **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + + **Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + + Query Parameters: + +   + + - **logs** – 1/True/true or 0/False/false, return logs. Default + false + - **stream** – 1/True/true or 0/False/false, return stream. + Default false + - **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin.
Default false + - **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false + - **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + + Status Codes: + + - **200** – no error + - **400** – bad parameter + - **404** – no such container + - **500** – server error + + **Stream details**: + + When the TTY setting is enabled in + [`POST /containers/create` + ](../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. + When the TTY is disabled, then the stream is multiplexed to separate + stdout and stderr. + + The format is a **Header** and a **Payload** (frame). + + **HEADER** + + The header contains the information on which stream the payload will + be written (stdout or stderr). It also contains the size of the + associated frame, encoded on the last 4 bytes (uint32). + + It is encoded on the first 8 bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + + `STREAM_TYPE` can be: + + - 0: stdin (will be written on stdout) + - 1: stdout + - 2: stderr + + `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of + the uint32 size encoded as big endian. + + **PAYLOAD** + + The payload is the raw stream. + + **IMPLEMENTATION** + + The simplest way to implement the Attach protocol is the following: + + 1. Read 8 bytes + 2. Choose stdout or stderr depending on the first byte + 3. Extract the frame size from the last 4 bytes + 4. Read the extracted size and output it on the correct output + 5. Go back to step 1 + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then return the exit code + + **Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode":0} + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + + **Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + + **Example response**: + + HTTP/1.1 204 OK + + Query Parameters: + +   + + - **v** – 1/True/true or 0/False/false, Remove the volumes + associated with the container. Default false + - **force** – 1/True/true or 0/False/false, Removes the container + even if it was running.
Default false + + Status Codes: + + - **204** – no error + - **400** – bad parameter + - **404** – no such container + - **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + + **Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource":"test.txt" + } + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ STREAM }} + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + +### Create an image + +`POST /images/create` + +Create an image, either by pulling it from the registry or by importing it + + **Example request**: + + POST /images/create?fromImage=base HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} + {"error":"Invalid..."} + ... + + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. + + Query Parameters: + +   + + - **fromImage** – name of the image to pull + - **fromSrc** – source to import, - means stdin + - **repo** – repository + - **tag** – tag + - **registry** – the registry to pull from + + Request Headers: + +   + + - **X-Registry-Auth** – base64-encoded AuthConfig object + + Status Codes: + + - **200** – no error + - **500** – server error + +### Insert a file in an image + +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` + + **Example request**: + + POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Inserting..."} + {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}} + {"error":"Invalid..."} + ...
+ + Status Codes: + + - **200** – no error + - **500** – server error + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + + **Example request**: + + GET /images/base/json HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "parent":"27cf784147099545", + "created":"2013-03-23T22:24:18.818426-07:00", + "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "container_config": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "PortSpecs":null, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"] + ,"Dns":null, + "Image":"base", + "Volumes":null, + "VolumesFrom":"", + "WorkingDir":"" + }, + "Size": 6824592 + } + + Status Codes: + + - **200** – no error + - **404** – no such image + - **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + + **Example request**: + + GET /images/base/history HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id":"b750fe79269d", + "Created":1364102658, + "CreatedBy":"/bin/bash" + }, + { + "Id":"27cf78414709", + "Created":1364068391, + "CreatedBy":"" + } + ] + + Status Codes: + + - **200** – no error + - **404** – no such image + - **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + + **Example request**: + + POST /images/test/push HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pushing..."} + {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}} + {"error":"Invalid..."} + ... + + Query Parameters: + +   + + - **registry** – the registry you want to push to, optional + + Request Headers: + +   + + - **X-Registry-Auth** – include a base64-encoded AuthConfig + object. + + Status Codes: + + - **200** – no error + - **404** – no such image + - **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + + **Example request**: + + POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 + + **Example response**: + + HTTP/1.1 201 OK + + Query Parameters: + +   + + - **repo** – The repository to tag in + - **force** – 1/True/true or 0/False/false, default false + + Status Codes: + + - **201** – no error + - **400** – bad parameter + - **404** – no such image + - **409** – conflict + - **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + + **Example request**: + + DELETE /images/test HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged":"3e2f21a89f"}, + {"Deleted":"3e2f21a89f"}, + {"Deleted":"53b4f83ac9"} + ] + + Query Parameters: + +   + + - **force** – 1/True/true or 0/False/false, default false + - **noprune** – 1/True/true or 0/False/false, default false + + Status Codes: + + - **200** – no error + - **404** – no such image + - **409** – conflict + - **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker.io](https://index.docker.io).
+ +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + + **Example request**: + + GET /images/search?term=sshd HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + + Query Parameters: + +   + + - **term** – term to search + + Status Codes: + + - **200** – no error + - **500** – server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + + **Example request**: + + POST /build HTTP/1.1 + + {{ STREAM }} + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream":"Step 1..."} + {"stream":"..."} + {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}} + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, xz. + + The archive must include a file called `Dockerfile` + at its root. It may include any number of other files, + which will be accessible in the build context (See the [*ADD build + command*](/reference/builder/#dockerbuilder)). + + Query Parameters: + +   + + - **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success + - **q** – suppress verbose build output + - **nocache** – do not use the cache when building the image + - **rm** - remove intermediate containers after a successful build (default behavior) + - **forcerm** - always remove intermediate containers (includes rm) + + Request Headers: + +   + + - **Content-type** – should be set to + `"application/tar"`.
+ - **X-Registry-Config** – base64-encoded ConfigFile object + + Status Codes: + + - **200** – no error + - **500** – server error + +### Check auth configuration + +`POST /auth` + +Get the default username and email + + **Example request**: + + POST /auth HTTP/1.1 + Content-Type: application/json + + { + "username":"hannibal", + "password":"xxxx", + "email":"hannibal@a-team.com", + "serveraddress":"https://index.docker.io/v1/" + } + + **Example response**: + + HTTP/1.1 200 OK + + Status Codes: + + - **200** – no error + - **204** – no error + - **500** – server error + +### Display system-wide information + +`GET /info` + +Display system-wide information + + **Example request**: + + GET /info HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers":11, + "Images":16, + "Debug":false, + "NFd": 11, + "NGoroutines":21, + "MemoryLimit":true, + "SwapLimit":false, + "IPv4Forwarding":true + } + + Status Codes: + + - **200** – no error + - **500** – server error + +### Show the docker version information + +`GET /version` + +Show the docker version information + + **Example request**: + + GET /version HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version":"0.2.2", + "GitCommit":"5a2a5cc+CHANGES", + "GoVersion":"go1.0.3" + } + + Status Codes: + + - **200** – no error + - **500** – server error + +### Ping the docker server + +`GET /_ping` + +Ping the docker server + + **Example request**: + + GET /_ping HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + + OK + + Status Codes: + + - **200** - no error + - **500** - server error + +### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + + **Example request**: + + POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "DisableNetwork": false, + "ExposedPorts":{ + "22/tcp": {} + } + } + + **Example response**: + + HTTP/1.1 201 OK + Content-Type: application/vnd.docker.raw-stream + + {"Id":"596069db4bf5"} + + Json Parameters: + + + + - **config** - the container's configuration + + Query Parameters: + +   + + - **container** – source container + - **repo** – repository + - **tag** – tag + - **m** – commit message + - **author** – author (eg.
"John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + + Status Codes: + + - **201** – no error + - **404** – no such container + - **500** – server error + +### Monitor Docker's events + +`GET /events` + +Get events from docker, either in real time via streaming, or +via polling (using since) + + **Example request**: + + GET /events?since=1374067924 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} + {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} + {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} + {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} + + Query Parameters: + +   + + - **since** – timestamp used for polling + - **until** – timestamp used for polling + + Status Codes: + + - **200** – no error + - **500** – server error + +### Get a tarball containing all images and tags in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository +specified by `name`. + + **Example request** + + GET /images/ubuntu/get + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + + Status Codes: + + - **200** – no error + - **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into the docker repository. + + **Example request** + + POST /images/load + + Tarball in body + + **Example response**: + + HTTP/1.1 200 OK + + Status Codes: + + - **200** – no error + - **500** – server error + +# 3. Going further + +## 3.1 Inside `docker run` + +Here are the steps of `docker run`: + +- Create the container + +- If the status code is 404, it means the image doesn't exists: + - Try to pull it + - Then retry to create the container + +- Start the container + +- If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + +- If in detached mode or only stdin is attached: + - Display the container's id + +## 3.2 Hijacking + +In this version of the API, /attach, uses hijacking to transport stdin, +stdout and stderr on the same socket. This might change in the future. + +## 3.3 CORS Requests + +To enable cross origin requests to the remote api add the flag +"–api-enable-cors" when running docker in daemon mode. + + $ docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 98e9e0f544..6a4ae4ad25 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -131,6 +131,16 @@ any point in an image's history, much like source control. The *exec* form makes it possible to avoid shell string munging, and to `RUN` commands using a base image that does not contain `/bin/sh`. +The cache for `RUN` instructions isn't invalidated automatically during the +next build. The cache for an instruction like `RUN apt-get dist-upgrade -y` +will be reused during the next build. +The cache for `RUN` instructions can be invalidated by using the `--no-cache` +flag, for example `docker build --no-cache`. + +The first encountered `ADD` instruction will invalidate the cache for all +following instructions from the 'Dockerfile' if the contents of the context +have changed. This will also invalidate the cache for `RUN` instructions. 
+ ### Known Issues (RUN) - [Issue 783](https://github.com/dotcloud/docker/issues/783) is about file @@ -225,7 +235,9 @@ being built (also called the *context* of the build) or a remote file URL. `` is the absolute path to which the source will be copied inside the destination container. -All new files and directories are created with mode 0755, uid and gid 0. +All new files and directories are created with a uid and gid of 0. + +In the case where `` is a remote file URL, the destination will have permissions 600. > **Note**: > If you build using STDIN (`docker build - < somefile`), there is no @@ -325,15 +337,17 @@ optional but default, you could use a CMD: The `VOLUME` instruction will create a mount point with the specified name and mark it as holding externally mounted volumes from native host or other -containers. For more information/examples and mounting instructions via docker -client, refer to [*Share Directories via Volumes*]( +containers. The value can be a JSON array, `VOLUME ["/var/log/"]`, or a plain +string, `VOLUME /var/log`. For more information/examples and mounting +instructions via the Docker client, refer to [*Share Directories via Volumes*]( /use/working_with_volumes/#volume-def) documentation. ## USER USER daemon -The `USER` instruction sets the username or UID to use when running the image. +The `USER` instruction sets the username or UID to use when running the image +and for any following `RUN` directives. ## WORKDIR diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 8e0507cbf8..2a1be89086 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -50,35 +50,38 @@ expect an integer, and they can only be specified once. ## daemon Usage of docker: + --api-enable-cors=false Enable CORS headers in the remote API + -b, --bridge="" Attach containers to a pre-existing network bridge + use 'none' to disable container networking + --bip="" Use this CIDR notation address for the network bridge's IP, not compatible with -b + -d, --daemon=false Enable daemon mode + -D, --debug=false Enable debug mode + --dns=[] Force docker to use specific DNS servers + --dns-search=[] Force Docker to use specific DNS search domains + -e, --exec-driver="native" Force the docker runtime to use a specific exec driver + -G, --group="docker" Group to assign the unix socket specified by -H when running in daemon mode + use '' (the empty string) to disable setting of a group + -g, --graph="/var/lib/docker" Path to use as the root of the docker runtime + -H, --host=[] The socket(s) to bind to in daemon mode + specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. 
+ --icc=true Enable inter-container communication + --ip="0.0.0.0" Default IP address to use when binding container ports + --ip-forward=true Enable net.ipv4.ip_forward + --iptables=true Enable Docker's addition of iptables rules + --mtu=0 Set the containers network MTU + if no value is provided: default to the default route MTU or 1500 if no default route is available + -p, --pidfile="/var/run/docker.pid" Path to use for daemon PID file + -r, --restart=true Restart previously running containers + -s, --storage-driver="" Force the docker runtime to use a specific storage driver + --selinux-enabled=false Enable selinux support + --tls=false Use TLS; implied by tls-verify flags + --tlscacert="/home/sven/.docker/ca.pem" Trust only remotes providing a certificate signed by the CA given here + --tlscert="/home/sven/.docker/cert.pem" Path to TLS certificate file + --tlskey="/home/sven/.docker/key.pem" Path to TLS key file + --tlsverify=false Use TLS and verify the remote (daemon: verify client, client: verify daemon) + -v, --version=false Print version information and quit - -D, --debug=false: Enable debug mode - -H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. - -G, --group="docker": Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group - --api-enable-cors=false: Enable CORS headers in the remote API - -b, --bridge="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking - -bip="": Use this CIDR notation address for the network bridge᾿s IP, not compatible with -b - -d, --daemon=false: Enable daemon mode - --dns=[]: Force docker to use specific DNS servers - --dns-search=[]: Force Docker to use specific DNS search domains - --enable-selinux=false: Enable selinux support for running containers - -g, --graph="/var/lib/docker": Path to use as the root of the docker runtime - --icc=true: Enable inter-container communication - --ip="0.0.0.0": Default IP address to use when binding container ports - --ip-forward=true: Enable net.ipv4.ip_forward - --iptables=true: Enable Docker᾿s addition of iptables rules - -p, --pidfile="/var/run/docker.pid": Path to use for daemon PID file - -r, --restart=true: Restart previously running containers - -s, --storage-driver="": Force the docker runtime to use a specific storage driver - -e, --exec-driver="native": Force the docker runtime to use a specific exec driver - -v, --version=false: Print version information and quit - --tls=false: Use TLS; implied by tls-verify flags - --tlscacert="~/.docker/ca.pem": Trust only remotes providing a certificate signed by the CA given here - --tlscert="~/.docker/cert.pem": Path to TLS certificate file - --tlskey="~/.docker/key.pem": Path to TLS key file - --tlsverify=false: Use TLS and verify the remote (daemon: verify client, client: verify daemon) - --mtu=0: Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available - - Options with [] may be specified multiple times. +Options with [] may be specified multiple times. The Docker daemon is the persistent process that manages containers. Docker uses the same binary for both the daemon and client. To run the @@ -126,12 +129,12 @@ like this: ## attach -Attach to a running container. 
+ Usage: docker attach [OPTIONS] CONTAINER - Usage: docker attach CONTAINER + Attach to a running container - --no-stdin=false: Do not attach stdin - --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) + --no-stdin=false Do not attach stdin + --sig-proxy=true Proxify all received signal to the process (even in non-tty mode) The `attach` command will allow you to view or interact with any running container, detached (`-d`) @@ -185,15 +188,15 @@ To kill the container, use `docker kill`. ## build -Build a new container image from the source code at PATH - Usage: docker build [OPTIONS] PATH | URL | - - -t, --tag="": Repository name (and optionally a tag) to be applied - to the resulting image in case of success. - -q, --quiet=false: Suppress the verbose output generated by the containers. - --no-cache: Do not use the cache when building the image. - --rm=true: Remove intermediate containers after a successful build + Build a new container image from the source code at PATH + + --force-rm=false Always remove intermediate containers, even after unsuccessful builds + --no-cache=false Do not use cache when building the image + -q, --quiet=false Suppress the verbose output generated by the containers + --rm=true Remove intermediate containers after a successful build + -t, --tag="" Repository name (and optionally a tag) to be applied to the resulting image in case of success Use this command to build Docker images from a Dockerfile and a "context". @@ -201,7 +204,8 @@ and a "context". The files at `PATH` or `URL` are called the "context" of the build. The build process may refer to any of the files in the context, for example when using an [*ADD*](/reference/builder/#dockerfile-add) instruction. When a single Dockerfile is -given as `URL`, then no context is set. +given as `URL` or is piped through STDIN (`docker build - < Dockerfile`), then +no context is set. When a Git repository is set as `URL`, then the repository is used as the context. The Git repository is cloned with its @@ -283,14 +287,21 @@ repository is used as Dockerfile. Note that you can specify an arbitrary Git repository by using the `git://` schema. -## commit +> **Note:** `docker build` will return a `no such file or directory` error +> if the file or directory does not exist in the uploaded context. This may +> happen if there is no context, or if you specify a file that is elsewhere +> on the Host system. The context is limited to the current directory (and its +> children) for security reasons, and to ensure repeatable builds on remote +> Docker hosts. This is also the reason why `ADD ../file` will not work. -Create a new image from a container᾿s changes +## commit Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] - -m, --message="": Commit message - -a, --author="": Author (eg. "John Hannibal Smith " + Create a new image from a container's changes + + -a, --author="" Author (eg. "John Hannibal Smith " + -m, --message="" Commit message It can be useful to commit a container's file changes or settings into a new image. This allows you to debug a container by running an interactive @@ -317,8 +328,7 @@ path. Paths are relative to the root of the filesystem. Usage: docker cp CONTAINER:PATH HOSTPATH - $ sudo docker cp 7bb0e258aefe:/etc/debian_version . - $ sudo docker cp blue_frog:/etc/hosts .
+ Copy files/folders from the PATH to the HOSTPATH ## diff @@ -326,6 +336,8 @@ List the changed files and directories in a container᾿s filesystem Usage: docker diff CONTAINER + Inspect changes on a container's filesystem + There are 3 events that are listed in the `diff`: 1. `A` - Add @@ -350,14 +362,12 @@ For example: ## events -Get real time events from the server + Usage: docker events [OPTIONS] - Usage: docker events + Get real time events from the server - --since="": Show all events created since timestamp - (either seconds since epoch, or date string as below) - --until="": Show events created before timestamp - (either seconds since epoch, or date string as below) + --since="" Show all events created since timestamp + --until="" Stream events until this timestamp ### Examples @@ -395,22 +405,22 @@ You'll need two shells for this example. ## export -Export the contents of a filesystem as a tar archive to STDOUT - Usage: docker export CONTAINER + Export the contents of a filesystem as a tar archive to STDOUT + For example: $ sudo docker export red_panda > latest.tar ## history -Show the history of an image - Usage: docker history [OPTIONS] IMAGE - --no-trunc=false: Don᾿t truncate output - -q, --quiet=false: Only show numeric IDs + Show the history of an image + + --no-trunc=false Don't truncate output + -q, --quiet=false Only show numeric IDs To see how the `docker:latest` image was built: @@ -425,13 +435,13 @@ To see how the `docker:latest` image was built: ## images -List images - Usage: docker images [OPTIONS] [NAME] - -a, --all=false: Show all images (by default filter out the intermediate image layers) - --no-trunc=false: Don᾿t truncate output - -q, --quiet=false: Only show numeric IDs + List images + + -a, --all=false Show all images (by default filter out the intermediate image layers) + --no-trunc=false Don't truncate output + -q, --quiet=false Only show numeric IDs The default `docker images` will show all top level images, their repository and tags, and their virtual size. @@ -473,8 +483,7 @@ by default. Usage: docker import URL|- [REPOSITORY[:TAG]] - Create an empty filesystem image and import the contents of the tarball - (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it. + Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it. URLs must start with `http` and point to a single file archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) containing a @@ -507,10 +516,12 @@ tar, then the ownerships might not get preserved. ## info -Display system-wide information. - Usage: docker info + Display system-wide information + +For example: + $ sudo docker info Containers: 292 Images: 194 @@ -528,11 +539,11 @@ ensure we know how your setup is configured. ## inspect -Return low-level information on a container/image - Usage: docker inspect CONTAINER|IMAGE [CONTAINER|IMAGE...] - -f, --format="": Format the output using the given go template. + Return low-level information on a container/image + + -f, --format="" Format the output using the given go template. By default, this will render all results in a JSON array. If a format is specified, the given template will be executed for each result. @@ -582,11 +593,11 @@ contains complex json object, so to grab it as JSON, you use ## kill -Kill a running container (send SIGKILL, or specified signal) - Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] 
- -s, --signal="KILL": Signal to send to the container + Kill a running container (send SIGKILL, or specified signal) + + -s, --signal="KILL" Signal to send to the container The main process inside the container will be sent SIGKILL, or any signal specified with option `--signal`. @@ -602,11 +613,11 @@ signal specified with option `--signal`. ## load -Load an image from a tar archive on STDIN - Usage: docker load - -i, --input="": Read from a tar archive file, instead of STDIN + Load an image from a tar archive on STDIN + + -i, --input="" Read from a tar archive file, instead of STDIN Loads a tarred repository from a file or the standard input stream. Restores both images and tags. @@ -628,13 +639,13 @@ Restores both images and tags. ## login -Register or Login to the docker registry server - Usage: docker login [OPTIONS] [SERVER] - -e, --email="": Email - -p, --password="": Password - -u, --username="": Username + Register or Login to a docker registry server, if no server is specified "https://index.docker.io/v1/" is the default. + + -e, --email="" Email + -p, --password="" Password + -u, --username="" Username If you want to login to a private registry you can specify this by adding the server name. @@ -644,12 +655,12 @@ specify this by adding the server name. ## logs -Fetch the logs of a container + Usage: docker logs CONTAINER - Usage: docker logs [OPTIONS] CONTAINER + Fetch the logs of a container - -f, --follow=false: Follow log output - -t, --timestamps=false: Show timestamps + -f, --follow=false Follow log output + -t, --timestamps=false Show timestamps The `docker logs` command batch-retrieves all logs present at the time of execution. @@ -660,24 +671,24 @@ and stderr. ## port - Usage: docker port [OPTIONS] CONTAINER PRIVATE_PORT + Usage: docker port CONTAINER PRIVATE_PORT -Lookup the public-facing port which is NAT-ed to PRIVATE_PORT + Lookup the public-facing port which is NAT-ed to PRIVATE_PORT ## ps -List containers - Usage: docker ps [OPTIONS] - -a, --all=false: Show all containers. Only running containers are shown by default. - --before="": Show only container created before Id or Name, include non-running ones. - -l, --latest=false: Show only the latest created container, include non-running ones. - -n=-1: Show n last created containers, include non-running ones. - --no-trunc=false: Don᾿t truncate output - -q, --quiet=false: Only display numeric IDs - -s, --size=false: Display sizes, not to be used with -q - --since="": Show only containers created since Id or Name, include non-running ones. + List containers + + -a, --all=false Show all containers. Only running containers are shown by default. + --before="" Show only container created before Id or Name, include non-running ones. + -l, --latest=false Show only the latest created container, include non-running ones. + -n=-1 Show n last created containers, include non-running ones. + --no-trunc=false Don't truncate output + -q, --quiet=false Only display numeric IDs + -s, --size=false Display sizes + --since="" Show only containers created since Id or Name, include non-running ones. Running `docker ps` showing 2 linked containers. @@ -691,10 +702,10 @@ Running `docker ps` showing 2 linked containers. ## pull -Pull an image or a repository from the registry - Usage: docker pull NAME[:TAG] + Pull an image or a repository from the registry + Most of your images will be created on top of a base image from the [Docker.io](https://index.docker.io) registry. 
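For example, to pull a single tagged image rather than every image in a repository — a sketch using the public `ubuntu` repository:

    # Pull only the 12.04 tag of the ubuntu repository.
    $ sudo docker pull ubuntu:12.04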
@@ -713,30 +724,30 @@ use `docker pull`: ## push -Push an image or a repository to the registry - Usage: docker push NAME[:TAG] + Push an image or a repository to the registry + Use `docker push` to share your images on public or private registries. ## restart -Restart a running container + Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] - Usage: docker restart [OPTIONS] NAME + Restart a running container - -t, --time=10: Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10 + -t, --time=10 Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10 ## rm -Remove one or more containers + Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] - Usage: docker rm [OPTIONS] CONTAINER + Remove one or more containers - -l, --link="": Remove the link instead of the actual container - -f, --force=false: Force removal of running container - -v, --volumes=false: Remove the volumes associated to the container + -f, --force=false Force removal of running container + -l, --link=false Remove the specified link and not the underlying container + -v, --volumes=false Remove the volumes associated to the container ### Known Issues (rm) @@ -768,12 +779,12 @@ delete them. Any running containers will not be deleted. ## rmi -Remove one or more images - Usage: docker rmi IMAGE [IMAGE...] - -f, --force=false: Force - --no-prune=false: Do not delete untagged parents + Remove one or more images + + -f, --force=false Force + --no-prune=false Do not delete untagged parents ### Removing tagged images @@ -805,43 +816,43 @@ removed before the image is removed. ## run -Run a command in a new container + Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] - Usage: docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] + Run a command in a new container - -a, --attach=[] Attach to stdin, stdout or stderr. 
-      -c, --cpu-shares=0         CPU shares (relative weight)
-      --cidfile=""               Write the container ID to the file
-      -d, --detach=false         Detached mode: Run container in the background, print new container id
-      --dns=[]                   Set custom dns servers
-      --dns-search=[]            Set custom dns search domains
-      -e, --env=[]               Set environment variables
-      --entrypoint=""            Overwrite the default entrypoint of the image
-      --env-file=[]              Read in a line delimited file of ENV variables
-      --expose=[]                Expose a port from the container without publishing it to your host
-      -h, --hostname=""          Container host name
-      -i, --interactive=false    Keep stdin open even if not attached
-      --link=[]                  Add link to another container (name:alias)
-      --lxc-conf=[]              (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
-      -m, --memory=""            Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
-      --name=""                  Assign a name to the container
-      --net="bridge"             Set the Network mode for the container
-                                   'bridge': creates a new network stack for the container on the docker bridge
-                                   'none': no networking for this container
-                                   'container:<name|id>': reuses another container network stack
-                                   'host': use the host network stack inside the contaner
-      -p, --publish=[]           Publish a container's port to the host
-                                   format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort
-                                   (use 'docker port' to see the actual mapping)
-      -P, --publish-all=false    Publish all exposed ports to the host interfaces
-      --privileged=false         Give extended privileges to this container
-      --rm=false                 Automatically remove the container when it exits (incompatible with -d)
-      --sig-proxy=true           Proxify all received signal to the process (even in non-tty mode)
-      -t, --tty=false            Allocate a pseudo-tty
-      -u, --user=""              Username or UID
-      -v, --volume=[]            Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)
-      --volumes-from=[]          Mount volumes from the specified container(s)
-      -w, --workdir=""           Working directory inside the container
+      -a, --attach=[]            Attach to stdin, stdout or stderr.
+      -c, --cpu-shares=0         CPU shares (relative weight)
+      --cidfile=""               Write the container ID to the file
+      -d, --detach=false         Detached mode: Run container in the background, print new container id
+      --dns=[]                   Set custom dns servers
+      --dns-search=[]            Set custom dns search domains
+      -e, --env=[]               Set environment variables
+      --entrypoint=""            Overwrite the default entrypoint of the image
+      --env-file=[]              Read in a line delimited file of ENV variables
+      --expose=[]                Expose a port from the container without publishing it to your host
+      -h, --hostname=""          Container host name
+      -i, --interactive=false    Keep stdin open even if not attached
+      --link=[]                  Add link to another container (name:alias)
+      --lxc-conf=[]              (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
+      -m, --memory=""            Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
+      --name=""                  Assign a name to the container
+      --net="bridge"             Set the Network mode for the container
+                                   'bridge': creates a new network stack for the container on the docker bridge
+                                   'none': no networking for this container
+                                   'container:<name|id>': reuses another container network stack
+                                   'host': use the host network stack inside the container
+      -p, --publish=[]           Publish a container's port to the host
+                                   format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort
+                                   (use 'docker port' to see the actual mapping)
+      -P, --publish-all=false    Publish all exposed ports to the host interfaces
+      --privileged=false         Give extended privileges to this container
+      --rm=false                 Automatically remove the container when it exits (incompatible with -d)
+      --sig-proxy=true           Proxy all received signals to the process (even in non-tty mode)
+      -t, --tty=false            Allocate a pseudo-tty
+      -u, --user=""              Username or UID
+      -v, --volume=[]            Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)
+      --volumes-from=[]          Mount volumes from the specified container(s)
+      -w, --workdir=""           Working directory inside the container
 
 The `docker run` command first `creates` a writeable container layer over the
 specified image, and then `starts` it using the specified command. That is,
@@ -1052,11 +1063,11 @@ application change:
 
 ## save
 
-Save an image to a tar archive (streamed to stdout by default)
-
     Usage: docker save IMAGE
 
-    -o, --output="": Write to an file, instead of STDOUT
+    Save an image to a tar archive (streamed to stdout by default)
+
+    -o, --output=""    Write to a file, instead of STDOUT
 
 Produces a tarred repository to the standard output stream.
 Contains all parent layers, and all tags + versions, or specified repo:tag.
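+
+Because `save` streams the archive to `STDOUT`, one possible pattern is
+to copy an image straight to another machine (a sketch, assuming SSH
+access and passwordless `sudo` there; the hostname is illustrative):
+
+    $ sudo docker save busybox | ssh remotehost 'sudo docker load'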
@@ -1065,11 +1076,11 @@ It is used to create a backup that can then be used with `docker load`
 
     $ sudo docker save busybox > busybox.tar
-    $ ls -sh b.tar
-    2.7M b.tar
+    $ ls -sh busybox.tar
+    2.7M busybox.tar
     $ sudo docker save --output busybox.tar busybox
-    $ ls -sh b.tar
-    2.7M b.tar
+    $ ls -sh busybox.tar
+    2.7M busybox.tar
     $ sudo docker save -o fedora-all.tar fedora
     $ sudo docker save -o fedora-latest.tar fedora:latest
@@ -1079,9 +1090,11 @@ Search [Docker.io](https://index.docker.io) for images
 
     Usage: docker search TERM
 
-    --no-trunc=false: Don᾿t truncate output
-    -s, --stars=0: Only displays with at least xxx stars
-    -t, --trusted=false: Only show trusted builds
+    Search the docker index for images
+
+    --no-trunc=false     Don't truncate output
+    -s, --stars=0        Only displays with at least xxx stars
+    -t, --trusted=false  Only show trusted builds
 
 See [*Find Public Images on Docker.io*](
 /use/workingwithrepository/#find-public-images-on-dockerio) for
@@ -1089,31 +1102,31 @@ more details on finding shared images from the commandline.
 
 ## start
 
-Start a stopped container
+    Usage: docker start CONTAINER [CONTAINER...]
 
-    Usage: docker start [OPTIONS] CONTAINER
+    Start a stopped container
 
-    -a, --attach=false: Attach container᾿s stdout/stderr and forward all signals to the process
-    -i, --interactive=false: Attach container᾿s stdin
+    -a, --attach=false        Attach container's stdout/stderr and forward all signals to the process
+    -i, --interactive=false   Attach container's stdin
 
 ## stop
 
-Stop a running container (Send SIGTERM, and then SIGKILL after grace period)
-
     Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...]
 
-    -t, --time=10: Number of seconds to wait for the container to stop before killing it.
+    Stop a running container (Send SIGTERM, and then SIGKILL after grace period)
+
+    -t, --time=10   Number of seconds to wait for the container to stop before killing it.
 
 The main process inside the container will receive SIGTERM, and after a
 grace period, SIGKILL
 
 ## tag
 
-Tag an image into a repository
-
     Usage: docker tag [OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]
 
-    -f, --force=false: Force
+    Tag an image into a repository
+
+    -f, --force=false   Force
 
 You can group your images together using names and tags, and then upload
 them to [*Share Images via Repositories*](
@@ -1123,15 +1136,19 @@ them to [*Share Images via Repositories*](
 
     Usage: docker top CONTAINER [ps OPTIONS]
 
-Lookup the running processes of a container
+    Lookup the running processes of a container
 
 ## version
 
-Show the version of the Docker client, daemon, and latest released
-version.
+    Usage: docker version
+
+    Show the docker version information.
+
+Show the Docker version, API version, Git commit, and Go version of
+both Docker client and daemon.
 
 ## wait
 
-    Usage: docker wait [OPTIONS] NAME
+    Usage: docker wait CONTAINER [CONTAINER...]
 
-Block until a container stops, then print its exit code.
+    Block until a container stops, then print its exit code.
diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md
index 09c2b642a1..aa3d941b13 100644
--- a/docs/sources/reference/run.md
+++ b/docs/sources/reference/run.md
@@ -54,10 +54,10 @@ following options.
 - [Detached (-d)](#detached-d)
 - [Foreground](#foreground)
 - [Container Identification](#container-identification)
-    - [Name (–name)](#name-name)
+    - [Name (--name)](#name-name)
 - [PID Equivalent](#pid-equivalent)
 - [Network Settings](#network-settings)
-    - [Clean Up (–rm)](#clean-up-rm)
+    - [Clean Up (--rm)](#clean-up-rm)
 - [Runtime Constraints on CPU and
   Memory](#runtime-constraints-on-cpu-and-memory)
 - [Runtime Privilege and LXC
@@ -136,9 +136,8 @@ PID files):
 
 ## Network Settings
 
-    --dns=[]       : Set custom dns servers for the container
-    --net="bridge": Set the Network mode for the container ('bridge': creates a new network stack for the container on the docker bridge, 'none': no networking for this container, 'container:<name|id>': reuses another container network stack), 'host': use the host network stack inside the container
-    --net="bridge" Set the Network mode for the container
+    --dns=[]        : Set custom dns servers for the container
+    --net="bridge"  : Set the Network mode for the container
+                        'bridge': creates a new network stack for the container on the docker bridge
+                        'none': no networking for this container
+                        'container:<name|id>': reuses another container network stack
+                        'host': use the host network stack inside the container
 
@@ -235,7 +234,7 @@ By default, Docker containers are "unprivileged" and cannot, for
 example, run a Docker daemon inside a Docker container. This is because
 by default a container is not allowed to access any devices, but a
 "privileged" container is given access to all devices (see [lxc-template.go](
-https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go)
+https://github.com/dotcloud/docker/blob/master/daemon/execdriver/lxc/lxc_template.go)
 and documentation on [cgroups devices](
 https://www.kernel.org/doc/Documentation/cgroups/devices.txt)).
 
@@ -250,7 +249,7 @@ If the Docker daemon was started using the `lxc` exec-driver
 (`docker -d --exec-driver=lxc`) then the operator can also specify LXC options
 using one or more `--lxc-conf` parameters. These can be new parameters or
 override existing parameters from the [lxc-template.go](
-https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go).
+https://github.com/dotcloud/docker/blob/master/daemon/execdriver/lxc/lxc_template.go).
 Note that in the future, a given host's docker daemon may not use LXC, so this
 is an implementation-specific configuration meant for operators already
 familiar with using LXC directly.
 
@@ -440,6 +439,8 @@ but the operator can override it:
 
     -u="": Username or UID
 
+> **Note:** if you pass a numeric uid, it must be in the range 0-2147483647.
+
 ## WORKDIR
 
 The default working directory for running binaries within a container is the
diff --git a/docs/sources/use/ambassador_pattern_linking.md b/docs/sources/use/ambassador_pattern_linking.md
index 2bdd434f6e..755fa4dc9c 100644
--- a/docs/sources/use/ambassador_pattern_linking.md
+++ b/docs/sources/use/ambassador_pattern_linking.md
@@ -7,82 +7,77 @@ page_keywords: Examples, Usage, links, docker, documentation, examples, names, n
 
 ## Introduction
 
 Rather than hardcoding network links between a service consumer and
-provider, Docker encourages service portability.
- -eg, instead of +provider, Docker encourages service portability, for example instead of: (consumer) --> (redis) -requiring you to restart the `consumer` to attach it -to a different `redis` service, you can add -ambassadors +Requiring you to restart the `consumer` to attach it to a different +`redis` service, you can add ambassadors: (consumer) --> (redis-ambassador) --> (redis) - or +Or (consumer) --> (redis-ambassador) ---network---> (redis-ambassador) --> (redis) -When you need to rewire your consumer to talk to a different redis -server, you can just restart the `redis-ambassador` -container that the consumer is connected to. +When you need to rewire your consumer to talk to a different Redis +server, you can just restart the `redis-ambassador` container that the +consumer is connected to. -This pattern also allows you to transparently move the redis server to a +This pattern also allows you to transparently move the Redis server to a different docker host from the consumer. -Using the `svendowideit/ambassador` container, the -link wiring is controlled entirely from the `docker run` -parameters. +Using the `svendowideit/ambassador` container, the link wiring is +controlled entirely from the `docker run` parameters. ## Two host Example -Start actual redis server on one Docker host +Start actual Redis server on one Docker host - big-server $ docker run -d -name redis crosbymichael/redis + big-server $ docker run -d --name redis crosbymichael/redis -Then add an ambassador linked to the redis server, mapping a port to the +Then add an ambassador linked to the Redis server, mapping a port to the outside world - big-server $ docker run -d -link redis:redis -name redis_ambassador -p 6379:6379 svendowideit/ambassador + big-server $ docker run -d --link redis:redis --name redis_ambassador -p 6379:6379 svendowideit/ambassador On the other host, you can set up another ambassador setting environment -variables for each remote port we want to proxy to the -`big-server` +variables for each remote port we want to proxy to the `big-server` - client-server $ docker run -d -name redis_ambassador -expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador + client-server $ docker run -d --name redis_ambassador --expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador -Then on the `client-server` host, you can use a -redis client container to talk to the remote redis server, just by -linking to the local redis ambassador. +Then on the `client-server` host, you can use a Redis client container +to talk to the remote Redis server, just by linking to the local Redis +ambassador. 
-    client-server $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
+    client-server $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
     redis 172.17.0.160:6379> ping
     PONG
 
 ## How it works
 
-The following example shows what the `svendowideit/ambassador`
-container does automatically (with a tiny amount of `sed`)
+The following example shows what the `svendowideit/ambassador` container
+does automatically (with a tiny amount of `sed`)
 
-On the docker host (192.168.1.52) that redis will run on:
+On the Docker host (192.168.1.52) that Redis will run on:
 
     # start actual redis server
-    $ docker run -d -name redis crosbymichael/redis
+    $ docker run -d --name redis crosbymichael/redis
 
     # get a redis-cli container for connection testing
     $ docker pull relateiq/redis-cli
 
     # test the redis server by talking to it directly
-    $ docker run -t -i -rm -link redis:redis relateiq/redis-cli
+    $ docker run -t -i --rm --link redis:redis relateiq/redis-cli
     redis 172.17.0.136:6379> ping
     PONG
     ^D
 
     # add redis ambassador
-    $ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 busybox sh
+    $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 busybox sh
 
-in the redis_ambassador container, you can see the linked redis
-containers'senv
+In the `redis_ambassador` container, you can see the linked Redis
+container's `env`:
 
     $ env
     REDIS_PORT=tcp://172.17.0.136:6379
@@ -98,43 +93,43 @@ containers'senv
     PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
     PWD=/
 
-This environment is used by the ambassador socat script to expose redis
-to the world (via the -p 6379:6379 port mapping)
+This environment is used by the ambassador `socat` script to expose Redis
+to the world (via the `-p 6379:6379` port mapping):
 
     $ docker rm redis_ambassador
     $ sudo ./contrib/mkimage-unittest.sh
-    $ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 docker-ut sh
+    $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 docker-ut sh
 
     $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379
 
-then ping the redis server via the ambassador
+Now ping the Redis server via the ambassador.
 
-Now goto a different server
+Now go to a different server:
 
     $ sudo ./contrib/mkimage-unittest.sh
-    $ docker run -t -i -expose 6379 -name redis_ambassador docker-ut sh
+    $ docker run -t -i --expose 6379 --name redis_ambassador docker-ut sh
 
     $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379
 
-and get the redis-cli image so we can talk over the ambassador bridge
+And get the `redis-cli` image so we can talk over the ambassador bridge.
 
     $ docker pull relateiq/redis-cli
-    $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
+    $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
     redis 172.17.0.160:6379> ping
     PONG
 
 ## The svendowideit/ambassador Dockerfile
 
-The `svendowideit/ambassador` image is a small
-busybox image with `socat` built in. When you start
-the container, it uses a small `sed` script to parse
-out the (possibly multiple) link environment variables to set up the
-port forwarding. On the remote host, you need to set the variable using
-the `-e` command line option.
+The `svendowideit/ambassador` image is a small `busybox` image with
+`socat` built in. When you start the container, it uses a small `sed`
+script to parse out the (possibly multiple) link environment variables
+to set up the port forwarding. On the remote host, you need to set the
+variable using the `-e` command line option.
-`--expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`
-will forward the local `1234` port to the
-remote IP and port - in this case `192.168.1.52:6379`.
+    --expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379
+
+Will forward the local `1234` port to the remote IP and port, in this
+case `192.168.1.52:6379`.
 
 #
 #
@@ -144,9 +139,9 @@ remote IP and port - in this case `192.168.1.52:6379`.
 #   docker build -t SvenDowideit/ambassador .
 #   docker tag SvenDowideit/ambassador ambassador
 # then to run it (on the host that has the real backend on it)
-#   docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 ambassador
+#   docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 ambassador
 # on the remote host, you can set up another ambassador
-#    docker run -t -i -name redis_ambassador -expose 6379 sh
+#    docker run -t -i --name redis_ambassador --expose 6379 sh
 
 FROM docker-ut
 MAINTAINER SvenDowideit@home.org.au
diff --git a/docs/sources/use/basics.md b/docs/sources/use/basics.md
index ee3eeabd9d..e4422034d4 100644
--- a/docs/sources/use/basics.md
+++ b/docs/sources/use/basics.md
@@ -12,10 +12,10 @@ your Docker install, run the following command:
 
     # Check that you have a working install
    $ docker info
 
-If you get `docker: command not found` or something
-like `/var/lib/docker/repositories: permission denied`
-you may have an incomplete docker installation or insufficient
-privileges to access Docker on your machine.
+If you get `docker: command not found` or something like
+`/var/lib/docker/repositories: permission denied` you may have an
+incomplete Docker installation or insufficient privileges to access
+Docker on your machine.
 
 Please refer to [*Installation*](/installation/#installation-list)
 for installation instructions.
@@ -26,9 +26,9 @@ for installation instructions.
 
     $ sudo docker pull ubuntu
 
 This will find the `ubuntu` image by name on
-[*Docker.io*](../workingwithrepository/#find-public-images-on-dockerio) and
-download it from [Docker.io](https://index.docker.io) to a local image
-cache.
+[*Docker.io*](../workingwithrepository/#find-public-images-on-dockerio)
+and download it from [Docker.io](https://index.docker.io) to a local
+image cache.
 
 > **Note**:
 > When the image has successfully downloaded, you will see a 12 character
@@ -50,7 +50,7 @@ cache.
 
 ## Bind Docker to another host/port or a Unix socket
 
-> **Warning**: 
+> **Warning**:
 > Changing the default `docker` daemon binding to a
 > TCP port or Unix *docker* user group will increase your security risks
 > by allowing non-root users to gain *root* access on the host. Make sure
@@ -58,41 +58,44 @@ cache.
 > to a TCP port, anyone with access to that port has full Docker access;
 > so it is not advisable on an open network.
 
-With `-H` it is possible to make the Docker daemon
-to listen on a specific IP and port. By default, it will listen on
-`unix:///var/run/docker.sock` to allow only local
-connections by the *root* user. You *could* set it to
-`0.0.0.0:4243` or a specific host IP to give access
-to everybody, but that is **not recommended** because then it is trivial
-for someone to gain root access to the host where the daemon is running.
+With `-H` it is possible to make the Docker daemon listen on a
+specific IP and port. By default, it will listen on
+`unix:///var/run/docker.sock` to allow only local connections by the
+*root* user. You *could* set it to `0.0.0.0:4243` or a specific host IP
+to give access to everybody, but that is **not recommended** because
+then it is trivial for someone to gain root access to the host where the
+daemon is running.
 
-Similarly, the Docker client can use `-H` to connect
-to a custom port.
+Similarly, the Docker client can use `-H` to connect to a custom port.
 
-`-H` accepts host and port assignment in the
-following format: `tcp://[host][:port]` or
-`unix://path`
+`-H` accepts host and port assignment in the following format:
+
+    tcp://[host][:port] or unix://path
 
 For example:
 
-- `tcp://host:4243` -> tcp connection on
+- `tcp://host:4243` -> TCP connection on
   host:4243
-- `unix://path/to/socket` -> unix socket located
+- `unix://path/to/socket` -> Unix socket located
   at `path/to/socket`
 
 `-H`, when empty, will default to the same value as
 when no `-H` was passed in.
 
 `-H` also accepts short form for TCP bindings:
-`host[:port]` or `:port`
 
-    # Run docker in daemon mode
+    host[:port] or :port
+
+Run Docker in daemon mode:
+
     $ sudo /docker -H 0.0.0.0:5555 -d &
-    # Download an ubuntu image
+
+Download an `ubuntu` image:
+
     $ sudo docker -H :5555 pull ubuntu
 
-You can use multiple `-H`, for example, if you want
-to listen on both TCP and a Unix socket
+You can use multiple `-H`, for example, if you want to listen on both
+TCP and a Unix socket:
 
     # Run docker in daemon mode
     $ sudo /docker -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -d &
diff --git a/docs/sources/use/chef.md b/docs/sources/use/chef.md
index 897c2b429a..5568e99afa 100644
--- a/docs/sources/use/chef.md
+++ b/docs/sources/use/chef.md
@@ -19,8 +19,8 @@ operating systems.
 
 ## Installation
 
 The cookbook is available on the [Chef Community
-Site](http://community.opscode.com/cookbooks/docker) and can be installed using
-your favorite cookbook dependency manager.
+Site](http://community.opscode.com/cookbooks/docker) and can be
+installed using your favorite cookbook dependency manager.
 
 The source can be found on
 [GitHub](https://github.com/bflad/chef-docker).
 
@@ -71,4 +71,4 @@ This is equivalent to running the following command, but under upstart:
 
     $ docker run --detach=true --publish='5000:5000' --env='SETTINGS_FLAVOR=local' --volume='/mnt/docker:/docker-storage' samalba/docker-registry
 
 The resources will accept a single string or an array of values for any
-docker flags that allow multiple values.
+Docker flags that allow multiple values.
diff --git a/docs/sources/use/host_integration.md b/docs/sources/use/host_integration.md
index 370c00e20a..fa442620e5 100644
--- a/docs/sources/use/host_integration.md
+++ b/docs/sources/use/host_integration.md
@@ -10,16 +10,15 @@ You can use your Docker containers with process managers like
 
 ## Introduction
 
 If you want a process manager to manage your containers you will need to
-run the docker daemon with the `-r=false` so that
-docker will not automatically restart your containers when the host is
-restarted.
+run the docker daemon with the `-r=false` flag so that docker will not
+automatically restart your containers when the host is restarted.
 
 When you have finished setting up your image and are happy with your
 running container, you can then attach a process manager to manage it.
-When your run `docker start -a` docker will
-automatically attach to the running container, or start it if needed and
-forward all signals so that the process manager can detect when a
-container stops and correctly restart it.
+When you run `docker start -a` docker will automatically attach to the
+running container, or start it if needed and forward all signals so that
+the process manager can detect when a container stops and correctly
+restart it.
 
 Here are a few sample scripts for systemd and upstart to integrate with
 docker.
 
@@ -27,9 +26,8 @@ docker.
 
 ## Sample Upstart Script
 
 In this example We've already created a container to run Redis with
-`--name redis_server`. To create an upstart script
-for our container, we create a file named
-`/etc/init/redis.conf` and place the following into
+`--name redis_server`. To create an upstart script for our container, we
+create a file named `/etc/init/redis.conf` and place the following into
 it:
 
     description "Redis container"
diff --git a/docs/sources/use/networking.md b/docs/sources/use/networking.md
index 00d0684256..5b76233eff 100644
--- a/docs/sources/use/networking.md
+++ b/docs/sources/use/networking.md
@@ -1,138 +1,699 @@
-page_title: Configure Networking
+page_title: Network Configuration
 page_description: Docker networking
 page_keywords: network, networking, bridge, docker, documentation
 
-# Configure Networking
+# Network Configuration
 
-## Introduction
+## TL;DR
 
-Docker uses Linux bridge capabilities to provide network connectivity to
-containers. The `docker0` bridge interface is
-managed by Docker for this purpose. When the Docker daemon starts it :
+When Docker starts, it creates a virtual interface named `docker0` on
+the host machine. It randomly chooses an address and subnet from the
+private range defined by [RFC 1918](http://tools.ietf.org/html/rfc1918)
+that are not in use on the host machine, and assigns it to `docker0`.
+Docker made the choice `172.17.42.1/16` when I started it a few minutes
+ago, for example — a 16-bit netmask providing 65,534 addresses for the
+host machine and its containers.
 
- - creates the `docker0` bridge if not present
- - searches for an IP address range which doesn't overlap with an existing route
- - picks an IP in the selected range
- - assigns this IP to the `docker0` bridge
+But `docker0` is no ordinary interface. It is a virtual *Ethernet
+bridge* that automatically forwards packets between any other network
+interfaces that are attached to it. This lets containers communicate
+both with the host machine and with each other. Every time Docker
+creates a container, it creates a pair of “peer” interfaces that are
+like opposite ends of a pipe — a packet sent on one will be received on
+the other. It gives one of the peers to the container to become its
+`eth0` interface and keeps the other peer, with a unique name like
+`vethAQI2QT`, out in the namespace of the host machine. By binding
+every `veth*` interface to the `docker0` bridge, Docker creates a
+virtual subnet shared between the host machine and every Docker
+container.
 
-
+The remaining sections of this document explain all of the ways that you
+can use Docker options and — in advanced cases — raw Linux networking
+commands to tweak, supplement, or entirely replace Docker’s default
+networking configuration.
 
-    # List host bridges
-    $ sudo brctl show
-    bridge name     bridge id               STP enabled     interfaces
-    docker0         8000.000000000000       no
+## Quick Guide to the Options
 
-    # Show docker0 IP address
-    $ sudo ifconfig docker0
-    docker0   Link encap:Ethernet  HWaddr xx:xx:xx:xx:xx:xx
-              inet addr:172.17.42.1  Bcast:0.0.0.0  Mask:255.255.0.0
+Here is a quick list of the networking-related Docker command-line
+options, in case it helps you find the section below that you are
+looking for.
-At runtime, a [*specific kind of virtual interface*](#vethxxxx-device) -is given to each container which is then bonded to the `docker0` bridge. -Each container also receives a dedicated IP address from the same range -as `docker0`. The `docker0` IP address is used as the default gateway -for the container. +Some networking command-line options can only be supplied to the Docker +server when it starts up, and cannot be changed once it is running: - # Run a container - $ sudo docker run -t -i -d base /bin/bash - 52f811c5d3d69edddefc75aff5a4525fc8ba8bcfa1818132f9dc7d4f7c7e78b4 + * `-b BRIDGE` or `--bridge=BRIDGE` — see + [Building your own bridge](#bridge-building) - $ sudo brctl show - bridge name bridge id STP enabled interfaces - docker0 8000.fef213db5a66 no vethQCDY1N + * `--bip=CIDR` — see + [Customizing docker0](#docker0) -Above, `docker0` acts as a bridge for the `vethQCDY1N` interface which -is dedicated to the 52f811c5d3d6 container. + * `-H SOCKET...` or `--host=SOCKET...` — + This might sound like it would affect container networking, + but it actually faces in the other direction: + it tells the Docker server over what channels + it should be willing to receive commands + like “run container” and “stop container.” + To learn about the option, + read [Bind Docker to another host/port or a Unix socket](../basics/#bind-docker-to-another-hostport-or-a-unix-socket) + over in the Basics document. -## How to use a specific IP address range + * `--icc=true|false` — see + [Communication between containers](#between-containers) -Docker will try hard to find an IP range that is not used by the host. -Even though it works for most cases, it's not bullet-proof and sometimes -you need to have more control over the IP addressing scheme. + * `--ip=IP_ADDRESS` — see + [Binding container ports](#binding-ports) -For this purpose, Docker allows you to manage the `docker0` -bridge or your own one using the `-b=` -parameter. + * `--ip-forward=true|false` — see + [Communication between containers](#between-containers) -In this scenario: + * `--iptables=true|false` — see + [Communication between containers](#between-containers) - - ensure Docker is stopped - - create your own bridge (`bridge0` for example) - - assign a specific IP to this bridge - - start Docker with the `-b=bridge0` parameter + * `--mtu=BYTES` — see + [Customizing docker0](#docker0) - +There are two networking options that can be supplied either at startup +or when `docker run` is invoked. 
When provided at startup, they set the
+default value that `docker run` will later use if the options are not
+specified:
 
-    # Stop Docker
-    $ sudo service docker stop
+  * `--dns=IP_ADDRESS...` — see
+    [Configuring DNS](#dns)
 
-    # Clean docker0 bridge and
-    # add your very own bridge0
-    $ sudo ifconfig docker0 down
-    $ sudo brctl addbr bridge0
-    $ sudo ifconfig bridge0 192.168.227.1 netmask 255.255.255.0
+  * `--dns-search=DOMAIN...` — see
+    [Configuring DNS](#dns)
 
-    # Edit your Docker startup file
-    $ echo "DOCKER_OPTS=\"-b=bridge0\"" >> /etc/default/docker
+Finally, several networking options can only be provided when calling
+`docker run` because they specify something specific to one container:
 
-    # Start Docker
-    $ sudo service docker start
+  * `-h HOSTNAME` or `--hostname=HOSTNAME` — see
+    [Configuring DNS](#dns) and
+    [How Docker networks a container](#container-networking)
 
-    # Ensure bridge0 IP is not changed by Docker
-    $ sudo ifconfig bridge0
-    bridge0   Link encap:Ethernet  HWaddr xx:xx:xx:xx:xx:xx
-              inet addr:192.168.227.1  Bcast:192.168.227.255  Mask:255.255.255.0
+  * `--link=CONTAINER_NAME:ALIAS` — see
+    [Configuring DNS](#dns) and
+    [Communication between containers](#between-containers)
 
-    # Run a container
-    docker run -i -t base /bin/bash
+  * `--net=bridge|none|container:NAME_or_ID|host` — see
+    [How Docker networks a container](#container-networking)
 
-    # Container IP in the 192.168.227/24 range
-    root@261c272cd7d5:/# ifconfig eth0
-    eth0      Link encap:Ethernet  HWaddr xx:xx:xx:xx:xx:xx
-              inet addr:192.168.227.5  Bcast:192.168.227.255  Mask:255.255.255.0
+  * `-p SPEC` or `--publish=SPEC` — see
+    [Binding container ports](#binding-ports)
 
-    # bridge0 IP as the default gateway
-    root@261c272cd7d5:/# route -n
-    Kernel IP routing table
-    Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
-    0.0.0.0         192.168.227.1   0.0.0.0         UG    0      0        0 eth0
-    192.168.227.0   0.0.0.0         255.255.255.0   U     0      0        0 eth0
+  * `-P` or `--publish-all=true|false` — see
+    [Binding container ports](#binding-ports)
 
-    # hits CTRL+P then CTRL+Q to detach
+The following sections tackle all of the above topics in an order that
+moves roughly from simplest to most complex.
+
+## Configuring DNS
+
+How can Docker supply each container with a hostname and DNS
+configuration, without having to build a custom image with the hostname
+written inside? Its trick is to overlay three crucial `/etc` files
+inside the container with virtual files where it can write fresh
+information. You can see this by running `mount` inside a container:
+
+    $$ mount
+    ...
+    /dev/disk/by-uuid/1fec...ebdf on /etc/hostname type ext4 ...
+    /dev/disk/by-uuid/1fec...ebdf on /etc/hosts type ext4 ...
+    tmpfs on /etc/resolv.conf type tmpfs ...
+    ...
+
+This arrangement allows Docker to do clever things like keep
+`resolv.conf` up to date across all containers when the host machine
+receives new configuration over DHCP later. The exact details of how
+Docker maintains these files inside the container can change from one
+Docker version to the next, so you should leave the files themselves
+alone and use the following Docker options instead.
+
+Four different options affect container domain name services.
+
+ * `-h HOSTNAME` or `--hostname=HOSTNAME` — sets the hostname by which
+   the container knows itself. This is written into `/etc/hostname`,
+   into `/etc/hosts` as the name of the container’s host-facing IP
+   address, and is the name that `/bin/bash` inside the container will
+   display inside its prompt. But the hostname is not easy to see from
+   outside the container. It will not appear in `docker ps` nor in the
+   `/etc/hosts` file of any other container.
+
+ * `--link=CONTAINER_NAME:ALIAS` — using this option as you `run` a
+   container gives the new container’s `/etc/hosts` an extra entry
+   named `ALIAS` that points to the IP address of the container named
+   `CONTAINER_NAME`. This lets processes inside the new container
+   connect to the hostname `ALIAS` without having to know its IP. The
+   `--link=` option is discussed in more detail below, in the section
+   [Communication between containers](#between-containers).
+
+ * `--dns=IP_ADDRESS...` — sets the IP addresses added as `nameserver`
+   lines to the container's `/etc/resolv.conf` file. Processes in the
+   container, when confronted with a hostname not in `/etc/hosts`, will
+   connect to these IP addresses on port 53 looking for name resolution
+   services.
+
+ * `--dns-search=DOMAIN...` — sets the domain names that are searched
+   when a bare unqualified hostname is used inside of the container, by
+   writing `search` lines into the container’s `/etc/resolv.conf`.
+   When a container process attempts to access `host` and the search
+   domain `example.com` is set, for instance, the DNS logic will not
+   only look up `host` but also `host.example.com`.
+
+Note that Docker, in the absence of either of the last two options
+above, will make `/etc/resolv.conf` inside of each container look like
+the `/etc/resolv.conf` of the host machine where the `docker` daemon is
+running. The options then modify this default configuration.
+
+## Communication between containers
+
+Whether two containers can communicate is governed, at the operating
+system level, by three factors.
+
+1. Does the network topology even connect the containers’ network
+   interfaces? By default Docker will attach all containers to a
+   single `docker0` bridge, providing a path for packets to travel
+   between them. See the later sections of this document for other
+   possible topologies.
+
+2. Is the host machine willing to forward IP packets? This is governed
+   by the `ip_forward` system parameter. Packets can only pass between
+   containers if this parameter is `1`. Usually you will simply leave
+   the Docker server at its default setting `--ip-forward=true` and
+   Docker will set `ip_forward` to `1` for you when the server
+   starts up. To check the setting or turn it on manually:
+
+        # Usually not necessary: turning on forwarding,
+        # on the host where your Docker server is running
+
+        $ cat /proc/sys/net/ipv4/ip_forward
+        0
+        $ echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward
+        1
+        $ cat /proc/sys/net/ipv4/ip_forward
+        1
+
+3. Do your `iptables` allow this particular connection to be made?
+   Docker will never make changes to your system `iptables` rules if
+   you set `--iptables=false` when the daemon starts. Otherwise the
+   Docker server will add a default rule to the `FORWARD` chain with a
+   blanket `ACCEPT` policy if you retain the default `--icc=true`, or
+   else will set the policy to `DROP` if `--icc=false`.
+
+Nearly everyone using Docker will want `ip_forward` to be on, to at
+least make communication *possible* between containers. But it is a
+strategic question whether to leave `--icc=true` or change it to
+`--icc=false` (on Ubuntu, by editing the `DOCKER_OPTS` variable in
+`/etc/default/docker` and restarting the Docker server) so that
+`iptables` will protect other containers — and the main host — from
+having arbitrary ports probed or accessed by a container that gets
+compromised.
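+
+For example, here is a minimal sketch of that Ubuntu change, using the
+same `DOCKER_OPTS` file described above:
+
+    $ echo 'DOCKER_OPTS="--icc=false"' | sudo tee -a /etc/default/docker
+    $ sudo service docker restart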
+ +If you choose the most secure setting of `--icc=false`, then how can +containers communicate in those cases where you *want* them to provide +each other services? + +The answer is the `--link=CONTAINER_NAME:ALIAS` option, which was +mentioned in the previous section because of its effect upon name +services. If the Docker daemon is running with both `--icc=false` and +`--iptables=true` then, when it sees `docker run` invoked with the +`--link=` option, the Docker server will insert a pair of `iptables` +`ACCEPT` rules so that the new container can connect to the ports +exposed by the other container — the ports that it mentioned in the +`EXPOSE` lines of its `Dockerfile`. Docker has more documentation on +this subject — see the [Link Containers](working_with_links_names.md) +page for further details. + +> **Note**: +> The value `CONTAINER_NAME` in `--link=` must either be an +> auto-assigned Docker name like `stupefied_pare` or else the name you +> assigned with `--name=` when you ran `docker run`. It cannot be a +> hostname, which Docker will not recognize in the context of the +> `--link=` option. + +You can run the `iptables` command on your Docker host to see whether +the `FORWARD` chain has a default policy of `ACCEPT` or `DROP`: + + # When --icc=false, you should see a DROP rule: + + $ sudo iptables -L -n + ... + Chain FORWARD (policy ACCEPT) + target prot opt source destination + DROP all -- 0.0.0.0/0 0.0.0.0/0 + ... + + # When a --link= has been created under --icc=false, + # you should see port-specific ACCEPT rules overriding + # the subsequent DROP policy for all other packets: + + $ sudo iptables -L -n + ... + Chain FORWARD (policy ACCEPT) + target prot opt source destination + ACCEPT tcp -- 172.17.0.2 172.17.0.3 tcp spt:80 + ACCEPT tcp -- 172.17.0.3 172.17.0.2 tcp dpt:80 + DROP all -- 0.0.0.0/0 0.0.0.0/0 + +> **Note**: +> Docker is careful that its host-wide `iptables` rules fully expose +> containers to each other’s raw IP addresses, so connections from one +> container to another should always appear to be originating from the +> first container’s own IP address. + +## Binding container ports to the host + +By default Docker containers can make connections to the outside world, +but the outside world cannot connect to containers. Each outgoing +connection will appear to originate from one of the host machine’s own +IP addresses thanks to an `iptables` masquerading rule on the host +machine that the Docker server creates when it starts: + + # You can see that the Docker server creates a + # masquerade rule that let containers connect + # to IP addresses in the outside world: + + $ sudo iptables -t nat -L -n + ... + Chain POSTROUTING (policy ACCEPT) + target prot opt source destination + MASQUERADE all -- 172.17.0.0/16 !172.17.0.0/16 + ... + +But if you want containers to accept incoming connections, you will need +to provide special options when invoking `docker run`. These options +are covered in more detail on the [Redirect Ports](port_redirection.md) +page. There are two approaches. + +First, you can supply `-P` or `--publish-all=true|false` to `docker run` +which is a blanket operation that identifies every port with an `EXPOSE` +line in the image’s `Dockerfile` and maps it to a host port somewhere in +the range 49000–49900. This tends to be a bit inconvenient, since you +then have to run other `docker` sub-commands to learn which external +port a given service was mapped to. 
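+
+As a sketch of that workflow (the image name and the port that Docker
+reports back are illustrative):
+
+    $ sudo docker run -d -P --name web <image>
+    $ sudo docker port web 80
+    0.0.0.0:49153
+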
+More convenient is the `-p SPEC` or `--publish=SPEC` option which lets
+you be explicit about exactly which external port on the Docker server —
+which can be any port at all, not just those in the 49000–49900 block —
+you want mapped to which port in the container.
+
+Either way, you should be able to peek at what Docker has accomplished
+in your network stack by examining your NAT tables.
+
+    # What your NAT rules might look like when Docker
+    # is finished setting up a -P forward:
+
+    $ sudo iptables -t nat -L -n
+    ...
+    Chain DOCKER (2 references)
+    target     prot opt source               destination
+    DNAT       tcp  --  0.0.0.0/0            0.0.0.0/0            tcp dpt:49153 to:172.17.0.2:80
+
+    # What your NAT rules might look like when Docker
+    # is finished setting up a -p 80:80 forward:
+
+    Chain DOCKER (2 references)
+    target     prot opt source               destination
+    DNAT       tcp  --  0.0.0.0/0            0.0.0.0/0            tcp dpt:80 to:172.17.0.2:80
+
+You can see that Docker has exposed these container ports on `0.0.0.0`,
+the wildcard IP address that will match any possible incoming port on
+the host machine. If you want to be more restrictive and only allow
+container services to be contacted through a specific external interface
+on the host machine, you have two choices. When you invoke `docker run`
+you can use either `-p IP:host_port:container_port` or `-p IP::port` to
+specify the external interface for one particular binding.
+
+Or if you always want Docker port forwards to bind to one specific IP
+address, you can edit your system-wide Docker server settings (on
+Ubuntu, by editing `DOCKER_OPTS` in `/etc/default/docker`) and add the
+option `--ip=IP_ADDRESS`. Remember to restart your Docker server after
+editing this setting.
+
+Again, this topic is covered without all of these low-level networking
+details in the [Redirect Ports](port_redirection.md) document if you
+would like to use that as your port redirection reference instead.
+
+## Customizing docker0
+
+By default, the Docker server creates and configures the host system’s
+`docker0` interface as an *Ethernet bridge* inside the Linux kernel that
+can pass packets back and forth between other physical or virtual
+network interfaces so that they behave as a single Ethernet network.
+
+Docker configures `docker0` with an IP address and netmask so the host
+machine can both receive and send packets to containers connected to the
+bridge, and gives it an MTU — the *maximum transmission unit* or largest
+packet length that the interface will allow — of either 1,500 bytes or
+else a more specific value copied from the Docker host’s interface that
+supports its default route. Both are configurable at server startup:
+
+ * `--bip=CIDR` — supply a specific IP address and netmask for the
+   `docker0` bridge, using standard CIDR notation like
+   `192.168.1.5/24`.
+
+ * `--mtu=BYTES` — override the maximum packet length on `docker0`.
+
+On Ubuntu you would add these to the `DOCKER_OPTS` setting in
+`/etc/default/docker` on your Docker host and restart the Docker
+service.
+
+Once you have one or more containers up and running, you can confirm
+that Docker has properly connected them to the `docker0` bridge by
+running the `brctl` command on the host machine and looking at the
+`interfaces` column of the output. Here is a host with two different
+containers connected:
 
     # Display bridge info
+
     $ sudo brctl show
-    bridge name     bridge id               STP enabled     interfaces
-    bridge0         8000.fe7c2e0faebd       no              vethAQI2QT
+    bridge name     bridge id               STP enabled     interfaces
+    docker0         8000.3a1d7362b4ee       no              veth65f9
+                                                            vethdda6
 
-## Container intercommunication
+If the `brctl` command is not installed on your Docker host, then on
+Ubuntu you should be able to run `sudo apt-get install bridge-utils` to
+install it.
 
-The value of the Docker daemon's `icc` parameter
-determines whether containers can communicate with each other over the
-bridge network.
+Finally, the `docker0` Ethernet bridge settings are used every time you
+create a new container. Docker selects a free IP address from the range
+available on the bridge each time you `docker run` a new container, and
+configures the container’s `eth0` interface with that IP address and the
+bridge’s netmask. The Docker host’s own IP address on the bridge is
+used as the default gateway by which each container reaches the rest of
+the Internet.
 
- - The default, `-icc=true` allows containers to communicate with each other.
- - `-icc=false` means containers are isolated from each other.
+    # The network, as seen from a container
 
-Docker uses `iptables` under the hood to either
-accept or drop communication between containers.
+    $ sudo docker run -i -t --rm base /bin/bash
 
-## What is the vethXXXX device?
+    $$ ip addr show eth0
+    24: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
+        link/ether 32:6f:e0:35:57:91 brd ff:ff:ff:ff:ff:ff
+        inet 172.17.0.3/16 scope global eth0
+           valid_lft forever preferred_lft forever
+        inet6 fe80::306f:e0ff:fe35:5791/64 scope link
+           valid_lft forever preferred_lft forever
 
-Well. Things get complicated here.
+    $$ ip route
+    default via 172.17.42.1 dev eth0
+    172.17.0.0/16 dev eth0  proto kernel  scope link  src 172.17.0.3
 
-The `vethXXXX` interface is the host side of a
-point-to-point link between the host and the corresponding container;
-the other side of the link is the container's `eth0`
-interface. This pair (host `vethXXX` and container
-`eth0`) are connected like a tube. Everything that
-comes in one side will come out the other side.
+    $$ exit
 
-All the plumbing is delegated to Linux network capabilities (check the
-ip link command) and the namespaces infrastructure.
+Remember that the Docker host will not be willing to forward container
+packets out on to the Internet unless its `ip_forward` system setting is
+`1` — see the section above on [Communication between
+containers](#between-containers) for details.
 
-## I want more
+## Building your own bridge
 
-Jérôme Petazzoni has create `pipework` to connect together containers in
-arbitrarily complex scenarios:
-[https://github.com/jpetazzo/pipework](https://github.com/jpetazzo/pipework)
+If you want to take Docker out of the business of creating its own
+Ethernet bridge entirely, you can set up your own bridge before starting
+Docker and use `-b BRIDGE` or `--bridge=BRIDGE` to tell Docker to use
+your bridge instead. If you already have Docker up and running with its
+old `docker0` still configured, you will probably want to begin by
+stopping the service and removing the interface:
+
+    # Stopping Docker and removing docker0
+
+    $ sudo service docker stop
+    $ sudo ip link set dev docker0 down
+    $ sudo brctl delbr docker0
+
+Then, before starting the Docker service, create your own bridge and
+give it whatever configuration you want. Here we will create a simple
+enough bridge that we really could just have used the options in the
+previous section to customize `docker0`, but it will be enough to
+illustrate the technique.
+
+    # Create our own bridge
+
+    $ sudo brctl addbr bridge0
+    $ sudo ip addr add 192.168.5.1/24 dev bridge0
+    $ sudo ip link set dev bridge0 up
+
+    # Confirming that our bridge is up and running
+
+    $ ip addr show bridge0
+    4: bridge0: mtu 1500 qdisc noop state UP group default
+        link/ether 66:38:d0:0d:76:18 brd ff:ff:ff:ff:ff:ff
+        inet 192.168.5.1/24 scope global bridge0
+           valid_lft forever preferred_lft forever
+
+    # Tell Docker about it and restart (on Ubuntu)
+
+    $ echo 'DOCKER_OPTS="-b=bridge0"' | sudo tee -a /etc/default/docker
+    $ sudo service docker start
+
+The result should be that the Docker server starts successfully and is
+now prepared to bind containers to the new bridge. After pausing to
+verify the bridge’s configuration, try creating a container — you will
+see that its IP address is in your new IP address range, which Docker
+will have auto-detected.
+
+Just as we learned in the previous section, you can use the `brctl show`
+command to see Docker add and remove interfaces from the bridge as you
+start and stop containers, and can run `ip addr` and `ip route` inside a
+container to see that it has been given an address in the bridge’s IP
+address range and has been told to use the Docker host’s IP address on
+the bridge as its default gateway to the rest of the Internet.
+
+## How Docker networks a container
+
+While Docker is under active development and continues to tweak and
+improve its network configuration logic, the shell commands in this
+section are rough equivalents to the steps that Docker takes when
+configuring networking for each new container.
+
+Let’s review a few basics.
+
+To communicate using the Internet Protocol (IP), a machine needs access
+to at least one network interface at which packets can be sent and
+received, and a routing table that defines the range of IP addresses
+reachable through that interface. Network interfaces do not have to be
+physical devices. In fact, the `lo` loopback interface available on
+every Linux machine (and inside each Docker container) is entirely
+virtual — the Linux kernel simply copies loopback packets directly from
+the sender’s memory into the receiver’s memory.
+
+Docker uses special virtual interfaces to let containers communicate
+with the host machine — pairs of virtual interfaces called “peers” that
+are linked inside of the host machine’s kernel so that packets can
+travel between them. They are simple to create, as we will see in a
+moment.
+
+The steps with which Docker configures a container are:
+
+1. Create a pair of peer virtual interfaces.
+
+2. Give one of them a unique name like `veth65f9`, keep it inside of
+   the main Docker host, and bind it to `docker0` or whatever bridge
+   Docker is supposed to be using.
+
+3. Toss the other interface over the wall into the new container (which
+   will already have been provided with an `lo` interface) and rename
+   it to the much prettier name `eth0` since, inside of the container’s
+   separate and unique network interface namespace, there are no
+   physical interfaces with which this name could collide.
+
+4. Give the container’s `eth0` a new IP address from within the
+   bridge’s range of network addresses, and set its default route to
+   the IP address that the Docker host owns on the bridge.
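+
+If you would like to see step 1 in isolation, you can create and then
+delete a throwaway pair of peer interfaces on the host; the names `A`
+and `B` are arbitrary, as in the full walkthrough below:
+
+    # Create a pair of peer interfaces, inspect them, then clean up
+
+    $ sudo ip link add A type veth peer name B
+    $ ip link show A
+    $ ip link show B
+    $ sudo ip link del A    # deleting one peer also removes the other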
+ +With these steps complete, the container now possesses an `eth0` +(virtual) network card and will find itself able to communicate with +other containers and the rest of the Internet. + +You can opt out of the above process for a particular container by +giving the `--net=` option to `docker run`, which takes four possible +values. + + * `--net=bridge` — The default action, that connects the container to + the Docker bridge as described above. + + * `--net=host` — Tells Docker to skip placing the container inside of + a separate network stack. In essence, this choice tells Docker to + **not containerize the container’s networking**! While container + processes will still be confined to their own filesystem and process + list and resource limits, a quick `ip addr` command will show you + that, network-wise, they live “outside” in the main Docker host and + have full access to its network interfaces. Note that this does + **not** let the container reconfigure the host network stack — that + would require `--privileged=true` — but it does let container + processes open low-numbered ports like any other root process. + + * `--net=container:NAME_or_ID` — Tells Docker to put this container’s + processes inside of the network stack that has already been created + inside of another container. The new container’s processes will be + confined to their own filesystem and process list and resource + limits, but will share the same IP address and port numbers as the + first container, and processes on the two containers will be able to + connect to each other over the loopback interface. + + * `--net=none` — Tells Docker to put the container inside of its own + network stack but not to take any steps to configure its network, + leaving you free to build any of the custom configurations explored + in the last few sections of this document. + +To get an idea of the steps that are necessary if you use `--net=none` +as described in that last bullet point, here are the commands that you +would run to reach roughly the same configuration as if you had let +Docker do all of the configuration: + + # At one shell, start a container and + # leave its shell idle and running + + $ sudo docker run -i -t --rm --net=none base /bin/bash + root@63f36fc01b5f:/# + + # At another shell, learn the container process ID + # and create its namespace entry in /var/run/netns/ + # for the "ip netns" command we will be using below + + $ sudo docker inspect -f '{{.State.Pid}}' 63f36fc01b5f + 2778 + $ pid=2778 + $ sudo mkdir -p /var/run/netns + $ sudo ln -s /proc/$pid/ns/net /var/run/netns/$pid + + # Check the bridge’s IP address and netmask + + $ ip addr show docker0 + 21: docker0: ... + inet 172.17.42.1/16 scope global docker0 + ... + + # Create a pair of "peer" interfaces A and B, + # bind the A end to the bridge, and bring it up + + $ sudo ip link add A type veth peer name B + $ sudo brctl addif docker0 A + $ sudo ip link set A up + + # Place B inside the container's network namespace, + # rename to eth0, and activate it with a free IP + + $ sudo ip link set B netns $pid + $ sudo ip netns exec $pid ip link set dev B name eth0 + $ sudo ip netns exec $pid ip link set eth0 up + $ sudo ip netns exec $pid ip addr add 172.17.42.99/16 dev eth0 + $ sudo ip netns exec $pid ip route add default via 172.17.42.1 + +At this point your container should be able to perform networking +operations as usual. 
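+
+To double-check the result from the host, you can run commands through
+the container's namespace entry, reusing the same `$pid` and the bridge
+address from this example:
+
+    $ sudo ip netns exec $pid ip addr show eth0
+    $ sudo ip netns exec $pid ping -c 1 172.17.42.1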
+ +When you finally exit the shell and Docker cleans up the container, the +network namespace is destroyed along with our virtual `eth0` — whose +destruction in turn destroys interface `A` out in the Docker host and +automatically un-registers it from the `docker0` bridge. So everything +gets cleaned up without our having to run any extra commands! Well, +almost everything: + + # Clean up dangling symlinks in /var/run/netns + + find -L /var/run/netns -type l -delete + +Also note that while the script above used modern `ip` command instead +of old deprecated wrappers like `ipconfig` and `route`, these older +commands would also have worked inside of our container. The `ip addr` +command can be typed as `ip a` if you are in a hurry. + +Finally, note the importance of the `ip netns exec` command, which let +us reach inside and configure a network namespace as root. The same +commands would not have worked if run inside of the container, because +part of safe containerization is that Docker strips container processes +of the right to configure their own networks. Using `ip netns exec` is +what let us finish up the configuration without having to take the +dangerous step of running the container itself with `--privileged=true`. + +## Tools and Examples + +Before diving into the following sections on custom network topologies, +you might be interested in glancing at a few external tools or examples +of the same kinds of configuration. Here are two: + + * Jérôme Petazzoni has create a `pipework` shell script to help you + connect together containers in arbitrarily complex scenarios: + + + * Brandon Rhodes has created a whole network topology of Docker + containers for the next edition of Foundations of Python Network + Programming that includes routing, NAT’d firewalls, and servers that + offer HTTP, SMTP, POP, IMAP, Telnet, SSH, and FTP: + + +Both tools use networking commands very much like the ones you saw in +the previous section, and will see in the following sections. + +## Building a point-to-point connection + +By default, Docker attaches all containers to the virtual subnet +implemented by `docker0`. You can create containers that are each +connected to some different virtual subnet by creating your own bridge +as shown in [Building your own bridge](#bridge-building), starting each +container with `docker run --net=none`, and then attaching the +containers to your bridge with the shell commands shown in [How Docker +networks a container](#container-networking). + +But sometimes you want two particular containers to be able to +communicate directly without the added complexity of both being bound to +a host-wide Ethernet bridge. + +The solution is simple: when you create your pair of peer interfaces, +simply throw *both* of them into containers, and configure them as +classic point-to-point links. The two containers will then be able to +communicate directly (provided you manage to tell each container the +other’s IP address, of course). 
+You might adjust the instructions of the previous section to go
+something like this:
+
+    # Start up two containers in two terminal windows
+
+    $ sudo docker run -i -t --rm --net=none base /bin/bash
+    root@1f1f4c1f931a:/#
+
+    $ sudo docker run -i -t --rm --net=none base /bin/bash
+    root@12e343489d2f:/#
+
+    # Learn the container process IDs
+    # and create their namespace entries
+
+    $ sudo docker inspect -f '{{.State.Pid}}' 1f1f4c1f931a
+    2989
+    $ sudo docker inspect -f '{{.State.Pid}}' 12e343489d2f
+    3004
+    $ sudo mkdir -p /var/run/netns
+    $ sudo ln -s /proc/2989/ns/net /var/run/netns/2989
+    $ sudo ln -s /proc/3004/ns/net /var/run/netns/3004
+
+    # Create the "peer" interfaces and hand them out
+
+    $ sudo ip link add A type veth peer name B
+
+    $ sudo ip link set A netns 2989
+    $ sudo ip netns exec 2989 ip addr add 10.1.1.1/32 dev A
+    $ sudo ip netns exec 2989 ip link set A up
+    $ sudo ip netns exec 2989 ip route add 10.1.1.2/32 dev A
+
+    $ sudo ip link set B netns 3004
+    $ sudo ip netns exec 3004 ip addr add 10.1.1.2/32 dev B
+    $ sudo ip netns exec 3004 ip link set B up
+    $ sudo ip netns exec 3004 ip route add 10.1.1.1/32 dev B
+
+The two containers should now be able to ping each other and make
+connections successfully. Point-to-point links like this do not depend
+on a subnet or a netmask, but on the bare assertion made by `ip route`
+that some other single IP address is connected to a particular network
+interface.
+
+Note that point-to-point links can be safely combined with other kinds
+of network connectivity — there is no need to start the containers with
+`--net=none` if you want point-to-point links to be an addition to the
+container’s normal networking instead of a replacement.
+
+A final permutation of this pattern is to create the point-to-point link
+between the Docker host and one container, which would allow the host to
+communicate with that one container on some single IP address and thus
+communicate “out-of-band” of the bridge that connects the other, more
+usual containers. But unless you have very specific networking needs
+that drive you to such a solution, it is probably far preferable to use
+`--icc=false` to lock down inter-container communication, as we explored
+earlier.
diff --git a/docs/sources/use/port_redirection.md b/docs/sources/use/port_redirection.md
index 9f2ce98eae..315ef2650d 100644
--- a/docs/sources/use/port_redirection.md
+++ b/docs/sources/use/port_redirection.md
@@ -11,7 +11,7 @@ port. When this service runs inside a container, one can connect to the
 port after finding the IP address of the container as follows:
 
     # Find IP address of container with ID 
-    $ docker inspect | grep IPAddress | cut -d '"' -f 4
+    $ docker inspect --format='{{.NetworkSettings.IPAddress}}' 
 
 However, this IP address is local to the host system and the container
 port is not reachable by the outside world. Furthermore, even if the
@@ -29,10 +29,10 @@ containers, Docker provides the linking mechanism.
 
 ## Auto map all exposed ports on the host
 
 To bind all the exposed container ports to the host automatically, use
-`docker run -P `. The mapped host ports
-will be auto-selected from a pool of unused ports (49000..49900), and
-you will need to use `docker ps`, `docker inspect ` or
-`docker port ` to determine what they are.
+`docker run -P `. The mapped host ports will be auto-selected
+from a pool of unused ports (49000..49900), and you will need to use
+`docker ps`, `docker inspect ` or `docker port
+ ` to determine what they are.
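+
+For instance, in the hypothetical session below (the image name
+`webimage`, the container ID, and the chosen port are illustrative
+placeholders, not output to expect verbatim):
+
+    # Run a container with all of its exposed ports auto-mapped
+    $ sudo docker run -d -P webimage
+    5a382b1c35f9
+
+    # Ask which host port was picked for container port 80
+    $ sudo docker port 5a382b1c35f9 80
+    0.0.0.0:49153
+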
 ## Binding a port to a host interface
 
@@ -65,9 +65,9 @@ combinations described for TCP work. Here is only one example:
 
     # Bind UDP port 5353 of the container to UDP port 53 on 127.0.0.1 of the host machine.
    $ docker run -p 127.0.0.1:53:5353/udp 
 
-The command `docker port` lists the interface and port on the host machine
-bound to a given container port. It is useful when using dynamically allocated
-ports:
+The command `docker port` lists the interface and port on the host
+machine bound to a given container port. It is useful when using
+dynamically allocated ports:
 
     # Bind to a dynamically allocated port
    $ docker run -p 127.0.0.1::8080 --name dyn-bound 
 
@@ -79,24 +79,25 @@ ports:
 
 ## Linking a container
 
 Communication between two containers can also be established in a
-docker-specific way called linking.
+Docker-specific way called linking.
 
-To briefly present the concept of linking, let us consider two containers:
-`server`, containing the service, and `client`, accessing the service. Once
-`server` is running, `client` is started and links to server. Linking sets
-environment variables in `client` giving it some information about `server`.
-In this sense, linking is a method of service discovery.
+To briefly present the concept of linking, let us consider two
+containers: `server`, containing the service, and `client`, accessing
+the service. Once `server` is running, `client` is started and links to
+server. Linking sets environment variables in `client` giving it some
+information about `server`. In this sense, linking is a method of
+service discovery.
 
-Let us now get back to our topic of interest; communication between the two
-containers. We mentioned that the tricky part about this communication was that
-the IP address of `server` was not fixed. Therefore, some of the environment
-variables are going to be used to inform `client` about this IP address. This
-process called exposure, is possible because `client` is started after `server`
-has been started.
+Let us now get back to our topic of interest: communication between the
+two containers. We mentioned that the tricky part about this
+communication was that the IP address of `server` was not fixed.
+Therefore, some of the environment variables are going to be used to
+inform `client` about this IP address. This process, called exposure, is
+possible because the `client` is started after the `server` has been
+started.
 
-Here is a full example. On `server`, the port of interest is exposed. The
-exposure is done either through the `--expose` parameter to the `docker run`
-command, or the `EXPOSE` build command in a Dockerfile:
+Here is a full example. On `server`, the port of interest is exposed.
+The exposure is done either through the `--expose` parameter to the
+`docker run` command, or the `EXPOSE` build command in a `Dockerfile`:
 
     # Expose port 80
    $ docker run --expose 80 --name server 
 
@@ -106,7 +107,7 @@ The `client` then links to the `server`:
 
     # Link
    $ docker run --name client --link server:linked-server 
 
-`client` locally refers to `server` as `linked-server`. The following
+Here `client` locally refers to `server` as `linked-server`.
The following environment variables, among others, are available on `client`: # The default protocol, ip, and port of the service running in the container @@ -118,7 +119,9 @@ environment variables, among others, are available on `client`: $ LINKED-SERVER_PORT_80_TCP_ADDR=172.17.0.8 $ LINKED-SERVER_PORT_80_TCP_PORT=80 -This tells `client` that a service is running on port 80 of `server` and that -`server` is accessible at the IP address 172.17.0.8 +This tells `client` that a service is running on port 80 of `server` and +that `server` is accessible at the IP address `172.17.0.8`: + +> **Note:** +> Using the `-p` parameter also exposes the port. -Note: Using the `-p` parameter also exposes the port. diff --git a/docs/sources/use/puppet.md b/docs/sources/use/puppet.md index a0d20ab446..81ae05ba56 100644 --- a/docs/sources/use/puppet.md +++ b/docs/sources/use/puppet.md @@ -12,7 +12,7 @@ page_keywords: puppet, installation, usage, docker, documentation ## Requirements To use this guide you'll need a working installation of Puppet from -[Puppetlabs](https://puppetlabs.com) . +[Puppet Labs](https://puppetlabs.com) . The module also currently uses the official PPA so only works with Ubuntu. @@ -26,8 +26,8 @@ installed using the built-in module tool. $ puppet module install garethr/docker It can also be found on -[GitHub](https://github.com/garethr/garethr-docker) if you would -rather download the source. +[GitHub](https://github.com/garethr/garethr-docker) if you would rather +download the source. ## Usage @@ -88,5 +88,6 @@ Run also contains a number of optional parameters: dns => ['8.8.8.8', '8.8.4.4'], } -Note that ports, env, dns and volumes can be set with either a single -string or as above with an array of values. +> *Note:* +> The `ports`, `env`, `dns` and `volumes` attributes can be set with either a single +> string or as above with an array of values. diff --git a/docs/sources/use/working_with_links_names.md b/docs/sources/use/working_with_links_names.md index 6951e3c26f..d69f3f1751 100644 --- a/docs/sources/use/working_with_links_names.md +++ b/docs/sources/use/working_with_links_names.md @@ -6,18 +6,16 @@ page_keywords: Examples, Usage, links, linking, docker, documentation, examples, ## Introduction -From version 0.6.5 you are now able to `name` a container and `link` it to -another container by referring to its name. This will create a parent -> child -relationship where the parent container can see selected information about its -child. +From version 0.6.5 you are now able to `name` a container and `link` it +to another container by referring to its name. This will create a parent +-> child relationship where the parent container can see selected +information about its child. ## Container Naming -New in version v0.6.5. - -You can now name your container by using the `--name` flag. If no name is -provided, Docker will automatically generate a name. You can see this name -using the `docker ps` command. +You can now name your container by using the `--name` flag. If no name +is provided, Docker will automatically generate a name. You can see this +name using the `docker ps` command. # format is "sudo docker run --name " $ sudo docker run --name test ubuntu /bin/bash @@ -29,48 +27,49 @@ using the `docker ps` command. ## Links: service discovery for docker -New in version v0.6.5. - Links allow containers to discover and securely communicate with each -other by using the flag `-link name:alias`. Inter-container communication -can be disabled with the daemon flag `-icc=false`. 
With this flag set to
-`false`, Container A cannot access Container unless explicitly allowed via
-a link. This is a huge win for securing your containers. When two containers
-are linked together Docker creates a parent child relationship between the
-containers. The parent container will be able to access information via
-environment variables of the child such as name, exposed ports, IP and other
-selected environment variables.
+other by using the flag `--link name:alias`. Inter-container
+communication can be disabled with the daemon flag `--icc=false`. With
+this flag set to `false`, Container A cannot access Container B unless
+explicitly allowed via a link. This is a huge win for securing your
+containers. When two containers are linked together Docker creates a
+parent-child relationship between the containers. The parent container
+will be able to access information via environment variables of the
+child such as name, exposed ports, IP and other selected environment
+variables.
 
-When linking two containers Docker will use the exposed ports of the container
-to create a secure tunnel for the parent to access. If a database container
-only exposes port 8080 then the linked container will only be allowed to access
-port 8080 and nothing else if inter-container communication is set to false.
+When linking two containers Docker will use the exposed ports of the
+container to create a secure tunnel for the parent to access. If a
+database container only exposes port 8080 then the linked container will
+only be allowed to access port 8080 and nothing else if inter-container
+communication is set to false.
 
-For example, there is an image called `crosbymichael/redis` that exposes the
-port 6379 and starts the Redis server. Let's name the container as `redis`
-based on that image and run it as daemon.
+For example, there is an image called `crosbymichael/redis` that exposes
+the port 6379 and starts the Redis server. Let's name the container
+`redis` based on that image and run it as a daemon.
 
     $ sudo docker run -d --name redis crosbymichael/redis
 
-We can issue all the commands that you would expect using the name `redis`;
-start, stop, attach, using the name for our container. The name also allows
-us to link other containers into this one.
+We can issue all the commands that you would expect using the name
+`redis`: start, stop, attach, using the name for our container. The name
+also allows us to link other containers into this one.
 
-Next, we can start a new web application that has a dependency on Redis and
-apply a link to connect both containers. If you noticed when running our Redis
-server we did not use the `-p` flag to publish the Redis port to the host
-system. Redis exposed port 6379 and this is all we need to establish a link.
+Next, we can start a new web application that has a dependency on Redis
+and apply a link to connect both containers. You may have noticed that
+when running our Redis server we did not use the `-p` flag to publish
+the Redis port to the host system. Redis exposed port 6379 and this is
+all we need to establish a link.
 
     $ sudo docker run -t -i --link redis:db --name webapp ubuntu bash
 
 When you specified `--link redis:db` you are telling Docker to link the
 container named `redis` into this new container with the alias `db`.
-Environment variables are prefixed with the alias so that the parent container
-can access network and environment information from the containers that are
-linked into it.
+Environment variables are prefixed with the alias so that the parent +container can access network and environment information from the +containers that are linked into it. -If we inspect the environment variables of the second container, we would see -all the information about the child container. +If we inspect the environment variables of the second container, we +would see all the information about the child container. $ root@4c01db0b339c:/# env @@ -90,20 +89,20 @@ all the information about the child container. _=/usr/bin/env root@4c01db0b339c:/# -Accessing the network information along with the environment of the child -container allows us to easily connect to the Redis service on the specific -IP and port in the environment. +Accessing the network information along with the environment of the +child container allows us to easily connect to the Redis service on the +specific IP and port in the environment. > **Note**: > These Environment variables are only set for the first process in the > container. Similarly, some daemons (such as `sshd`) > will scrub them when spawning shells for connection. -You can work around this by storing the initial `env` in a file, or looking -at `/proc/1/environ`. +You can work around this by storing the initial `env` in a file, or +looking at `/proc/1/environ`. -Running `docker ps` shows the 2 containers, and the `webapp/db` alias name for -the Redis container. +Running `docker ps` shows the 2 containers, and the `webapp/db` alias +name for the Redis container. $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES @@ -112,13 +111,13 @@ the Redis container. ## Resolving Links by Name -New in version v0.11. +> *Note:* New in version v0.11. Linked containers can be accessed by hostname. Hostnames are mapped by appending entries to '/etc/hosts' using the linked container's alias. -For example, linking a container using '--link redis:db' will generate the -following '/etc/hosts' file: +For example, linking a container using '--link redis:db' will generate +the following '/etc/hosts' file: root@6541a75d44a0:/# cat /etc/hosts 172.17.0.3 6541a75d44a0 diff --git a/docs/sources/use/working_with_volumes.md b/docs/sources/use/working_with_volumes.md index 7d6136b85a..4c0a46ff1a 100644 --- a/docs/sources/use/working_with_volumes.md +++ b/docs/sources/use/working_with_volumes.md @@ -8,8 +8,8 @@ page_keywords: Examples, Usage, volume, docker, documentation, examples A *data volume* is a specially-designated directory within one or more containers that bypasses the [*Union File -System*](/terms/layer/#ufs-def) to provide several useful features -for persistent or shared data: +System*](/terms/layer/#ufs-def) to provide several useful features for +persistent or shared data: - **Data volumes can be shared and reused between containers:** This is the feature that makes data volumes so powerful. You can @@ -28,30 +28,26 @@ for persistent or shared data: Each container can have zero or more data volumes. -New in version v0.3.0. - ## Getting Started -Using data volumes is as simple as adding a `-v` -parameter to the `docker run` command. The -`-v` parameter can be used more than once in order -to create more volumes within the new container. To create a new +Using data volumes is as simple as adding a `-v` parameter to the +`docker run` command. The `-v` parameter can be used more than once in +order to create more volumes within the new container. 
To create a new
 container with two new volumes:
 
     $ docker run -v /var/volume1 -v /var/volume2 busybox true
 
 This command will create the new container with two new volumes that
-exits instantly (`true` is pretty much the smallest,
-simplest program that you can run). Once created you can mount its
-volumes in any other container using the `--volumes-from`
-option; irrespective of whether the container is running or
-not.
+exits instantly (`true` is pretty much the smallest, simplest program
+that you can run). You can then mount its volumes in any other container
+using the `docker run` `--volumes-from` option, irrespective of whether
+the volume container is running or not.
 
-Or, you can use the VOLUME instruction in a Dockerfile to add one or
+Or, you can use the `VOLUME` instruction in a `Dockerfile` to add one or
 more new volumes to any container created from that image:
 
     # BUILD-USING: $ docker build -t data .
-    # RUN-USING: $ docker run -name DATA data
+    # RUN-USING: $ docker run --name DATA data
     FROM busybox
     VOLUME ["/var/volume1", "/var/volume2"]
     CMD ["/bin/true"]
@@ -63,23 +59,23 @@ containers, or want to use from non-persistent containers, it's best to
 create a named Data Volume Container, and then to mount the data from
 it.
 
-Create a named container with volumes to share (`/var/volume1`
-and `/var/volume2`):
+Create a named container with volumes to share (`/var/volume1` and
+`/var/volume2`):
 
-    $ docker run -v /var/volume1 -v /var/volume2 -name DATA busybox true
+    $ docker run -v /var/volume1 -v /var/volume2 --name DATA busybox true
 
 Then mount those data volumes into your application containers:
 
-    $ docker run -t -i -rm -volumes-from DATA -name client1 ubuntu bash
+    $ docker run -t -i --rm --volumes-from DATA --name client1 ubuntu bash
 
-You can use multiple `-volumes-from` parameters to
-bring together multiple data volumes from multiple containers.
+You can use multiple `--volumes-from` parameters to bring together
+multiple data volumes from multiple containers.
 
-Interestingly, you can mount the volumes that came from the
-`DATA` container in yet another container via the
-`client1` middleman container:
+Interestingly, you can mount the volumes that came from the `DATA`
+container in yet another container via the `client1` middleman
+container:
 
-    $ docker run -t -i -rm -volumes-from client1 -name client2 ubuntu bash
+    $ docker run -t -i --rm --volumes-from client1 --name client2 ubuntu bash
 
 This allows you to abstract the actual data source from users of that
 data, similar to [*Ambassador Pattern Linking*](
@@ -94,14 +90,15 @@ upgrade, or effectively migrate data volumes between containers.
 
     -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro].
 
-You must specify an absolute path for `host-dir`. If `host-dir` is missing from
-the command, then Docker creates a new volume. If `host-dir` is present but
-points to a non-existent directory on the host, Docker will automatically
-create this directory and use it as the source of the bind-mount.
+You must specify an absolute path for `host-dir`. If `host-dir` is
+missing from the command, then Docker creates a new volume. If
+`host-dir` is present but points to a non-existent directory on the
+host, Docker will automatically create this directory and use it as the
+source of the bind-mount.
 
-Note that this is not available from a Dockerfile due the portability and
-sharing purpose of it. The `host-dir` volumes are entirely host-dependent
-and might not work on any other machine.
+Note that this is not available from a `Dockerfile` because of its
+portability and sharing purpose. The `host-dir` volumes are entirely
+host-dependent and might not work on any other machine.
 
 For example:
 
@@ -110,28 +107,30 @@ For example:
 
     # Example: $ sudo docker run -i -t -v /var/log:/logs_from_host:ro ubuntu bash
 
-The command above mounts the host directory `/var/log` into the container
-with *read only* permissions as `/logs_from_host`.
+The command above mounts the host directory `/var/log` into the
+container with *read only* permissions as `/logs_from_host`.
 
 New in version v0.5.0.
 
 ### Note for OS/X users and remote daemon users:
 
-OS/X users run `boot2docker` to create a minimalist virtual machine running
-the docker daemon. That virtual machine then launches docker commands on
-behalf of the OS/X command line. The means that `host directories` refer to
-directories in the `boot2docker` virtual machine, not the OS/X filesystem.
+OS/X users run `boot2docker` to create a minimalist virtual machine
+running the docker daemon. That virtual machine then launches docker
+commands on behalf of the OS/X command line. This means that `host
+directories` refer to directories in the `boot2docker` virtual machine,
+not the OS/X filesystem.
 
-Similarly, anytime when the docker daemon is on a remote machine, the
+Similarly, whenever the docker daemon is on a remote machine, the
 `host directories` always refer to directories on the daemon's machine.
 
 ### Backup, restore, or migrate data volumes
 
-You cannot back up volumes using `docker export`, `docker save` and `docker cp`
-because they are external to images. Instead you can use `--volumes-from` to
-start a new container that can access the data-container's volume. For example:
+You cannot back up volumes using `docker export`, `docker save` and
+`docker cp` because they are external to images. Instead you can use
+`--volumes-from` to start a new container that can access the
+data-container's volume. For example:
 
-    $ sudo docker run -rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data
+    $ sudo docker run --rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data
 
 - `-rm`: remove the container when it exits
@@ -144,16 +143,17 @@ start a new container that can access the data-container's volume. For example:
 
 - `tar cvf /backup/backup.tar /data`: creates an uncompressed tar file of all the files in the `/data` directory
 
-Then to restore to the same container, or another that you`ve made elsewhere:
+Then to restore to the same container, or another that you've made
+elsewhere:
 
     # create a new data container
-    $ sudo docker run -v /data -name DATA2 busybox true
+    $ sudo docker run -v /data --name DATA2 busybox true
     # untar the backup files into the new container's data volume
-    $ sudo docker run -rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar
+    $ sudo docker run --rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar
     data/
     data/sven.txt
     # compare to the original container
-    $ sudo docker run -rm --volumes-from DATA -v `pwd`:/backup busybox ls /data
+    $ sudo docker run --rm --volumes-from DATA -v `pwd`:/backup busybox ls /data
     sven.txt
 
 You can use the basic techniques above to automate backup, migration and
diff --git a/docs/sources/use/workingwithrepository.md b/docs/sources/use/workingwithrepository.md
index 07f130a909..2b4ad613cc 100644
--- a/docs/sources/use/workingwithrepository.md
+++ b/docs/sources/use/workingwithrepository.md
@@ -18,15 +18,14 @@ You can find one or more repositories hosted on a *registry*. There are
 two types of *registry*: public and private. There's also a default
 *registry* that Docker uses which is called
 [Docker.io](http://index.docker.io).
 
-[Docker.io](http://index.docker.io) is the home of
-"top-level" repositories and public "user" repositories. The Docker
-project provides [Docker.io](http://index.docker.io) to host public and
-[private repositories](https://index.docker.io/plans/), namespaced by
-user. We provide user authentication and search over all the public
-repositories.
+[Docker.io](http://index.docker.io) is the home of "top-level"
+repositories and public "user" repositories. The Docker project
+provides [Docker.io](http://index.docker.io) to host public and [private
+repositories](https://index.docker.io/plans/), namespaced by user. We
+provide user authentication and search over all the public repositories.
 
-Docker acts as a client for these services via the `docker search, pull,
-login` and `push` commands.
+Docker acts as a client for these services via the `docker search`,
+`pull`, `login` and `push` commands.
 
 ## Repositories
 
@@ -42,8 +41,8 @@ There are two types of public repositories: *top-level* repositories
 which are controlled by the Docker team, and *user* repositories created
 by individual contributors. Anyone can read from these repositories –
 they really help people get started quickly! You could also use
-[*Trusted Builds*](#trusted-builds) if you need to keep
-control of who accesses your images.
+[*Trusted Builds*](#trusted-builds) if you need to keep control of who
+accesses your images.
 
 - Top-level repositories can easily be recognized by **not** having a
   `/` (slash) in their name. These repositories represent trusted images
@@ -74,7 +73,7 @@ user name or description:
 
     Search the docker index for images
 
-      -notrunc=false: Don᾿t truncate output
+      --no-trunc=false: Don't truncate output
 
     $ sudo docker search centos
     Found 25 results matching your query ("centos")
     NAME                             DESCRIPTION
     ...
 
 There you can see two example results: `centos` and
-`slantview/centos-chef-solo`. The second result
-shows that it comes from the public repository of a user,
-`slantview/`, while the first result
-(`centos`) doesn't explicitly list a repository so
-it comes from the trusted top-level namespace. The `/`
-character separates a user's repository and the image name.
+`slantview/centos-chef-solo`. The second result shows that it comes from
+the public repository of a user, `slantview/`, while the first result
+(`centos`) doesn't explicitly list a repository so it comes from the
+trusted top-level namespace. The `/` character separates a user's
+repository and the image name.
 
 Once you have found the image name, you can download it:
 
@@ -98,8 +96,8 @@ Once you have found the image name, you can download it:
     539c0211cd76: Download complete
 
 What can you do with that image? Check out the
-[*Examples*](/examples/#example-list) and, when you're ready with
-your own image, come back here to learn how to share it.
+[*Examples*](/examples/#example-list) and, when you're ready with your
+own image, come back here to learn how to share it.
 
 ## Contributing to Docker.io
 
@@ -114,10 +112,9 @@ first. You can create your username and login on
 
 This will prompt you for a username, which will become a public
 namespace for your public repositories.
 
-If your username is available then `docker` will
-also prompt you to enter a password and your e-mail address. It will
-then automatically log you in. Now you're ready to commit and push your
-own images!
+If your username is available then `docker` will also prompt you to
+enter a password and your e-mail address. It will then automatically log
+you in. Now you're ready to commit and push your own images!
 
 > **Note:**
 > Your authentication credentials will be stored in the [`.dockercfg`
 
@@ -149,17 +146,17 @@ or tag.
 
 ## Trusted Builds
 
-Trusted Builds automate the building and updating of images from GitHub,
-directly on `docker.io` servers. It works by adding
-a commit hook to your selected repository, triggering a build and update
-when you push a commit.
+Trusted Builds automate the building and updating of images from GitHub
+or Bitbucket, directly on Docker.io. It works by adding a commit hook to
+your selected repository, triggering a build and update when you push a
+commit.
 
 ### To set up a trusted build
 
 1. Create a [Docker.io account](https://index.docker.io/) and login.
-2. Link your GitHub account through the `Link Accounts` menu.
+2. Link your GitHub or Bitbucket account through the [`Link Accounts`](https://index.docker.io/account/accounts/) menu.
 3. [Configure a Trusted build](https://index.docker.io/builds/).
-4. Pick a GitHub project that has a `Dockerfile` that you want to build.
+4. Pick a GitHub or Bitbucket project that has a `Dockerfile` that you want to build.
 5. Pick the branch you want to build (the default is the `master` branch).
 6. Give the Trusted Build a name.
 7. Assign an optional Docker tag to the Build.
@@ -168,17 +165,17 @@ when you push a commit.
 
 Once the Trusted Build is configured it will automatically trigger a
 build, and in a few minutes, if there are no errors, you will see your
 new trusted build on the [Docker.io](https://index.docker.io) Registry.
-It will will stay in sync with your GitHub repo until you deactivate the
-Trusted Build.
+It will stay in sync with your GitHub or Bitbucket repository until you
+deactivate the Trusted Build.
 
 If you want to see the status of your Trusted Builds you can go to your
-[Trusted Builds page](https://index.docker.io/builds/) on the Docker
-index, and it will show you the status of your builds, and the build
-history.
+[Trusted Builds page](https://index.docker.io/builds/) on Docker.io,
+and it will show you the status of your builds and the build history.
 
-Once you`ve created a Trusted Build you can deactivate or delete it. You
+Once you've created a Trusted Build you can deactivate or delete it. You
 cannot, however, push to a Trusted Build with the `docker push` command.
-You can only manage it by committing code to your GitHub repository.
+You can only manage it by committing code to your GitHub or Bitbucket
+repository.
 
 You can create multiple Trusted Builds per repository and configure them
 to point to specific `Dockerfile`s or Git branches.
 
@@ -206,9 +203,10 @@ identify a host), like this:
 
 Once a repository has your registry's host name as part of the tag, you
 can push and pull it like any other repository, but it will **not** be
-searchable (or indexed at all) on [Docker.io](http://index.docker.io), and there will be
-no user name checking performed. Your registry will function completely
-independently from the [Docker.io](http://index.docker.io) registry.
+searchable (or indexed at all) on [Docker.io](http://index.docker.io),
+and there will be no user name checking performed. Your registry will
+function completely independently from the
+[Docker.io](http://index.docker.io) registry.
 
@@ -219,15 +217,20 @@ http://blog.docker.io/2013/07/how-to-use-your-own-registry/)
 
 ## Authentication File
 
-The authentication is stored in a json file, `.dockercfg`
-located in your home directory. It supports multiple registry
-urls.
+The authentication is stored in a JSON file, `.dockercfg`, located in
+your home directory. It supports multiple registry URLs.
 
-`docker login` will create the "[https://index.docker.io/v1/](
-https://index.docker.io/v1/)" key.
+The `docker login` command will create the following key:
 
-`docker login https://my-registry.com` will create the
-"[https://my-registry.com](https://my-registry.com)" key.
+    [https://index.docker.io/v1/](https://index.docker.io/v1/)
+
+The `docker login https://my-registry.com` command will create the
+following key:
+
+    [https://my-registry.com](https://my-registry.com)
For example: @@ -243,4 +246,6 @@ For example: } The `auth` field represents -`base64(:)` + + base64(:) + diff --git a/docs/theme/mkdocs/base.html b/docs/theme/mkdocs/base.html index ca418b3cd7..66bb2d3d68 100644 --- a/docs/theme/mkdocs/base.html +++ b/docs/theme/mkdocs/base.html @@ -67,5 +67,19 @@ + diff --git a/docs/theme/mkdocs/css/base.css b/docs/theme/mkdocs/css/base.css index 999a0dedbe..956e17a263 100644 --- a/docs/theme/mkdocs/css/base.css +++ b/docs/theme/mkdocs/css/base.css @@ -59,6 +59,11 @@ h6, padding: 0.5em 0.75em !important; line-height: 1.8em; background: #fff; + overflow-x: auto; +} +#content pre code { + word-wrap: normal; + white-space: pre; } #content blockquote { background: #fff; diff --git a/engine/engine.go b/engine/engine.go index 58b43eca04..5c3228d5d3 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -3,11 +3,12 @@ package engine import ( "bufio" "fmt" - "github.com/dotcloud/docker/utils" "io" "os" "sort" "strings" + + "github.com/dotcloud/docker/utils" ) // Installer is a standard interface for objects which can "install" themselves diff --git a/engine/env.go b/engine/env.go index f96795f48c..f63f29e10f 100644 --- a/engine/env.go +++ b/engine/env.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "sort" "strconv" "strings" ) @@ -252,134 +251,26 @@ func (env *Env) Map() map[string]string { return m } -type Table struct { - Data []*Env - sortKey string - Chan chan *Env -} - -func NewTable(sortKey string, sizeHint int) *Table { - return &Table{ - make([]*Env, 0, sizeHint), - sortKey, - make(chan *Env), +// MultiMap returns a representation of env as a +// map of string arrays, keyed by string. +// This is the same structure as http headers for example, +// which allow each key to have multiple values. +func (env *Env) MultiMap() map[string][]string { + m := make(map[string][]string) + for _, kv := range *env { + parts := strings.SplitN(kv, "=", 2) + m[parts[0]] = append(m[parts[0]], parts[1]) } + return m } -func (t *Table) SetKey(sortKey string) { - t.sortKey = sortKey -} - -func (t *Table) Add(env *Env) { - t.Data = append(t.Data, env) -} - -func (t *Table) Len() int { - return len(t.Data) -} - -func (t *Table) Less(a, b int) bool { - return t.lessBy(a, b, t.sortKey) -} - -func (t *Table) lessBy(a, b int, by string) bool { - keyA := t.Data[a].Get(by) - keyB := t.Data[b].Get(by) - intA, errA := strconv.ParseInt(keyA, 10, 64) - intB, errB := strconv.ParseInt(keyB, 10, 64) - if errA == nil && errB == nil { - return intA < intB - } - return keyA < keyB -} - -func (t *Table) Swap(a, b int) { - tmp := t.Data[a] - t.Data[a] = t.Data[b] - t.Data[b] = tmp -} - -func (t *Table) Sort() { - sort.Sort(t) -} - -func (t *Table) ReverseSort() { - sort.Sort(sort.Reverse(t)) -} - -func (t *Table) WriteListTo(dst io.Writer) (n int64, err error) { - if _, err := dst.Write([]byte{'['}); err != nil { - return -1, err - } - n = 1 - for i, env := range t.Data { - bytes, err := env.WriteTo(dst) - if err != nil { - return -1, err - } - n += bytes - if i != len(t.Data)-1 { - if _, err := dst.Write([]byte{','}); err != nil { - return -1, err - } - n += 1 +// InitMultiMap removes all values in env, then initializes +// new values from the contents of m. 
+func (env *Env) InitMultiMap(m map[string][]string) { + (*env) = make([]string, 0, len(m)) + for k, vals := range m { + for _, v := range vals { + env.Set(k, v) } } - if _, err := dst.Write([]byte{']'}); err != nil { - return -1, err - } - return n + 1, nil -} - -func (t *Table) ToListString() (string, error) { - buffer := bytes.NewBuffer(nil) - if _, err := t.WriteListTo(buffer); err != nil { - return "", err - } - return buffer.String(), nil -} - -func (t *Table) WriteTo(dst io.Writer) (n int64, err error) { - for _, env := range t.Data { - bytes, err := env.WriteTo(dst) - if err != nil { - return -1, err - } - n += bytes - } - return n, nil -} - -func (t *Table) ReadListFrom(src []byte) (n int64, err error) { - var array []interface{} - - if err := json.Unmarshal(src, &array); err != nil { - return -1, err - } - - for _, item := range array { - if m, ok := item.(map[string]interface{}); ok { - env := &Env{} - for key, value := range m { - env.SetAuto(key, value) - } - t.Add(env) - } - } - - return int64(len(src)), nil -} - -func (t *Table) ReadFrom(src io.Reader) (n int64, err error) { - decoder := NewDecoder(src) - for { - env, err := decoder.Decode() - if err == io.EOF { - return 0, nil - } else if err != nil { - return -1, err - } - t.Add(env) - } - return 0, nil } diff --git a/engine/env_test.go b/engine/env_test.go index 0c66cea04e..39669d6780 100644 --- a/engine/env_test.go +++ b/engine/env_test.go @@ -123,3 +123,23 @@ func TestEnviron(t *testing.T) { t.Fatalf("bar not found in the environ") } } + +func TestMultiMap(t *testing.T) { + e := &Env{} + e.Set("foo", "bar") + e.Set("bar", "baz") + e.Set("hello", "world") + m := e.MultiMap() + e2 := &Env{} + e2.Set("old_key", "something something something") + e2.InitMultiMap(m) + if v := e2.Get("old_key"); v != "" { + t.Fatalf("%#v", v) + } + if v := e2.Get("bar"); v != "baz" { + t.Fatalf("%#v", v) + } + if v := e2.Get("hello"); v != "world" { + t.Fatalf("%#v", v) + } +} diff --git a/engine/job.go b/engine/job.go index b56155ac1c..ab8120dd44 100644 --- a/engine/job.go +++ b/engine/job.go @@ -1,6 +1,7 @@ package engine import ( + "bytes" "fmt" "io" "strings" @@ -56,8 +57,8 @@ func (job *Job) Run() error { defer func() { job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString()) }() - var errorMessage string - job.Stderr.AddString(&errorMessage) + var errorMessage = bytes.NewBuffer(nil) + job.Stderr.Add(errorMessage) if job.handler == nil { job.Errorf("%s: command not found", job.Name) job.status = 127 @@ -72,8 +73,11 @@ func (job *Job) Run() error { if err := job.Stderr.Close(); err != nil { return err } + if err := job.Stdin.Close(); err != nil { + return err + } if job.status != 0 { - return fmt.Errorf("%s", errorMessage) + return fmt.Errorf("%s", Tail(errorMessage, 1)) } return nil } diff --git a/engine/job_test.go b/engine/job_test.go index 1f927cbafc..67e723988e 100644 --- a/engine/job_test.go +++ b/engine/job_test.go @@ -1,6 +1,8 @@ package engine import ( + "bytes" + "fmt" "testing" ) @@ -40,13 +42,13 @@ func TestJobStdoutString(t *testing.T) { }) job := eng.Job("say_something_in_stdout") - var output string - if err := job.Stdout.AddString(&output); err != nil { - t.Fatal(err) - } + var outputBuffer = bytes.NewBuffer(nil) + job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { t.Fatal(err) } + fmt.Println(outputBuffer) + var output = Tail(outputBuffer, 1) if expectedOutput := "Hello world"; output != expectedOutput { t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output) } @@ -61,13 +63,12 
@@ func TestJobStderrString(t *testing.T) { }) job := eng.Job("say_something_in_stderr") - var output string - if err := job.Stderr.AddString(&output); err != nil { - t.Fatal(err) - } + var outputBuffer = bytes.NewBuffer(nil) + job.Stderr.Add(outputBuffer) if err := job.Run(); err != nil { t.Fatal(err) } + var output = Tail(outputBuffer, 1) if expectedOutput := "Something happened"; output != expectedOutput { t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output) } diff --git a/engine/remote.go b/engine/remote.go index 60aad243c5..974ca02137 100644 --- a/engine/remote.go +++ b/engine/remote.go @@ -25,7 +25,9 @@ func (s *Sender) Install(eng *Engine) error { } func (s *Sender) Handle(job *Job) Status { - msg := data.Empty().Set("cmd", append([]string{job.Name}, job.Args...)...) + cmd := append([]string{job.Name}, job.Args...) + env := data.Encode(job.Env().MultiMap()) + msg := data.Empty().Set("cmd", cmd...).Set("env", env) peer, err := beam.SendConn(s, msg.Bytes()) if err != nil { return job.Errorf("beamsend: %v", err) @@ -36,20 +38,27 @@ func (s *Sender) Handle(job *Job) Status { r := beam.NewRouter(nil) r.NewRoute().KeyStartsWith("cmd", "log", "stdout").HasAttachment().Handler(func(p []byte, stdout *os.File) error { tasks.Add(1) - io.Copy(job.Stdout, stdout) - tasks.Done() + go func() { + io.Copy(job.Stdout, stdout) + stdout.Close() + tasks.Done() + }() return nil }) r.NewRoute().KeyStartsWith("cmd", "log", "stderr").HasAttachment().Handler(func(p []byte, stderr *os.File) error { tasks.Add(1) - io.Copy(job.Stderr, stderr) - tasks.Done() + go func() { + io.Copy(job.Stderr, stderr) + stderr.Close() + tasks.Done() + }() return nil }) r.NewRoute().KeyStartsWith("cmd", "log", "stdin").HasAttachment().Handler(func(p []byte, stdin *os.File) error { - tasks.Add(1) - io.Copy(stdin, job.Stdin) - tasks.Done() + go func() { + io.Copy(stdin, job.Stdin) + stdin.Close() + }() return nil }) var status int @@ -90,19 +99,28 @@ func (rcv *Receiver) Run() error { f.Close() return err } - cmd := data.Message(p).Get("cmd") + f.Close() + defer peer.Close() + msg := data.Message(p) + cmd := msg.Get("cmd") job := rcv.Engine.Job(cmd[0], cmd[1:]...) 
- stdout, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stdout").Bytes()) + // Decode env + env, err := data.Decode(msg.GetOne("env")) + if err != nil { + return fmt.Errorf("error decoding 'env': %v", err) + } + job.Env().InitMultiMap(env) + stdout, err := beam.SendRPipe(peer, data.Empty().Set("cmd", "log", "stdout").Bytes()) if err != nil { return err } job.Stdout.Add(stdout) - stderr, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stderr").Bytes()) + stderr, err := beam.SendRPipe(peer, data.Empty().Set("cmd", "log", "stderr").Bytes()) if err != nil { return err } job.Stderr.Add(stderr) - stdin, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stdin").Bytes()) + stdin, err := beam.SendWPipe(peer, data.Empty().Set("cmd", "log", "stdin").Bytes()) if err != nil { return err } diff --git a/engine/remote_test.go b/engine/remote_test.go index 54092ec934..e59ac78cc0 100644 --- a/engine/remote_test.go +++ b/engine/remote_test.go @@ -1,3 +1,163 @@ package engine -import () +import ( + "bufio" + "bytes" + "fmt" + "github.com/dotcloud/docker/pkg/beam" + "io" + "strings" + "testing" + "time" +) + +func TestHelloWorld(t *testing.T) { + for i := 0; i < 10; i++ { + testRemote(t, + + // Sender side + func(eng *Engine) { + job := eng.Job("echo", "hello", "world") + out := &bytes.Buffer{} + job.Stdout.Add(out) + job.Run() + if job.status != StatusOK { + t.Fatalf("#%v", job.StatusCode()) + } + lines := bufio.NewScanner(out) + var i int + for lines.Scan() { + if lines.Text() != "hello world" { + t.Fatalf("%#v", lines.Text()) + } + i++ + } + if i != 1000 { + t.Fatalf("%#v", i) + } + }, + + // Receiver side + func(eng *Engine) { + eng.Register("echo", func(job *Job) Status { + // Simulate more output with a delay in the middle + for i := 0; i < 500; i++ { + fmt.Fprintf(job.Stdout, "%s\n", strings.Join(job.Args, " ")) + } + time.Sleep(5 * time.Millisecond) + for i := 0; i < 500; i++ { + fmt.Fprintf(job.Stdout, "%s\n", strings.Join(job.Args, " ")) + } + return StatusOK + }) + }, + ) + } +} + +func TestStdin(t *testing.T) { + testRemote(t, + + func(eng *Engine) { + job := eng.Job("mirror") + job.Stdin.Add(strings.NewReader("hello world!\n")) + out := &bytes.Buffer{} + job.Stdout.Add(out) + if err := job.Run(); err != nil { + t.Fatal(err) + } + if out.String() != "hello world!\n" { + t.Fatalf("%#v", out.String()) + } + }, + + func(eng *Engine) { + eng.Register("mirror", func(job *Job) Status { + if _, err := io.Copy(job.Stdout, job.Stdin); err != nil { + t.Fatal(err) + } + return StatusOK + }) + }, + ) +} + +func TestEnv(t *testing.T) { + var ( + foo string + answer int + shadok_words []string + ) + testRemote(t, + + func(eng *Engine) { + job := eng.Job("sendenv") + job.Env().Set("foo", "bar") + job.Env().SetInt("answer", 42) + job.Env().SetList("shadok_words", []string{"ga", "bu", "zo", "meu"}) + if err := job.Run(); err != nil { + t.Fatal(err) + } + }, + + func(eng *Engine) { + eng.Register("sendenv", func(job *Job) Status { + foo = job.Env().Get("foo") + answer = job.Env().GetInt("answer") + shadok_words = job.Env().GetList("shadok_words") + return StatusOK + }) + }, + ) + // Check for results here rather than inside the job handler, + // otherwise the tests may incorrectly pass if the handler is not + // called. 
+ if foo != "bar" { + t.Fatalf("%#v", foo) + } + if answer != 42 { + t.Fatalf("%#v", answer) + } + if strings.Join(shadok_words, ", ") != "ga, bu, zo, meu" { + t.Fatalf("%#v", shadok_words) + } +} + +// Helpers + +func testRemote(t *testing.T, senderSide, receiverSide func(*Engine)) { + sndConn, rcvConn, err := beam.USocketPair() + if err != nil { + t.Fatal(err) + } + defer sndConn.Close() + defer rcvConn.Close() + sender := NewSender(sndConn) + receiver := NewReceiver(rcvConn) + + // Setup the sender side + eng := New() + sender.Install(eng) + + // Setup the receiver side + receiverSide(receiver.Engine) + go receiver.Run() + + timeout(t, func() { + senderSide(eng) + }) +} + +func timeout(t *testing.T, f func()) { + onTimeout := time.After(100 * time.Millisecond) + onDone := make(chan bool) + go func() { + f() + close(onDone) + }() + select { + case <-onTimeout: + t.Fatalf("timeout") + case <-onDone: + } +} diff --git a/engine/streams.go b/engine/streams.go index 48f031de8f..99e876e17b 100644 --- a/engine/streams.go +++ b/engine/streams.go @@ -1,8 +1,7 @@ package engine import ( - "bufio" - "container/ring" + "bytes" "fmt" "io" "io/ioutil" @@ -16,6 +15,28 @@ type Output struct { used bool } +// Tail returns the n last lines of a buffer +// stripped out of the last \n, if any +// if n <= 0, returns an empty string +func Tail(buffer *bytes.Buffer, n int) string { + if n <= 0 { + return "" + } + bytes := buffer.Bytes() + if len(bytes) > 0 && bytes[len(bytes)-1] == '\n' { + bytes = bytes[:len(bytes)-1] + } + for i := buffer.Len() - 2; i >= 0; i-- { + if bytes[i] == '\n' { + n-- + if n == 0 { + return string(bytes[i+1:]) + } + } + } + return string(bytes) +} + // NewOutput returns a new Output object with no destinations attached. // Writing to an empty Output will cause the written data to be discarded. func NewOutput() *Output { @@ -58,42 +79,6 @@ func (o *Output) AddPipe() (io.Reader, error) { return r, nil } -// AddTail starts a new goroutine which will read all subsequent data written to the output, -// line by line, and append the last `n` lines to `dst`. -func (o *Output) AddTail(dst *[]string, n int) error { - src, err := o.AddPipe() - if err != nil { - return err - } - o.tasks.Add(1) - go func() { - defer o.tasks.Done() - Tail(src, n, dst) - }() - return nil -} - -// AddString starts a new goroutine which will read all subsequent data written to the output, -// line by line, and store the last line into `dst`. -func (o *Output) AddString(dst *string) error { - src, err := o.AddPipe() - if err != nil { - return err - } - o.tasks.Add(1) - go func() { - defer o.tasks.Done() - lines := make([]string, 0, 1) - Tail(src, 1, &lines) - if len(lines) == 0 { - *dst = "" - } else { - *dst = lines[0] - } - }() - return nil -} - // Write writes the same data to all registered destinations. // This method is thread-safe. 
func (o *Output) Write(p []byte) (n int, err error) { @@ -118,7 +103,7 @@ func (o *Output) Close() error { defer o.Unlock() var firstErr error for _, dst := range o.dests { - if closer, ok := dst.(io.WriteCloser); ok { + if closer, ok := dst.(io.Closer); ok { err := closer.Close() if err != nil && firstErr == nil { firstErr = err @@ -154,7 +139,7 @@ func (i *Input) Read(p []byte) (n int, err error) { // Not thread safe on purpose func (i *Input) Close() error { if i.src != nil { - if closer, ok := i.src.(io.WriteCloser); ok { + if closer, ok := i.src.(io.Closer); ok { return closer.Close() } } @@ -174,26 +159,6 @@ func (i *Input) Add(src io.Reader) error { return nil } -// Tail reads from `src` line per line, and returns the last `n` lines as an array. -// A ring buffer is used to only store `n` lines at any time. -func Tail(src io.Reader, n int, dst *[]string) { - scanner := bufio.NewScanner(src) - r := ring.New(n) - for scanner.Scan() { - if n == 0 { - continue - } - r.Value = scanner.Text() - r = r.Next() - } - r.Do(func(v interface{}) { - if v == nil { - return - } - *dst = append(*dst, v.(string)) - }) -} - // AddEnv starts a new goroutine which will decode all subsequent data // as a stream of json-encoded objects, and point `dst` to the last // decoded object. diff --git a/engine/streams_test.go b/engine/streams_test.go index 30d31d2952..83dd05c6f4 100644 --- a/engine/streams_test.go +++ b/engine/streams_test.go @@ -10,53 +10,6 @@ import ( "testing" ) -func TestOutputAddString(t *testing.T) { - var testInputs = [][2]string{ - { - "hello, world!", - "hello, world!", - }, - - { - "One\nTwo\nThree", - "Three", - }, - - { - "", - "", - }, - - { - "A line\nThen another nl-terminated line\n", - "Then another nl-terminated line", - }, - - { - "A line followed by an empty line\n\n", - "", - }, - } - for _, testData := range testInputs { - input := testData[0] - expectedOutput := testData[1] - o := NewOutput() - var output string - if err := o.AddString(&output); err != nil { - t.Error(err) - } - if n, err := o.Write([]byte(input)); err != nil { - t.Error(err) - } else if n != len(input) { - t.Errorf("Expected %d, got %d", len(input), n) - } - o.Close() - if output != expectedOutput { - t.Errorf("Last line is not stored as return string.\nInput: '%s'\nExpected: '%s'\nGot: '%s'", input, expectedOutput, output) - } - } -} - type sentinelWriteCloser struct { calledWrite bool calledClose bool @@ -145,59 +98,24 @@ func TestOutputAddPipe(t *testing.T) { } func TestTail(t *testing.T) { - var tests = make(map[string][][]string) - tests["hello, world!"] = [][]string{ - {}, - {"hello, world!"}, - {"hello, world!"}, - {"hello, world!"}, + var tests = make(map[string][]string) + tests["hello, world!"] = []string{ + "", + "hello, world!", + "hello, world!", + "hello, world!", } - tests["One\nTwo\nThree"] = [][]string{ - {}, - {"Three"}, - {"Two", "Three"}, - {"One", "Two", "Three"}, + tests["One\nTwo\nThree"] = []string{ + "", + "Three", + "Two\nThree", + "One\nTwo\nThree", } for input, outputs := range tests { for n, expectedOutput := range outputs { - var output []string - Tail(strings.NewReader(input), n, &output) - if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) { - t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot : '%s'", expectedOutput, output) - } - } - } -} - -func TestOutputAddTail(t *testing.T) { - var tests = make(map[string][][]string) - tests["hello, world!"] = [][]string{ - {}, - {"hello, world!"}, - {"hello, world!"}, - {"hello, world!"}, - } - 
tests["One\nTwo\nThree"] = [][]string{ - {}, - {"Three"}, - {"Two", "Three"}, - {"One", "Two", "Three"}, - } - for input, outputs := range tests { - for n, expectedOutput := range outputs { - o := NewOutput() - var output []string - if err := o.AddTail(&output, n); err != nil { - t.Error(err) - } - if n, err := o.Write([]byte(input)); err != nil { - t.Error(err) - } else if n != len(input) { - t.Errorf("Expected %d, got %d", len(input), n) - } - o.Close() - if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) { - t.Errorf("Tail(%d) returned wrong result.\nExpected: %v\nGot: %v", n, expectedOutput, output) + output := Tail(bytes.NewBufferString(input), n) + if output != expectedOutput { + t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot : '%s'", n, expectedOutput, output) } } } diff --git a/engine/table.go b/engine/table.go new file mode 100644 index 0000000000..292c4ed677 --- /dev/null +++ b/engine/table.go @@ -0,0 +1,141 @@ +package engine + +import ( + "bytes" + "encoding/json" + "io" + "sort" + "strconv" +) + +type Table struct { + Data []*Env + sortKey string + Chan chan *Env +} + +func NewTable(sortKey string, sizeHint int) *Table { + return &Table{ + make([]*Env, 0, sizeHint), + sortKey, + make(chan *Env), + } +} + +func (t *Table) SetKey(sortKey string) { + t.sortKey = sortKey +} + +func (t *Table) Add(env *Env) { + t.Data = append(t.Data, env) +} + +func (t *Table) Len() int { + return len(t.Data) +} + +func (t *Table) Less(a, b int) bool { + return t.lessBy(a, b, t.sortKey) +} + +func (t *Table) lessBy(a, b int, by string) bool { + keyA := t.Data[a].Get(by) + keyB := t.Data[b].Get(by) + intA, errA := strconv.ParseInt(keyA, 10, 64) + intB, errB := strconv.ParseInt(keyB, 10, 64) + if errA == nil && errB == nil { + return intA < intB + } + return keyA < keyB +} + +func (t *Table) Swap(a, b int) { + tmp := t.Data[a] + t.Data[a] = t.Data[b] + t.Data[b] = tmp +} + +func (t *Table) Sort() { + sort.Sort(t) +} + +func (t *Table) ReverseSort() { + sort.Sort(sort.Reverse(t)) +} + +func (t *Table) WriteListTo(dst io.Writer) (n int64, err error) { + if _, err := dst.Write([]byte{'['}); err != nil { + return -1, err + } + n = 1 + for i, env := range t.Data { + bytes, err := env.WriteTo(dst) + if err != nil { + return -1, err + } + n += bytes + if i != len(t.Data)-1 { + if _, err := dst.Write([]byte{','}); err != nil { + return -1, err + } + n += 1 + } + } + if _, err := dst.Write([]byte{']'}); err != nil { + return -1, err + } + return n + 1, nil +} + +func (t *Table) ToListString() (string, error) { + buffer := bytes.NewBuffer(nil) + if _, err := t.WriteListTo(buffer); err != nil { + return "", err + } + return buffer.String(), nil +} + +func (t *Table) WriteTo(dst io.Writer) (n int64, err error) { + for _, env := range t.Data { + bytes, err := env.WriteTo(dst) + if err != nil { + return -1, err + } + n += bytes + } + return n, nil +} + +func (t *Table) ReadListFrom(src []byte) (n int64, err error) { + var array []interface{} + + if err := json.Unmarshal(src, &array); err != nil { + return -1, err + } + + for _, item := range array { + if m, ok := item.(map[string]interface{}); ok { + env := &Env{} + for key, value := range m { + env.SetAuto(key, value) + } + t.Add(env) + } + } + + return int64(len(src)), nil +} + +func (t *Table) ReadFrom(src io.Reader) (n int64, err error) { + decoder := NewDecoder(src) + for { + env, err := decoder.Decode() + if err == io.EOF { + return 0, nil + } else if err != nil { + return -1, err + } + t.Add(env) + } + return 0, nil +} diff 
--git a/engine/table_test.go b/engine/table_test.go
index 3e8e4ff1b3..9a32ac9cdb 100644
--- a/engine/table_test.go
+++ b/engine/table_test.go
@@ -26,3 +26,87 @@ func TestTableWriteTo(t *testing.T) {
 		t.Fatalf("Incorrect output: %v", output)
 	}
 }
+
+func TestTableSortStringValue(t *testing.T) {
+	table := NewTable("Key", 0)
+
+	e := &Env{}
+	e.Set("Key", "A")
+	table.Add(e)
+
+	e = &Env{}
+	e.Set("Key", "D")
+	table.Add(e)
+
+	e = &Env{}
+	e.Set("Key", "B")
+	table.Add(e)
+
+	e = &Env{}
+	e.Set("Key", "C")
+	table.Add(e)
+
+	table.Sort()
+
+	if len := table.Len(); len != 4 {
+		t.Fatalf("Expected 4, got %d", len)
+	}
+
+	if value := table.Data[0].Get("Key"); value != "A" {
+		t.Fatalf("Expected A, got %s", value)
+	}
+
+	if value := table.Data[1].Get("Key"); value != "B" {
+		t.Fatalf("Expected B, got %s", value)
+	}
+
+	if value := table.Data[2].Get("Key"); value != "C" {
+		t.Fatalf("Expected C, got %s", value)
+	}
+
+	if value := table.Data[3].Get("Key"); value != "D" {
+		t.Fatalf("Expected D, got %s", value)
+	}
+}
+
+func TestTableReverseSortStringValue(t *testing.T) {
+	table := NewTable("Key", 0)
+
+	e := &Env{}
+	e.Set("Key", "A")
+	table.Add(e)
+
+	e = &Env{}
+	e.Set("Key", "D")
+	table.Add(e)
+
+	e = &Env{}
+	e.Set("Key", "B")
+	table.Add(e)
+
+	e = &Env{}
+	e.Set("Key", "C")
+	table.Add(e)
+
+	table.ReverseSort()
+
+	if len := table.Len(); len != 4 {
+		t.Fatalf("Expected 4, got %d", len)
+	}
+
+	if value := table.Data[0].Get("Key"); value != "D" {
+		t.Fatalf("Expected D, got %s", value)
+	}
+
+	if value := table.Data[1].Get("Key"); value != "C" {
+		t.Fatalf("Expected C, got %s", value)
+	}
+
+	if value := table.Data[2].Get("Key"); value != "B" {
+		t.Fatalf("Expected B, got %s", value)
+	}
+
+	if value := table.Data[3].Get("Key"); value != "A" {
+		t.Fatalf("Expected A, got %s", value)
+	}
+}
diff --git a/graph/graph.go b/graph/graph.go
index b889139121..649b39b12f 100644
--- a/graph/graph.go
+++ b/graph/graph.go
@@ -2,12 +2,6 @@ package graph
 
 import (
 	"fmt"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
 	"os"
@@ -17,6 +11,13 @@ import (
 	"strings"
 	"syscall"
 	"time"
+
+	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/daemon/graphdriver"
+	"github.com/dotcloud/docker/dockerversion"
+	"github.com/dotcloud/docker/image"
+	"github.com/dotcloud/docker/runconfig"
+	"github.com/dotcloud/docker/utils"
 )
 
 // A Graph is a store for versioned filesystem images and the relationship between them.
@@ -141,11 +142,13 @@ func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, contain Architecture: runtime.GOARCH, OS: runtime.GOOS, } + if containerID != "" { img.Parent = containerImage img.Container = containerID img.ContainerConfig = *containerConfig } + if err := graph.Register(nil, layerData, img); err != nil { return nil, err } diff --git a/graph/service.go b/graph/service.go new file mode 100644 index 0000000000..881a199043 --- /dev/null +++ b/graph/service.go @@ -0,0 +1,171 @@ +package graph + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/utils" +) + +func (s *TagStore) Install(eng *engine.Engine) error { + eng.Register("image_set", s.CmdSet) + eng.Register("image_tag", s.CmdTag) + eng.Register("image_get", s.CmdGet) + eng.Register("image_inspect", s.CmdLookup) + eng.Register("image_tarlayer", s.CmdTarLayer) + return nil +} + +// CmdSet stores a new image in the graph. +// Images are stored in the graph using 4 elements: +// - A user-defined ID +// - A collection of metadata describing the image +// - A directory tree stored as a tar archive (also called the "layer") +// - A reference to a "parent" ID on top of which the layer should be applied +// +// NOTE: even though the parent ID is only useful in relation to the layer and how +// to apply it (i.e. you could represent the full directory tree as 'parent_layer + layer'), +// it is treated as a top-level property of the image. This is an artifact of early +// design and should probably be cleaned up in the future to simplify the design. +// +// Syntax: image_set ID +// Input: +// - Layer content must be streamed in tar format on stdin. An empty input is +// valid and represents a nil layer. +// +// - Image metadata must be passed in the command environment. +// 'json': a json-encoded object with all image metadata. +// It will be stored as-is, without any encoding/decoding artifacts. +// That is a requirement of the current registry client implementation, +// because a re-encoded json might invalidate the image checksum at +// the next upload, even with functionally identical content. +func (s *TagStore) CmdSet(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + var ( + imgJSON = []byte(job.Getenv("json")) + layer = job.Stdin + ) + if len(imgJSON) == 0 { + return job.Errorf("mandatory key 'json' is not set") + } + // We have to pass an *image.Image object, even though it will be completely + // ignored in favor of the redundant json data. + // FIXME: the current prototype of Graph.Register is stupid and redundant. + img, err := image.NewImgJSON(imgJSON) + if err != nil { + return job.Error(err) + } + if err := s.graph.Register(imgJSON, layer, img); err != nil { + return job.Error(err) + } + return engine.StatusOK +}
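As a usage sketch (not part of the patch itself), a caller holding an *engine.Engine could drive image_set as documented above; eng, imgID, imgJSON and layer are placeholders for the engine, the ID, the raw JSON metadata and the tar stream:

    job := eng.Job("image_set", imgID)
    job.Setenv("json", string(imgJSON)) // stored as-is so the checksum stays valid
    job.Stdin.Add(layer)                // tar-format layer; empty input means a nil layer
    if err := job.Run(); err != nil {
        return err
    }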
+ +// CmdTag assigns a new name and tag to an existing image. If the tag already exists, +// it is changed and the image previously referenced by the tag loses that reference. +// This may cause the old image to be garbage-collected if its reference count reaches zero. +// +// Syntax: image_tag NEWNAME OLDNAME +// Example: image_tag shykes/myapp:latest shykes/myapp:1.42.0 +func (s *TagStore) CmdTag(job *engine.Job) engine.Status { + if len(job.Args) != 2 { + return job.Errorf("usage: %s NEWNAME OLDNAME", job.Name) + } + var ( + newName = job.Args[0] + oldName = job.Args[1] + ) + newRepo, newTag := utils.ParseRepositoryTag(newName) + // FIXME: Set should either parse both old and new name, or neither. + // the current prototype is inconsistent. + if err := s.Set(newRepo, newTag, oldName, true); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +// CmdGet returns information about an image. +// If the image doesn't exist, an empty object is returned, to allow +// checking for an image's existence. +func (s *TagStore) CmdGet(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + res := &engine.Env{} + img, err := s.LookupImage(name) + // Note: if the image doesn't exist, LookupImage returns + // nil, nil. + if err != nil { + return job.Error(err) + } + if img != nil { + // We don't directly expose all fields of the Image objects, + // to maintain a clean public API which we can maintain over + // time even if the underlying structure changes. + // We should have done this with the Image object to begin with... + // but we didn't, so now we're doing it here. + // + // Fields that we're probably better off not including: + // - Config/ContainerConfig. Those structs have the same sprawl problem, + // so we shouldn't include them wholesale either. + // - Comment: initially created to fulfill the "every image is a git commit" + // metaphor, in practice people either ignore it or use it as a + // generic description field which it isn't. On deprecation shortlist. + res.Set("created", fmt.Sprintf("%v", img.Created)) + res.Set("author", img.Author) + res.Set("os", img.OS) + res.Set("architecture", img.Architecture) + res.Set("docker_version", img.DockerVersion) + res.Set("ID", img.ID) + res.Set("Parent", img.Parent) + } + res.WriteTo(job.Stdout) + return engine.StatusOK +} + +// CmdLookup returns an image encoded as JSON +func (s *TagStore) CmdLookup(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + if image, err := s.LookupImage(name); err == nil && image != nil { + b, err := json.Marshal(image) + if err != nil { + return job.Error(err) + } + job.Stdout.Write(b) + return engine.StatusOK + } + return job.Errorf("No such image: %s", name) +} + +// CmdTarLayer returns the tar layer of an image +func (s *TagStore) CmdTarLayer(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + if image, err := s.LookupImage(name); err == nil && image != nil { + fs, err := image.TarLayer() + if err != nil { + return job.Error(err) + } + defer fs.Close() + + if written, err := io.Copy(job.Stdout, fs); err != nil { + return job.Error(err) + } else { + utils.Debugf("rendered layer for %s (%d bytes)", image.ID, written) + } + + return engine.StatusOK + } + return job.Errorf("No such image: %s", name) +}
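Likewise, a sketch of consuming image_get, mirroring the Env.Decode pattern used elsewhere in this patch; eng and name are placeholders:

    job := eng.Job("image_get", name)
    buffer := bytes.NewBuffer(nil)
    job.Stdout.Add(buffer) // CmdGet writes a single Env object to stdout
    if err := job.Run(); err != nil {
        return err
    }
    res := &engine.Env{}
    if err := res.Decode(buffer); err != nil {
        return err
    }
    exists := res.Get("ID") != "" // empty object when the image doesn't exist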
diff --git a/hack/MAINTAINERS.md b/hack/MAINTAINERS.md index be3117c864..9dbdf99d9a 100644 --- a/hack/MAINTAINERS.md +++ b/hack/MAINTAINERS.md @@ -53,14 +53,17 @@ All decisions affecting docker, big and small, follow the same 3 steps: * Step 2: Discuss the pull request. Anyone can do this. -* Step 3: Accept or refuse a pull request. The relevant maintainer does this (see below "Who decides what?") +* Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do +this (see below "Who decides what?") ## Who decides what? -So all decisions are pull requests, and the relevant maintainer makes -the decision by accepting or refusing the pull request. But how do we -identify the relevant maintainer for a given pull request? +All decisions are pull requests, and the relevant maintainers make +decisions by accepting or refusing the pull request. Review and acceptance +by anyone is denoted by adding a comment in the pull request: `LGTM`. +However, only currently listed `MAINTAINERS` are counted towards the required +majority. Docker follows the timeless, highly efficient and totally unfair system known as [Benevolent dictator for @@ -70,19 +73,22 @@ decisions are made by default by Solomon. Since making every decision myself would be highly un-scalable, in practice decisions are spread across multiple maintainers. -The relevant maintainer for a pull request is assigned in 3 steps: +The relevant maintainers for a pull request can be worked out in 2 steps: -* Step 1: Determine the subdirectory affected by the pull request. This +* Step 1: Determine the subdirectories affected by the pull request. This might be `src/registry`, `docs/source/api`, or any other part of the repo. * Step 2: Find the `MAINTAINERS` file which affects this directory. If the directory itself does not have a `MAINTAINERS` file, work your way up the repo hierarchy until you find one. -* Step 3: The first maintainer listed is the primary maintainer. The - pull request is assigned to him. He may assign it to other listed - maintainers, at his discretion. +There is also a `hack/getmaintainer.sh` script that will print out the +maintainers for a specified directory. +### I'm a maintainer, and I'm going on holiday + +Please let your co-maintainers and other contributors know by raising a pull +request that comments out your `MAINTAINERS` file entry using a `#`. ### I'm a maintainer, should I make pull requests too? @@ -91,7 +97,7 @@ made through a pull request. ### Who assigns maintainers? -Solomon. +Solomon has final `LGTM` approval for all pull requests to `MAINTAINERS` files. ### How is this process changed? diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md index 9edb4a3e14..82d959c9e2 100644 --- a/hack/PACKAGERS.md +++ b/hack/PACKAGERS.md @@ -297,7 +297,7 @@ the client will even run on alternative platforms such as Mac OS X / Darwin. Some of Docker's features are activated by using optional command-line flags or by having support for them in the kernel or userspace. A few examples include: -* LXC execution driver (requires version 0.8 or later of the LXC utility scripts) +* LXC execution driver (requires version 1.0 or later of the LXC utility scripts) * AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at least the "auplink" utility from aufs-tools) * experimental BTRFS graph driver (requires BTRFS support enabled in the kernel)
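The majority rule introduced in hack/MAINTAINERS.md above works out to floor(N/2)+1 LGTMs for N listed maintainers, the same arithmetic hack/getmaintainer.sh performs below; a hypothetical one-liner in Go for illustration:

    // requiredLGTMs returns the strict majority of listed maintainers needed to merge.
    func requiredLGTMs(maintainers []string) int {
        return len(maintainers)/2 + 1 // e.g. 3 maintainers -> 2 LGTMs
    }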
diff --git a/hack/dind b/hack/dind index df2baa2757..a9de03e4ff 100755 --- a/hack/dind +++ b/hack/dind @@ -14,7 +14,7 @@ set -e export container=docker # First, make sure that cgroups are mounted correctly. -CGROUP=/sys/fs/cgroup +CGROUP=/cgroup mkdir -p "$CGROUP" diff --git a/hack/getmaintainer.sh b/hack/getmaintainer.sh index 2c24bacc89..ca532d42ec 100755 --- a/hack/getmaintainer.sh +++ b/hack/getmaintainer.sh @@ -1,4 +1,5 @@ -#!/bin/sh +#!/usr/bin/env bash +set -e if [ $# -ne 1 ]; then echo >&2 "Usage: $0 PATH" @@ -34,6 +35,7 @@ while true; do fi done; } < MAINTAINERS + break fi if [ -d .git ]; then break @@ -46,13 +48,15 @@ done PRIMARY="${MAINTAINERS[0]}" PRIMARY_FIRSTNAME=$(echo $PRIMARY | cut -d' ' -f1) +LGTM_COUNT=${#MAINTAINERS[@]} +LGTM_COUNT=$((LGTM_COUNT/2 + 1)) firstname() { echo $1 | cut -d' ' -f1 } -echo "--- $PRIMARY is the PRIMARY MAINTAINER of $1. Assign pull requests to him." -echo "$(firstname $PRIMARY) may assign pull requests to the following secondary maintainers:" +echo "A pull request in $1 will need $LGTM_COUNT LGTMs to be merged." +echo "--- $PRIMARY is the PRIMARY MAINTAINER of $1." for SECONDARY in "${MAINTAINERS[@]:1}"; do echo "--- $SECONDARY" done diff --git a/hack/make/test-integration b/hack/make/test-integration index 4c2bccaead..baad1349a2 100644 --- a/hack/make/test-integration +++ b/hack/make/test-integration @@ -10,6 +10,6 @@ bundle_test_integration() { # this "grep" hides some really irritating warnings that "go test -coverpkg" # spews when it is given packages that aren't used +exec > >(tee -a $DEST/test.log) 2>&1 bundle_test_integration 2>&1 \ - | grep --line-buffered -v '^warning: no packages being tested depend on ' \ - | tee $DEST/test.log + | grep --line-buffered -v '^warning: no packages being tested depend on ' diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli index f2128a26ac..837bd8737a 100644 --- a/hack/make/test-integration-cli +++ b/hack/make/test-integration-cli @@ -11,6 +11,7 @@ bundle_test_integration_cli() { } # subshell so that we can export PATH without breaking other things +exec > >(tee -a $DEST/test.log) 2>&1 ( export PATH="$DEST/../binary:$DEST/../dynbinary:$PATH" @@ -40,4 +41,4 @@ bundle_test_integration_cli() { DOCKERD_PID=$(set -x; cat $DEST/docker.pid) ( set -x; kill $DOCKERD_PID ) wait $DOCKERD_PID || true -) 2>&1 | tee $DEST/test.log +) diff --git a/hack/make/test-unit b/hack/make/test-unit index 066865859c..552810f349 100644 --- a/hack/make/test-unit +++ b/hack/make/test-unit @@ -49,7 +49,8 @@ bundle_test_unit() { echo true fi - } 2>&1 | tee $DEST/test.log + } } +exec > >(tee -a $DEST/test.log) 2>&1 bundle_test_unit diff --git a/hack/vendor.sh b/hack/vendor.sh index 79322cd9af..8084f2eb9d 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -39,13 +39,13 @@ clone() { echo done } -clone git github.com/kr/pty 98c7b80083 +clone git github.com/kr/pty 67e2db24c8 -clone git github.com/gorilla/context 708054d61e5 +clone git github.com/gorilla/context b06ed15e1c -clone git github.com/gorilla/mux 9b36453141c +clone git github.com/gorilla/mux 136d54f81f -clone git github.com/syndtr/gocapability 3454319be2 +clone git github.com/syndtr/gocapability 3c85049eae clone hg code.google.com/p/go.net 84a4013f96e0 @@ -53,11 +53,11 @@ clone hg code.google.com/p/gosqlite 74691fb6f837 # get Go tip's archive/tar, for xattr support # TODO after Go 1.3 drops, bump our minimum supported version and drop this vendored dep -clone hg code.google.com/p/go a15f344a9efa +clone hg code.google.com/p/go 3458ba248590 mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar rm -rf src/code.google.com/p/go mkdir -p src/code.google.com/p/go/src/pkg/archive mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar clone git 
github.com/godbus/dbus v1 -clone git github.com/coreos/go-systemd v1 +clone git github.com/coreos/go-systemd v2 diff --git a/integration-cli/MAINTAINERS b/integration-cli/MAINTAINERS new file mode 100644 index 0000000000..53c8a11858 --- /dev/null +++ b/integration-cli/MAINTAINERS @@ -0,0 +1 @@ +Cristian Staretu (github: unclejack) diff --git a/integration-cli/build_tests/TestAdd/DirContentToExistDir/Dockerfile b/integration-cli/build_tests/TestAdd/DirContentToExistDir/Dockerfile new file mode 100644 index 0000000000..6ab0e98f49 --- /dev/null +++ b/integration-cli/build_tests/TestAdd/DirContentToExistDir/Dockerfile @@ -0,0 +1,10 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] diff --git a/integration-cli/build_tests/TestAdd/DirContentToExistDir/test_dir/test_file b/integration-cli/build_tests/TestAdd/DirContentToExistDir/test_dir/test_file new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration-cli/build_tests/TestAdd/DirContentToRoot/Dockerfile b/integration-cli/build_tests/TestAdd/DirContentToRoot/Dockerfile new file mode 100644 index 0000000000..03a9c052fd --- /dev/null +++ b/integration-cli/build_tests/TestAdd/DirContentToRoot/Dockerfile @@ -0,0 +1,8 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestAdd/DirContentToRoot/test_dir/test_file b/integration-cli/build_tests/TestAdd/DirContentToRoot/test_dir/test_file new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration-cli/build_tests/TestAdd/EtcToRoot/Dockerfile b/integration-cli/build_tests/TestAdd/EtcToRoot/Dockerfile new file mode 100644 index 0000000000..58c75b00f3 --- /dev/null +++ b/integration-cli/build_tests/TestAdd/EtcToRoot/Dockerfile @@ -0,0 +1,2 @@ +FROM scratch +ADD . 
/ diff --git a/integration-cli/build_tests/TestAdd/SingleFileToExistDir/Dockerfile b/integration-cli/build_tests/TestAdd/SingleFileToExistDir/Dockerfile new file mode 100644 index 0000000000..fefbd09f0c --- /dev/null +++ b/integration-cli/build_tests/TestAdd/SingleFileToExistDir/Dockerfile @@ -0,0 +1,10 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestAdd/SingleFileToExistDir/test_file b/integration-cli/build_tests/TestAdd/SingleFileToExistDir/test_file new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration-cli/build_tests/TestAdd/SingleFileToNonExistDir/Dockerfile b/integration-cli/build_tests/TestAdd/SingleFileToNonExistDir/Dockerfile new file mode 100644 index 0000000000..661990b7f4 --- /dev/null +++ b/integration-cli/build_tests/TestAdd/SingleFileToNonExistDir/Dockerfile @@ -0,0 +1,9 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestAdd/SingleFileToNonExistDir/test_file b/integration-cli/build_tests/TestAdd/SingleFileToNonExistDir/test_file new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration-cli/build_tests/TestAdd/SingleFileToRoot/Dockerfile b/integration-cli/build_tests/TestAdd/SingleFileToRoot/Dockerfile new file mode 100644 index 0000000000..561dbe9c55 --- /dev/null +++ b/integration-cli/build_tests/TestAdd/SingleFileToRoot/Dockerfile @@ -0,0 +1,9 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestAdd/SingleFileToWorkdir/Dockerfile b/integration-cli/build_tests/TestAdd/SingleFileToWorkdir/Dockerfile new file mode 100644 index 0000000000..3f076718f2 --- /dev/null +++ b/integration-cli/build_tests/TestAdd/SingleFileToWorkdir/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +ADD test_file . 
diff --git a/integration-cli/build_tests/TestAdd/WholeDirToRoot/Dockerfile b/integration-cli/build_tests/TestAdd/WholeDirToRoot/Dockerfile new file mode 100644 index 0000000000..03e9ac0b1c --- /dev/null +++ b/integration-cli/build_tests/TestAdd/WholeDirToRoot/Dockerfile @@ -0,0 +1,11 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestBuildCacheADD/1/Dockerfile b/integration-cli/build_tests/TestBuildCacheADD/1/Dockerfile new file mode 100644 index 0000000000..7287771992 --- /dev/null +++ b/integration-cli/build_tests/TestBuildCacheADD/1/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +ADD https://index.docker.io/robots.txt / diff --git a/integration-cli/build_tests/TestBuildCacheADD/2/Dockerfile b/integration-cli/build_tests/TestBuildCacheADD/2/Dockerfile new file mode 100644 index 0000000000..afe79b84b6 --- /dev/null +++ b/integration-cli/build_tests/TestBuildCacheADD/2/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +ADD http://example.com/index.html / diff --git a/integration-cli/build_tests/TestBuildForceRm/Dockerfile b/integration-cli/build_tests/TestBuildForceRm/Dockerfile new file mode 100644 index 0000000000..8468edd4ce --- /dev/null +++ b/integration-cli/build_tests/TestBuildForceRm/Dockerfile @@ -0,0 +1,3 @@ +FROM busybox +RUN true +RUN thiswillfail diff --git a/integration-cli/build_tests/TestBuildRm/Dockerfile b/integration-cli/build_tests/TestBuildRm/Dockerfile new file mode 100644 index 0000000000..190eacf117 --- /dev/null +++ b/integration-cli/build_tests/TestBuildRm/Dockerfile @@ -0,0 +1,4 @@ +FROM busybox +ADD foo / +ADD foo / + diff --git a/integration-cli/build_tests/TestBuildRm/foo b/integration-cli/build_tests/TestBuildRm/foo new file mode 100644 index 0000000000..5716ca5987 --- /dev/null +++ b/integration-cli/build_tests/TestBuildRm/foo @@ -0,0 +1 @@ +bar diff --git a/integration-cli/build_tests/TestBuildSixtySteps/Dockerfile b/integration-cli/build_tests/TestBuildSixtySteps/Dockerfile index 89b66f4f1d..6a2bcab301 100644 --- a/integration-cli/build_tests/TestBuildSixtySteps/Dockerfile +++ b/integration-cli/build_tests/TestBuildSixtySteps/Dockerfile @@ -1,60 +1,60 @@ -FROM busybox -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN 
echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" +FROM scratch +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / diff --git a/integration-cli/build_tests/TestBuildSixtySteps/foo b/integration-cli/build_tests/TestBuildSixtySteps/foo new file mode 100644 index 0000000000..7898192261 --- /dev/null +++ b/integration-cli/build_tests/TestBuildSixtySteps/foo @@ -0,0 +1 @@ +a diff --git a/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessibledirectory/Dockerfile b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessibledirectory/Dockerfile new file mode 100644 index 0000000000..0964b8e87c --- /dev/null +++ b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessibledirectory/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +ADD . /foo/ diff --git a/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessibledirectory/directoryWeCantStat/bar b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessibledirectory/directoryWeCantStat/bar new file mode 100644 index 0000000000..257cc5642c --- /dev/null +++ b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessibledirectory/directoryWeCantStat/bar @@ -0,0 +1 @@ +foo diff --git a/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessiblefile/Dockerfile b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessiblefile/Dockerfile new file mode 100644 index 0000000000..0964b8e87c --- /dev/null +++ b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessiblefile/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +ADD . /foo/ diff --git a/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessiblefile/fileWithoutReadAccess b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessiblefile/fileWithoutReadAccess new file mode 100644 index 0000000000..b25f9a2a19 --- /dev/null +++ b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessiblefile/fileWithoutReadAccess @@ -0,0 +1 @@ +should make `docker build` throw an error diff --git a/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/linksdirectory/Dockerfile b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/linksdirectory/Dockerfile new file mode 100644 index 0000000000..0964b8e87c --- /dev/null +++ b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/linksdirectory/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +ADD . 
/foo/ diff --git a/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/linksdirectory/g b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/linksdirectory/g new file mode 120000 index 0000000000..5fc3f33923 --- /dev/null +++ b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/linksdirectory/g @@ -0,0 +1 @@ +../../../../../../../../../../../../../../../../../../../azA \ No newline at end of file diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 7cd42dc69c..4bbe4c6dc3 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -2,11 +2,45 @@ package main import ( "fmt" + "os" "os/exec" "path/filepath" + "strings" "testing" + "time" ) +func TestBuildCacheADD(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildCacheADD", "1") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testcacheadd1", ".") + buildCmd.Dir = buildDirectory + exitCode, err := runCommand(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v", err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + buildDirectory = filepath.Join(workingDirectory, "build_tests", "TestBuildCacheADD", "2") + buildCmd = exec.Command(dockerBinary, "build", "-t", "testcacheadd2", ".") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + if strings.Contains(out, "Using cache") { + t.Fatal("2nd build used cache on ADD, it shouldn't") + } + + deleteImages("testcacheadd1") + deleteImages("testcacheadd2") + + logDone("build - build two images with ADD") +} + func TestBuildSixtySteps(t *testing.T) { buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildSixtySteps") buildCmd := exec.Command(dockerBinary, "build", "-t", "foobuildsixtysteps", ".") @@ -23,6 +57,362 @@ func TestBuildSixtySteps(t *testing.T) { logDone("build - build an image with sixty build steps") } +func TestAddSingleFileToRoot(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd", "SingleFileToRoot") + f, err := os.OpenFile(filepath.Join(buildDirectory, "test_file"), os.O_CREATE, 0644) + if err != nil { + t.Fatal(err) + } + f.Close() + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", ".") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + + logDone("build - add single file to root") +} + +// Issue #3960: "ADD src ." 
hangs +func TestAddSingleFileToWorkdir(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd", "SingleFileToWorkdir") + f, err := os.OpenFile(filepath.Join(buildDirectory, "test_file"), os.O_CREATE, 0644) + if err != nil { + t.Fatal(err) + } + f.Close() + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", ".") + buildCmd.Dir = buildDirectory + done := make(chan error) + go func() { + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + done <- fmt.Errorf("build failed to complete: %s %v", out, err) + return + } + done <- nil + }() + select { + case <-time.After(5 * time.Second): + if err := buildCmd.Process.Kill(); err != nil { + fmt.Printf("could not kill build (pid=%d): %v\n", buildCmd.Process.Pid, err) + } + t.Fatal("build timed out") + case err := <-done: + if err != nil { + t.Fatal(err) + } + } + + deleteImages("testaddimg") + + logDone("build - add single file to workdir") +} + +func TestAddSingleFileToExistDir(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", "SingleFileToExistDir") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + + logDone("build - add single file to existing dir") +} + +func TestAddSingleFileToNonExistDir(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", "SingleFileToNonExistDir") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + + logDone("build - add single file to non-existing dir") +} + +func TestAddDirContentToRoot(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", "DirContentToRoot") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + + logDone("build - add directory contents to root") +} + +func TestAddDirContentToExistDir(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", "DirContentToExistDir") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + + logDone("build - add directory contents to existing dir") +} + +func TestAddWholeDirToRoot(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd", "WholeDirToRoot") + test_dir := filepath.Join(buildDirectory, "test_dir") + if err := os.MkdirAll(test_dir, 0755); err != nil { + t.Fatal(err) + } + f, err := os.OpenFile(filepath.Join(test_dir, 
"test_file"), os.O_CREATE, 0644) + if err != nil { + t.Fatal(err) + } + f.Close() + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", ".") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + + logDone("build - add whole directory to root") +} + +func TestAddEtcToRoot(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", "EtcToRoot") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + logDone("build - add etc directory to root") +} + +// Issue #5270 - ensure we throw a better error than "unexpected EOF" +// when we can't access files in the context. +func TestBuildWithInaccessibleFilesInContext(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildWithInaccessibleFilesInContext") + + { + // This is used to ensure we detect inaccessible files early during build in the cli client + pathToInaccessibleFileBuildDirectory := filepath.Join(buildDirectory, "inaccessiblefile") + pathToFileWithoutReadAccess := filepath.Join(pathToInaccessibleFileBuildDirectory, "fileWithoutReadAccess") + + err := os.Chown(pathToFileWithoutReadAccess, 0, 0) + errorOut(err, t, fmt.Sprintf("failed to chown file to root: %s", err)) + err = os.Chmod(pathToFileWithoutReadAccess, 0700) + errorOut(err, t, fmt.Sprintf("failed to chmod file to 700: %s", err)) + + buildCommandStatement := fmt.Sprintf("%s build -t inaccessiblefiles .", dockerBinary) + buildCmd := exec.Command("su", "unprivilegeduser", "-c", buildCommandStatement) + buildCmd.Dir = pathToInaccessibleFileBuildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + if err == nil || exitCode == 0 { + t.Fatalf("build should have failed: %s %s", err, out) + } + + // check if we've detected the failure before we started building + if !strings.Contains(out, "no permission to read from ") { + t.Fatalf("output should've contained the string: no permission to read from but contained: %s", out) + } + + if !strings.Contains(out, "Error checking context is accessible") { + t.Fatalf("output should've contained the string: Error checking context is accessible") + } + } + { + // This is used to ensure we detect inaccessible directories early during build in the cli client + pathToInaccessibleDirectoryBuildDirectory := filepath.Join(buildDirectory, "inaccessibledirectory") + pathToDirectoryWithoutReadAccess := filepath.Join(pathToInaccessibleDirectoryBuildDirectory, "directoryWeCantStat") + pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") + + err := os.Chown(pathToDirectoryWithoutReadAccess, 0, 0) + errorOut(err, t, fmt.Sprintf("failed to chown directory to root: %s", err)) + err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444) + errorOut(err, t, fmt.Sprintf("failed to chmod directory to 755: %s", err)) + err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700) + errorOut(err, t, fmt.Sprintf("failed to chmod file to 444: %s", err)) + + buildCommandStatement := fmt.Sprintf("%s build -t inaccessiblefiles .", 
dockerBinary) + buildCmd := exec.Command("su", "unprivilegeduser", "-c", buildCommandStatement) + buildCmd.Dir = pathToInaccessibleDirectoryBuildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + if err == nil || exitCode == 0 { + t.Fatalf("build should have failed: %s %s", err, out) + } + + // check if we've detected the failure before we started building + if !strings.Contains(out, "can't stat") { + t.Fatalf("output should've contained the string 'can't stat' but contained: %s", out) + } + + if !strings.Contains(out, "Error checking context is accessible") { + t.Fatalf("output should've contained the string: Error checking context is accessible") + } + + } + { + // This is used to ensure we don't follow links when checking if everything in the context is accessible + // This test doesn't require that we run commands as an unprivileged user + pathToDirectoryWhichContainsLinks := filepath.Join(buildDirectory, "linksdirectory") + + buildCmd := exec.Command(dockerBinary, "build", "-t", "testlinksok", ".") + buildCmd.Dir = pathToDirectoryWhichContainsLinks + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + t.Fatalf("build should have worked: %s %s", err, out) + } + + deleteImages("testlinksok") + + } + deleteImages("inaccessiblefiles") + logDone("build - ADD from context with inaccessible files must fail") + logDone("build - ADD from context with accessible links must work") +} + +func TestBuildForceRm(t *testing.T) { + containerCountBefore, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildForceRm") + buildCmd := exec.Command(dockerBinary, "build", "--force-rm", ".") + buildCmd.Dir = buildDirectory + _, exitCode, err := runCommandWithOutput(buildCmd) + + if err == nil || exitCode == 0 { + t.Fatal("build should have failed") + } + + containerCountAfter, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + t.Fatalf("--force-rm shouldn't have left containers behind") + } + + logDone("build - ensure --force-rm doesn't leave containers behind") +} + +func TestBuildRm(t *testing.T) { + { + containerCountBefore, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildRm") + buildCmd := exec.Command(dockerBinary, "build", "--rm", "-t", "testbuildrm", ".") + buildCmd.Dir = buildDirectory + _, exitCode, err := runCommandWithOutput(buildCmd) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + containerCountAfter, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + t.Fatalf("--rm shouldn't have left containers behind") + } + deleteImages("testbuildrm") + } + + { + containerCountBefore, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildRm") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testbuildrm", ".") + buildCmd.Dir = buildDirectory + _, exitCode, err := runCommandWithOutput(buildCmd) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + containerCountAfter, err := 
getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + t.Fatalf("--rm shouldn't have left containers behind") + } + deleteImages("testbuildrm") + } + + { + containerCountBefore, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildRm") + buildCmd := exec.Command(dockerBinary, "build", "--rm=false", "-t", "testbuildrm", ".") + buildCmd.Dir = buildDirectory + _, exitCode, err := runCommandWithOutput(buildCmd) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + containerCountAfter, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore == containerCountAfter { + t.Fatalf("--rm=false should have left containers behind") + } + deleteAllContainers() + deleteImages("testbuildrm") + + } + + logDone("build - ensure --rm doesn't leave containers behind and that --rm=true is the default") + logDone("build - ensure --rm=false overrides the default") +} + // TODO: TestCaching // TODO: TestADDCacheInvalidation diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go index e99379231e..c02c89cd30 100644 --- a/integration-cli/docker_cli_commit_test.go +++ b/integration-cli/docker_cli_commit_test.go @@ -62,3 +62,54 @@ func TestCommitNewFile(t *testing.T) { logDone("commit - commit file and read") } + +func TestCommitTTY(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-t", "--name", "tty", "busybox", "/bin/ls") + + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "commit", "tty", "ttytest") + imageId, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + imageId = strings.Trim(imageId, "\r\n") + + cmd = exec.Command(dockerBinary, "run", "ttytest", "/bin/ls") + + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + deleteImages(imageId) + + logDone("commit - commit a container with a tty") +} + +func TestCommitWithHostBindMount(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "bind-commit", "-v", "/dev/null:/winning", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "commit", "bind-commit", "bindtest") + imageId, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + imageId = strings.Trim(imageId, "\r\n") + + cmd = exec.Command(dockerBinary, "run", "bindtest", "true") + + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + deleteImages(imageId) + + logDone("commit - commit bind mounted file") +} diff --git a/integration-cli/docker_cli_cp_test.go b/integration-cli/docker_cli_cp_test.go new file mode 100644 index 0000000000..7421ed0fa1 --- /dev/null +++ b/integration-cli/docker_cli_cp_test.go @@ -0,0 +1,245 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" +) + +const ( + cpTestPathParent = "/some" + cpTestPath = "/some/path" + cpTestName = "test" + cpFullPath = "/some/path/test" + + cpContainerContents = "holla, i am the container" + cpHostContents = "hello, i am the host" +) + +// Test for #5656 +// Check that garbage paths don't escape the container's rootfs +func TestCpGarbagePath(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n 
'"+cpContainerContents+"' > "+cpFullPath) + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := filepath.Join("../../../../../../../../../../../../", cpFullPath) + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from garbage path: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- garbage path can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for garbage path") + } + + logDone("cp - garbage paths relative to container's rootfs") +} + +// Check that relative paths are relative to the container's rootfs +func TestCpRelativePath(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path, _ := filepath.Rel("/", cpFullPath) + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from relative path: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- relative path can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for relative path") + } + + logDone("cp - relative paths relative to container's rootfs") +} + +// Check that absolute paths are relative to the container's rootfs +func TestCpAbsolutePath(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if err != nil || exitCode 
!= 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := cpFullPath + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- absolute path can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for absolute path") + } + + logDone("cp - absolute paths relative to container's rootfs") +} + +// Check that cp with unprivileged user doesn't return any error +func TestCpUnprivilegedUser(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + tmpdir, err := ioutil.TempDir("", "docker-integration") + if err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(tmpdir) + + if err = os.Chmod(tmpdir, 0777); err != nil { + t.Fatal(err) + } + + path := cpTestName + + _, _, err = runCommandWithOutput(exec.Command("su", "unprivilegeduser", "-c", dockerBinary+" cp "+cleanedContainerID+":"+path+" "+tmpdir)) + if err != nil { + t.Fatalf("couldn't copy with unprivileged user: %s:%s %s", cleanedContainerID, path, err) + } + + logDone("cp - unprivileged user") +} diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go index 55c41e0bbc..0480183bc7 100644 --- a/integration-cli/docker_cli_links_test.go +++ b/integration-cli/docker_cli_links_test.go @@ -2,10 +2,47 @@ package main import ( "fmt" + "github.com/dotcloud/docker/pkg/iptables" + "io/ioutil" + "os" "os/exec" + "strings" "testing" ) +func TestEtcHostsRegularFile(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + if !strings.HasPrefix(out, "-") { + t.Errorf("/etc/hosts should be a regular file") + } + + deleteAllContainers() + + logDone("link - /etc/hosts is a regular file") +} + +func TestEtcHostsContentMatch(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "cat", "/etc/hosts") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + hosts, err := ioutil.ReadFile("/etc/hosts") + if os.IsNotExist(err) { + 
t.Skip("/etc/hosts does not exist, skip this test") + } + + if out != string(hosts) { + t.Errorf("container") + } + + deleteAllContainers() + + logDone("link - /etc/hosts matches hosts copy") +} + func TestPingUnlinkedContainers(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") exitCode, err := runCommand(runCmd) @@ -28,3 +65,28 @@ func TestPingLinkedContainers(t *testing.T) { cmd(t, "kill", idB) deleteAllContainers() } + +func TestIpTablesRulesWhenLinkAndUnlink(t *testing.T) { + cmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "sleep", "10") + cmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "sleep", "10") + + childIp := findContainerIp(t, "child") + parentIp := findContainerIp(t, "parent") + + sourceRule := []string{"FORWARD", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", childIp, "--sport", "80", "-d", parentIp, "-j", "ACCEPT"} + destinationRule := []string{"FORWARD", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", parentIp, "--dport", "80", "-d", childIp, "-j", "ACCEPT"} + if !iptables.Exists(sourceRule...) || !iptables.Exists(destinationRule...) { + t.Fatal("Iptables rules not found") + } + + cmd(t, "rm", "--link", "parent/http") + if iptables.Exists(sourceRule...) || iptables.Exists(destinationRule...) { + t.Fatal("Iptables rules should be removed when unlink") + } + + cmd(t, "kill", "child") + cmd(t, "kill", "parent") + deleteAllContainers() + + logDone("link - verify iptables when link and unlink") +} diff --git a/integration-cli/docker_cli_nat_test.go b/integration-cli/docker_cli_nat_test.go index 90af933be9..3816e54050 100644 --- a/integration-cli/docker_cli_nat_test.go +++ b/integration-cli/docker_cli_nat_test.go @@ -3,22 +3,14 @@ package main import ( "encoding/json" "fmt" - "github.com/dotcloud/docker/daemon" "net" "os/exec" - "path/filepath" "testing" + + "github.com/dotcloud/docker/daemon" ) func TestNetworkNat(t *testing.T) { - ncPath, err := exec.LookPath("nc") - if err != nil { - t.Skip("Test not running with `make test`. Netcat not found: %s", err) - } - ncPath, err = filepath.EvalSymlinks(ncPath) - if err != nil { - t.Fatalf("Error resolving netcat symlink: %s", err) - } iface, err := net.InterfaceByName("eth0") if err != nil { t.Skip("Test not running with `make test`. 
Interface eth0 not found: %s", err) @@ -34,10 +26,7 @@ func TestNetworkNat(t *testing.T) { t.Fatalf("Error retrieving the ip for eth0: %s", err) } - runCmd := exec.Command(dockerBinary, "run", "-d", - "-v", ncPath+":/bin/nc", - "-v", "/lib/x86_64-linux-gnu/libc.so.6:/lib/libc.so.6", "-v", "/lib/x86_64-linux-gnu/libresolv.so.2:/lib/libresolv.so.2", "-v", "/lib/x86_64-linux-gnu/libbsd.so.0:/lib/libbsd.so.0", "-v", "/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2:/lib/ld-linux-x86-64.so.2", - "-p", "8080", "busybox", "/bin/nc", "-lp", "8080") + runCmd := exec.Command(dockerBinary, "run", "-d", "-p", "8080", "busybox", "nc", "-lp", "8080") out, _, err := runCommandWithOutput(runCmd) errorOut(err, t, fmt.Sprintf("run1 failed with errors: %v (%s)", err, out)) @@ -60,10 +49,7 @@ func TestNetworkNat(t *testing.T) { t.Fatal("Port 8080/tcp not found in NetworkSettings") } - runCmd = exec.Command(dockerBinary, "run", - "-v", ncPath+":/bin/nc", - "-v", "/lib/x86_64-linux-gnu/libc.so.6:/lib/libc.so.6", "-v", "/lib/x86_64-linux-gnu/libresolv.so.2:/lib/libresolv.so.2", "-v", "/lib/x86_64-linux-gnu/libbsd.so.0:/lib/libbsd.so.0", "-v", "/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2:/lib/ld-linux-x86-64.so.2", - "-p", "8080", "busybox", "sh", "-c", fmt.Sprintf("echo hello world | /bin/nc -w 30 %s %s", ifaceIp, port8080[0].HostPort)) + runCmd = exec.Command(dockerBinary, "run", "-p", "8080", "busybox", "sh", "-c", fmt.Sprintf("echo hello world | nc -w 30 %s %s", ifaceIp, port8080[0].HostPort)) out, _, err = runCommandWithOutput(runCmd) errorOut(err, t, fmt.Sprintf("run2 failed with errors: %v (%s)", err, out)) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index b9737feeea..b72d8e32ca 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -251,13 +251,13 @@ func TestDockerRunWorkingDirectory(t *testing.T) { // pinging Google's DNS resolver should fail when we disable the networking func TestDockerRunWithoutNetworking(t *testing.T) { - runCmd := exec.Command(dockerBinary, "run", "--networking=false", "busybox", "ping", "-c", "1", "8.8.8.8") + runCmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ping", "-c", "1", "8.8.8.8") out, _, exitCode, err := runCommandWithStdoutStderr(runCmd) if err != nil && exitCode != 1 { t.Fatal(out, err) } if exitCode != 1 { - t.Errorf("--networking=false should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + t.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") } runCmd = exec.Command(dockerBinary, "run", "-n=false", "busybox", "ping", "-c", "1", "8.8.8.8") @@ -271,7 +271,7 @@ func TestDockerRunWithoutNetworking(t *testing.T) { deleteAllContainers() - logDone("run - disable networking with --networking=false") + logDone("run - disable networking with --net=none") logDone("run - disable networking with -n=false") } @@ -438,7 +438,7 @@ func TestCreateVolume(t *testing.T) { deleteAllContainers() - logDone("run - create docker mangaed volume") + logDone("run - create docker managed volume") } // Test that creating a volume with a symlink in its path works correctly. Test for #5152. 
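For reference, the pkg/iptables helper exercised by TestIpTablesRulesWhenLinkAndUnlink above can also be called directly; a minimal sketch with illustrative rule values:

    import "github.com/dotcloud/docker/pkg/iptables"

    rule := []string{"FORWARD", "-i", "docker0", "-o", "docker0",
        "-p", "tcp", "-s", childIp, "--sport", "80", "-d", parentIp, "-j", "ACCEPT"}
    if !iptables.Exists(rule...) {
        // the link's ACCEPT rule has been removed
    }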
@@ -544,6 +544,51 @@ func TestUserByID(t *testing.T) { logDone("run - user by id") } +func TestUserByIDBig(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-u", "2147483648", "busybox", "id") + + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal("expected an error, but got none:", out) + } + if !strings.Contains(out, "Uids and gids must be in range") { + t.Fatalf("expected error about uids range, got %s", out) + } + deleteAllContainers() + + logDone("run - user by id, id too big") +} + +func TestUserByIDNegative(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-u", "-1", "busybox", "id") + + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal("expected an error, but got none:", out) + } + if !strings.Contains(out, "Uids and gids must be in range") { + t.Fatalf("expected error about uids range, got %s", out) + } + deleteAllContainers() + + logDone("run - user by id, id negative") +} + +func TestUserByIDZero(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-u", "0", "busybox", "id") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") { + t.Fatalf("expected root user, got %s", out) + } + deleteAllContainers() + + logDone("run - user by id, zero uid") +} + func TestUserNotFound(t *testing.T) { cmd := exec.Command(dockerBinary, "run", "-u", "notme", "busybox", "id") @@ -633,7 +678,7 @@ func TestContainerNetwork(t *testing.T) { // Issue #4681 func TestLoopbackWhenNetworkDisabled(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "--networking=false", "busybox", "ping", "-c", "1", "127.0.0.1") + cmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } @@ -644,18 +689,29 @@ func TestLoopbackWhenNetworkDisabled(t *testing.T) { } func TestLoopbackOnlyExistsWhenNetworkingDisabled(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "--networking=false", "busybox", "ip", "a", "show", "up") + cmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } - interfaces := regexp.MustCompile(`(?m)^[0-9]+: [a-zA-Z0-9]+`).FindAllString(out, -1) - if len(interfaces) != 1 { - t.Fatalf("Wrong interface count in test container: expected [*: lo], got %s", interfaces) + var ( + count = 0 + parts = strings.Split(out, "\n") + ) + + for _, l := range parts { + if l != "" { + count++ + } } - if !strings.HasSuffix(interfaces[0], ": lo") { - t.Fatalf("Wrong interface in test container: expected [*: lo], got %s", interfaces) + + if count != 1 { + t.Fatalf("Wrong interface count in container: expected 1, got %d", count) + } + + if !strings.HasPrefix(out, "1: lo") { + t.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out) } deleteAllContainers() @@ -768,3 +824,14 @@ func TestProcWritableInPrivilegedContainers(t *testing.T) { logDone("run - proc writable in privileged container") } + +func TestRunWithCpuset(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cpuset", "0", "busybox", "true") + if code, err := runCommand(cmd); err != nil || code != 0 { + t.Fatalf("container should run successfully with cpuset of 0: %s", err) + } + + deleteAllContainers() + + logDone("run - cpuset 0") +}
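The three TestUserByID* tests above pin down the accepted uid/gid range; a hypothetical sketch of the check they imply (the daemon's actual validation lives in its user-handling path, not in this helper):

    // checkIDRange is illustrative only: uids and gids must fit in a non-negative int32.
    func checkIDRange(id int64) error {
        if id < 0 || id > math.MaxInt32 {
            return fmt.Errorf("Uids and gids must be in range 0-%d", math.MaxInt32)
        }
        return nil
    }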
diff --git a/integration-cli/docker_cli_version_test.go b/integration-cli/docker_cli_version_test.go index f18d5bede6..7f1838e5d9 100644 --- a/integration-cli/docker_cli_version_test.go +++ b/integration-cli/docker_cli_version_test.go @@ -24,9 +24,8 @@ func TestVersionEnsureSucceeds(t *testing.T) { "Git commit (client):", "Server version:", "Server API version:", - "Git commit (server):", "Go version (server):", - "Last stable version:", + "Git commit (server):", } for _, linePrefix := range stringsToCheck { diff --git a/integration-cli/docker_test_vars.go b/integration-cli/docker_test_vars.go index f8bd5c116b..78e0685a9d 100644 --- a/integration-cli/docker_test_vars.go +++ b/integration-cli/docker_test_vars.go @@ -1,7 +1,9 @@ package main import ( + "fmt" "os" + "os/exec" ) // the docker binary to use @@ -18,6 +20,15 @@ var workingDirectory string func init() { if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" { dockerBinary = dockerBin + } else { + whichCmd := exec.Command("which", "docker") + out, _, err := runCommandWithOutput(whichCmd) + if err == nil { + dockerBinary = stripTrailingCharacters(out) + } else { + fmt.Printf("ERROR: couldn't resolve full path to the Docker binary\n") + os.Exit(1) + } } if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" { registryImageName = registryImage diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index 6da86c9753..660d509e76 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -3,6 +3,7 @@ package main import ( "fmt" "os/exec" + "strconv" "strings" "testing" ) @@ -61,3 +62,38 @@ func cmd(t *testing.T, args ...string) (string, int, error) { errorOut(err, t, fmt.Sprintf("'%s' failed with errors: %v (%v)", strings.Join(args, " "), err, out)) return out, status, err } + +func findContainerIp(t *testing.T, id string) string { + cmd := exec.Command(dockerBinary, "inspect", "--format='{{ .NetworkSettings.IPAddress }}'", id) + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + return strings.Trim(out, " \r\n'") +} + +func getContainerCount() (int, error) { + const containers = "Containers:" + + cmd := exec.Command(dockerBinary, "info") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + return 0, err + } + + lines := strings.Split(out, "\n") + for _, line := range lines { + if strings.Contains(line, containers) { + output := stripTrailingCharacters(line) + output = strings.TrimLeft(output, containers) + output = strings.Trim(output, " ") + containerCount, err := strconv.Atoi(output) + if err != nil { + return 0, err + } + return containerCount, nil + } + } + return 0, fmt.Errorf("couldn't find the Container count in the output") +} diff --git a/integration/api_test.go b/integration/api_test.go index 04611dfe3d..969e0fbaf2 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -536,7 +536,6 @@ func TestGetContainersByName(t *testing.T) { func TestPostCommit(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) // Create a container and remove a file containerID := createTestContainer(eng, @@ -567,7 +566,7 @@ func TestPostCommit(t *testing.T) { if err := env.Decode(r.Body); err != nil { t.Fatal(err) } - if _, err := srv.ImageInspect(env.Get("Id")); err != nil { + if err := eng.Job("image_inspect", env.Get("Id")).Run(); err != nil { t.Fatalf("The image has not been committed") } } diff --git a/integration/buildfile_test.go b/integration/buildfile_test.go index 81580ec98c..e113cdf512 100644 --- a/integration/buildfile_test.go +++ b/integration/buildfile_test.go @@ -1,19 +1,22 @@ package
docker import ( + "bytes" + "encoding/json" "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/image" - "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/server" - "github.com/dotcloud/docker/utils" "io/ioutil" "net" "net/http" "net/http/httptest" "strings" "testing" + + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/server" + "github.com/dotcloud/docker/utils" ) // A testContextTemplate describes a build context and how to test it @@ -394,13 +397,21 @@ func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, u } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := server.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, ioutil.Discard, utils.NewStreamFormatter(false), nil) + buildfile := server.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) id, err := buildfile.Build(context.Archive(dockerfile, t)) if err != nil { return nil, err } - return srv.ImageInspect(id) + job := eng.Job("image_inspect", id) + buffer := bytes.NewBuffer(nil) + image := &image.Image{} + job.Stdout.Add(buffer) + if err := job.Run(); err != nil { + return nil, err + } + err = json.NewDecoder(buffer).Decode(image) + return image, err } func TestVolume(t *testing.T) { @@ -828,7 +839,7 @@ func TestForbiddenContextPath(t *testing.T) { } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := server.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil) + buildfile := server.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) _, err = buildfile.Build(context.Archive(dockerfile, t)) if err == nil { @@ -874,7 +885,7 @@ func TestBuildADDFileNotFound(t *testing.T) { } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := server.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil) + buildfile := server.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) _, err = buildfile.Build(context.Archive(dockerfile, t)) if err == nil { diff --git a/integration/commands_test.go b/integration/commands_test.go index 5b967b68cc..2ee5842e1c 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -3,12 +3,6 @@ package docker import ( "bufio" "fmt" - "github.com/dotcloud/docker/api/client" - "github.com/dotcloud/docker/daemon" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/image" - "github.com/dotcloud/docker/pkg/term" - "github.com/dotcloud/docker/utils" "io" "io/ioutil" "os" @@ -19,6 +13,13 @@ import ( "syscall" "testing" "time" + + "github.com/dotcloud/docker/api/client" + "github.com/dotcloud/docker/daemon" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/pkg/term" + "github.com/dotcloud/docker/utils" ) func closeWrap(args ...io.Closer) error { @@ -1051,11 +1052,12 @@ func TestContainerOrphaning(t *testing.T) { if err := cli.CmdBuild("-t", image, tmpDir); err != nil { t.Fatal(err) } - 
img, err := srv.ImageInspect(image) - if err != nil { + job := globalEngine.Job("image_get", image) + info, _ := job.Stdout.AddEnv() + if err := job.Run(); err != nil { t.Fatal(err) } - return img.ID + return info.Get("ID") } // build an image diff --git a/integration/runtime_test.go b/integration/runtime_test.go index c84ea5bed2..9c59d38e01 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -3,6 +3,19 @@ package docker import ( "bytes" "fmt" + "io" + "log" + "net" + "net/url" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "testing" + "time" + "github.com/dotcloud/docker/daemon" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/image" @@ -10,18 +23,6 @@ import ( "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/sysinit" "github.com/dotcloud/docker/utils" - "io" - "log" - "net" - "net/url" - "os" - "path/filepath" - goruntime "runtime" - "strconv" - "strings" - "syscall" - "testing" - "time" ) const ( @@ -127,7 +128,7 @@ func init() { spawnGlobalDaemon() spawnLegitHttpsDaemon() spawnRogueHttpsDaemon() - startFds, startGoroutines = utils.GetTotalUsedFds(), goruntime.NumGoroutine() + startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine() } func setupBaseImage() { @@ -421,13 +422,14 @@ func TestGet(t *testing.T) { func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daemon.Container, string) { var ( - err error - id string - strPort string - eng = NewTestEngine(t) - daemon = mkDaemonFromEngine(eng, t) - port = 5554 - p nat.Port + err error + id string + outputBuffer = bytes.NewBuffer(nil) + strPort string + eng = NewTestEngine(t) + daemon = mkDaemonFromEngine(eng, t) + port = 5554 + p nat.Port ) defer func() { if err != nil { @@ -455,10 +457,11 @@ func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daem jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd}) jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)}) jobCreate.SetenvJson("ExposedPorts", ep) - jobCreate.Stdout.AddString(&id) + jobCreate.Stdout.Add(outputBuffer) if err := jobCreate.Run(); err != nil { t.Fatal(err) } + id = engine.Tail(outputBuffer, 1) // FIXME: this relies on the undocumented behavior of daemon.Create // which will return a nil error AND container if the exposed ports // are invalid. That behavior should be fixed! 
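A condensed sketch of the buffer-and-Tail pattern these hunks introduce; the helper name is invented for illustration, but the engine calls mirror the patch:

package docker

import (
	"bytes"
	"testing"

	"github.com/dotcloud/docker/engine"
)

// createAndGetID is illustrative only; it shows the replacement for the
// removed job.Stdout.AddString helper: attach a bytes.Buffer, run the
// job, then read the last line of output (the container ID) back with
// engine.Tail.
func createAndGetID(eng *engine.Engine, t *testing.T) string {
	job := eng.Job("create")
	outputBuffer := bytes.NewBuffer(nil)
	job.Stdout.Add(outputBuffer)
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
	return engine.Tail(outputBuffer, 1)
}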
@@ -720,12 +723,12 @@ func TestContainerNameValidation(t *testing.T) { t.Fatal(err) } - var shortID string + var outputBuffer = bytes.NewBuffer(nil) job := eng.Job("create", test.Name) if err := job.ImportEnv(config); err != nil { t.Fatal(err) } - job.Stdout.AddString(&shortID) + job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { if !test.Valid { continue @@ -733,7 +736,7 @@ t.Fatal(err) } - container := daemon.Get(shortID) + container := daemon.Get(engine.Tail(outputBuffer, 1)) if container.Name != "/"+test.Name { t.Fatalf("Expect /%s got %s", test.Name, container.Name) diff --git a/integration/server_test.go b/integration/server_test.go index 226247556d..3752c9b7e6 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -1,12 +1,14 @@ package docker import ( - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/server" + "bytes" "strings" "testing" "time" + + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/server" ) func TestCreateNumberHostname(t *testing.T) { @@ -70,22 +72,22 @@ func TestMergeConfigOnCommit(t *testing.T) { job.Setenv("repo", "testrepo") job.Setenv("tag", "testtag") job.SetenvJson("config", config) - var newId string - job.Stdout.AddString(&newId) + var outputBuffer = bytes.NewBuffer(nil) + job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { t.Error(err) } - container2, _, _ := mkContainer(runtime, []string{newId}, t) + container2, _, _ := mkContainer(runtime, []string{engine.Tail(outputBuffer, 1)}, t) defer runtime.Destroy(container2) - job = eng.Job("inspect", container1.Name, "container") + job = eng.Job("container_inspect", container1.Name) baseContainer, _ := job.Stdout.AddEnv() if err := job.Run(); err != nil { t.Error(err) } - job = eng.Job("inspect", container2.Name, "container") + job = eng.Job("container_inspect", container2.Name) commitContainer, _ := job.Stdout.AddEnv() if err := job.Run(); err != nil { t.Error(err) @@ -168,8 +170,6 @@ func TestRestartKillWait(t *testing.T) { setTimeout(t, "Waiting on stopped container timedout", 5*time.Second, func() { job = srv.Eng.Job("wait", outs.Data[0].Get("Id")) - var statusStr string - job.Stdout.AddString(&statusStr) if err := job.Run(); err != nil { t.Fatal(err) } @@ -266,8 +266,6 @@ func TestRunWithTooLowMemoryLimit(t *testing.T) { job.Setenv("Memory", "524287") job.Setenv("CpuShares", "1000") job.SetenvList("Cmd", []string{"/bin/cat"}) - var id string - job.Stdout.AddString(&id) if err := job.Run(); err == nil { t.Errorf("Memory limit is smaller than the allowed limit. Container creation should've failed!") }
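Similarly, for jobs that emit structured output the tests read an engine.Env back via Stdout.AddEnv. A minimal sketch of the renamed container_inspect job; the helper name and the "Name" key lookup are illustrative assumptions, the job name and AddEnv call come from the patch:

package docker

import (
	"testing"

	"github.com/dotcloud/docker/engine"
)

// inspectName is illustrative only: the old "inspect <name> container"
// invocation becomes the single-purpose "container_inspect" job, and the
// result is decoded through an engine.Env rather than a raw string.
func inspectName(eng *engine.Engine, t *testing.T, name string) string {
	job := eng.Job("container_inspect", name)
	info, _ := job.Stdout.AddEnv()
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
	return info.Get("Name")
}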
@@ -302,13 +300,13 @@ func TestRmi(t *testing.T) { job = eng.Job("commit", containerID) job.Setenv("repo", "test") - var imageID string - job.Stdout.AddString(&imageID) + var outputBuffer = bytes.NewBuffer(nil) + job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { t.Fatal(err) } - if err := eng.Job("tag", imageID, "test", "0.1").Run(); err != nil { + if err := eng.Job("tag", engine.Tail(outputBuffer, 1), "test", "0.1").Run(); err != nil { t.Fatal(err) } @@ -339,7 +337,7 @@ func TestRmi(t *testing.T) { t.Fatalf("Expected 2 new images, found %d.", images.Len()-initialImages.Len()) } - if err = srv.DeleteImage(imageID, engine.NewTable("", 0), true, false, false); err != nil { + if err = srv.DeleteImage(engine.Tail(outputBuffer, 1), engine.NewTable("", 0), true, false, false); err != nil { t.Fatal(err) } diff --git a/integration/utils_test.go b/integration/utils_test.go index 6901662ce6..d8101dfb1d 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -3,7 +3,6 @@ package docker import ( "bytes" "fmt" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "net/http" @@ -14,6 +13,8 @@ import ( "testing" "time" + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "github.com/dotcloud/docker/builtins" "github.com/dotcloud/docker/daemon" "github.com/dotcloud/docker/engine" @@ -42,11 +43,12 @@ func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f ut if err := job.ImportEnv(config); err != nil { f.Fatal(err) } - job.Stdout.AddString(&shortId) + var outputBuffer = bytes.NewBuffer(nil) + job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { f.Fatal(err) } - return + return engine.Tail(outputBuffer, 1) } func createTestContainer(eng *engine.Engine, config *runconfig.Config, f utils.Fataler) (shortId string) { diff --git a/pkg/beam/beam.go b/pkg/beam/beam.go index b1e4667a3f..2e8895a153 100644 --- a/pkg/beam/beam.go +++ b/pkg/beam/beam.go @@ -29,17 +29,48 @@ type ReceiveSender interface { Sender } -func SendPipe(dst Sender, data []byte) (*os.File, error) { +const ( + R = iota + W +) + +func sendPipe(dst Sender, data []byte, mode int) (*os.File, error) { r, w, err := os.Pipe() if err != nil { return nil, err } - if err := dst.Send(data, r); err != nil { - r.Close() - w.Close() + var ( + remote *os.File + local *os.File + ) + if mode == R { + remote = r + local = w + } else if mode == W { + remote = w + local = r + } + if err := dst.Send(data, remote); err != nil { + local.Close() + remote.Close() return nil, err } - return w, nil + return local, nil + +} + +// SendRPipe creates a pipe and sends its *read* end attached in a beam message +// to `dst`, with `data` as the message payload. +// It returns the *write* end of the pipe, or an error. +func SendRPipe(dst Sender, data []byte) (*os.File, error) { + return sendPipe(dst, data, R) +} + +// SendWPipe creates a pipe and sends its *write* end attached in a beam message +// to `dst`, with `data` as the message payload. +// It returns the *read* end of the pipe, or an error.
+func SendWPipe(dst Sender, data []byte) (*os.File, error) { + return sendPipe(dst, data, W) } func SendConn(dst Sender, data []byte) (conn *UnixConn, err error) { diff --git a/pkg/beam/data/message.go b/pkg/beam/data/message.go index 193fb7b241..0ebe90295a 100644 --- a/pkg/beam/data/message.go +++ b/pkg/beam/data/message.go @@ -72,6 +72,16 @@ func (m Message) Get(k string) []string { return v } +// GetOne returns the last value added at the key k, +// or an empty string if there is no value. +func (m Message) GetOne(k string) string { + var v string + if vals := m.Get(k); len(vals) > 0 { + v = vals[len(vals)-1] + } + return v +} + func (m Message) Pretty() string { data, err := Decode(string(m)) if err != nil { diff --git a/pkg/beam/data/message_test.go b/pkg/beam/data/message_test.go index 7685769069..7224f33d11 100644 --- a/pkg/beam/data/message_test.go +++ b/pkg/beam/data/message_test.go @@ -51,3 +51,11 @@ func TestSetDelMessage(t *testing.T) { t.Fatalf("'%v' != '%v'", output, expectedOutput) } } + +func TestGetOne(t *testing.T) { + m := Empty().Set("shadok words", "ga", "bu", "zo", "meu") + val := m.GetOne("shadok words") + if val != "meu" { + t.Fatalf("%#v", val) + } +} diff --git a/pkg/beam/examples/beamsh/beamsh.go b/pkg/beam/examples/beamsh/beamsh.go index 3f258de332..808f038c68 100644 --- a/pkg/beam/examples/beamsh/beamsh.go +++ b/pkg/beam/examples/beamsh/beamsh.go @@ -257,12 +257,12 @@ func Handlers(sink beam.Sender) (*beam.UnixConn, error) { if handler == nil { return } - stdout, err := beam.SendPipe(conn, data.Empty().Set("cmd", "log", "stdout").Set("fromcmd", cmd...).Bytes()) + stdout, err := beam.SendRPipe(conn, data.Empty().Set("cmd", "log", "stdout").Set("fromcmd", cmd...).Bytes()) if err != nil { return } defer stdout.Close() - stderr, err := beam.SendPipe(conn, data.Empty().Set("cmd", "log", "stderr").Set("fromcmd", cmd...).Bytes()) + stderr, err := beam.SendRPipe(conn, data.Empty().Set("cmd", "log", "stderr").Set("fromcmd", cmd...).Bytes()) if err != nil { return } diff --git a/pkg/beam/examples/beamsh/builtins.go b/pkg/beam/examples/beamsh/builtins.go index cc94d2b5fb..3242237cc1 100644 --- a/pkg/beam/examples/beamsh/builtins.go +++ b/pkg/beam/examples/beamsh/builtins.go @@ -272,7 +272,7 @@ func CmdPrint(args []string, stdout, stderr io.Writer, in beam.Receiver, out bea } // Skip commands if a != nil && data.Message(payload).Get("cmd") == nil { - dup, err := beam.SendPipe(out, payload) + dup, err := beam.SendRPipe(out, payload) if err != nil { a.Close() return diff --git a/pkg/beam/router.go b/pkg/beam/router.go index fc41a8991b..15910e95b1 100644 --- a/pkg/beam/router.go +++ b/pkg/beam/router.go @@ -78,7 +78,7 @@ func (route *Route) Tee(dst Sender) *Route { return inner(payload, attachment) } // Setup the tee - w, err := SendPipe(dst, payload) + w, err := SendRPipe(dst, payload) if err != nil { return err } diff --git a/pkg/cgroups/fs/cpuset.go b/pkg/cgroups/fs/cpuset.go deleted file mode 100644 index 8a13c56cea..0000000000 --- a/pkg/cgroups/fs/cpuset.go +++ /dev/null @@ -1,36 +0,0 @@ -package fs - -import ( - "os" -) - -type cpusetGroup struct { -} - -func (s *cpusetGroup) Set(d *data) error { - // we don't want to join this cgroup unless it is specified - if d.c.CpusetCpus != "" { - dir, err := d.join("cpuset") - if err != nil && d.c.CpusetCpus != "" { - return err - } - defer func() { - if err != nil { - os.RemoveAll(dir) - } - }() - - if err := writeFile(dir, "cpuset.cpus", d.c.CpusetCpus); err != nil { - return err - } - } - return nil -} - -func (s *cpusetGroup) 
Remove(d *data) error { - return removePath(d.path("cpuset")) -} - -func (s *cpusetGroup) Stats(d *data) (map[string]float64, error) { - return nil, ErrNotSupportStat -} diff --git a/pkg/cgroups/systemd/apply_nosystemd.go b/pkg/cgroups/systemd/apply_nosystemd.go deleted file mode 100644 index 4faa749745..0000000000 --- a/pkg/cgroups/systemd/apply_nosystemd.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux - -package systemd - -import ( - "fmt" - "github.com/dotcloud/docker/pkg/cgroups" -) - -func UseSystemd() bool { - return false -} - -func Apply(c *Cgroup, pid int) (cgroups.ActiveCgroup, error) { - return nil, fmt.Errorf("Systemd not supported") -} diff --git a/pkg/collections/orderedintset.go b/pkg/collections/orderedintset.go index 23abab04d3..7442f2e93f 100644 --- a/pkg/collections/orderedintset.go +++ b/pkg/collections/orderedintset.go @@ -1,12 +1,13 @@ package collections import ( + "sort" "sync" ) // OrderedIntSet is a thread-safe sorted set and a stack. type OrderedIntSet struct { - sync.RWMutex + sync.Mutex set []int } @@ -15,29 +16,22 @@ func NewOrderedIntSet() *OrderedIntSet { return &OrderedIntSet{} } -// Push takes a string and adds it to the set. If the elem aready exists, it has no effect. +// Push takes an int and adds it to the set. If the elem already exists, it has no effect. func (s *OrderedIntSet) Push(elem int) { - s.RLock() - for _, e := range s.set { - if e == elem { - s.RUnlock() - return - } - } - s.RUnlock() - s.Lock() + s.Lock() + if len(s.set) == 0 { + s.set = append(s.set, elem) + s.Unlock() + return + } // Make sure the list is always sorted - for i, e := range s.set { - if elem < e { - s.set = append(s.set[:i], append([]int{elem}, s.set[i:]...)...) - s.Unlock() - return - } + i := sort.SearchInts(s.set, elem) + if i < len(s.set) && s.set[i] == elem { + s.Unlock() + return } - // If we reach here, then elem is the biggest elem of the list. - s.set = append(s.set, elem) + s.set = append(s.set[:i], append([]int{elem}, s.set[i:]...)...) s.Unlock() } @@ -46,28 +40,26 @@ func (s *OrderedIntSet) Pop() int { return s.PopFront() } -// Pop returns the first elemen from the list and removes it. +// Pop returns the first element from the list and removes it. // If the list is empty, it returns 0 func (s *OrderedIntSet) PopFront() int { - s.RLock() - - for i, e := range s.set { - ret := e - s.RUnlock() - s.Lock() - s.set = append(s.set[:i], s.set[i+1:]...) + s.Lock() + if len(s.set) == 0 { s.Unlock() - return ret + return 0 } - s.RUnlock() - - return 0 + ret := s.set[0] + s.set = s.set[1:] + s.Unlock() + return ret } // PullBack retrieve the last element of the list. // The element is not removed. // If the list is empty, an empty element is returned. func (s *OrderedIntSet) PullBack() int { + s.Lock() + defer s.Unlock() if len(s.set) == 0 { return 0 } @@ -76,21 +68,28 @@ func (s *OrderedIntSet) PullBack() int { // Exists checks if the given element present in the list. func (s *OrderedIntSet) Exists(elem int) bool { - for _, e := range s.set { - if e == elem { - return true - } + s.Lock() + if len(s.set) == 0 { + s.Unlock() + return false } - return false + i := sort.SearchInts(s.set, elem) + res := i < len(s.set) && s.set[i] == elem + s.Unlock() + return res } // Remove removes an element from the list. // If the element is not found, it has no effect. func (s *OrderedIntSet) Remove(elem int) { - for i, e := range s.set { - if e == elem { - s.set = append(s.set[:i], s.set[i+1:]...)
- return - } + s.Lock() + if len(s.set) == 0 { + s.Unlock() + return } + i := sort.SearchInts(s.set, elem) + if i < len(s.set) && s.set[i] == elem { + s.set = append(s.set[:i], s.set[i+1:]...) + } + s.Unlock() } diff --git a/pkg/collections/orderedintset_test.go b/pkg/collections/orderedintset_test.go new file mode 100644 index 0000000000..0ac4ca5455 --- /dev/null +++ b/pkg/collections/orderedintset_test.go @@ -0,0 +1,71 @@ +package collections + +import ( + "math/rand" + "testing" +) + +func BenchmarkPush(b *testing.B) { + var testSet []int + for i := 0; i < 1000; i++ { + testSet = append(testSet, rand.Int()) + } + s := NewOrderedIntSet() + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, elem := range testSet { + s.Push(elem) + } + } +} + +func BenchmarkPop(b *testing.B) { + var testSet []int + for i := 0; i < 1000; i++ { + testSet = append(testSet, rand.Int()) + } + s := NewOrderedIntSet() + for _, elem := range testSet { + s.Push(elem) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < 1000; j++ { + s.Pop() + } + } +} + +func BenchmarkExist(b *testing.B) { + var testSet []int + for i := 0; i < 1000; i++ { + testSet = append(testSet, rand.Intn(2000)) + } + s := NewOrderedIntSet() + for _, elem := range testSet { + s.Push(elem) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < 1000; j++ { + s.Exists(j) + } + } +} + +func BenchmarkRemove(b *testing.B) { + var testSet []int + for i := 0; i < 1000; i++ { + testSet = append(testSet, rand.Intn(2000)) + } + s := NewOrderedIntSet() + for _, elem := range testSet { + s.Push(elem) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < 1000; j++ { + s.Remove(j) + } + } +} diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go index 4cdd67ef7c..b44c452233 100644 --- a/pkg/iptables/iptables.go +++ b/pkg/iptables/iptables.go @@ -20,6 +20,7 @@ const ( var ( ErrIptablesNotFound = errors.New("Iptables not found") nat = []string{"-t", "nat"} + supportsXlock = false ) type Chain struct { @@ -27,6 +28,10 @@ type Chain struct { Bridge string } +func init() { + supportsXlock = exec.Command("iptables", "--wait", "-L", "-n").Run() == nil +} + func NewChain(name, bridge string) (*Chain, error) { if output, err := Raw("-t", "nat", "-N", name); err != nil { return nil, err @@ -147,12 +152,19 @@ func Raw(args ...string) ([]byte, error) { if err != nil { return nil, ErrIptablesNotFound } + + if supportsXlock { + args = append([]string{"--wait"}, args...) 
+ } + if os.Getenv("DEBUG") != "" { fmt.Printf("[DEBUG] [iptables]: %s, %v\n", path, args) } + output, err := exec.Command(path, args...).CombinedOutput() if err != nil { return nil, fmt.Errorf("iptables failed: iptables %v: %s (%s)", strings.Join(args, " "), output, err) } + return output, err } diff --git a/pkg/cgroups/cgroups.go b/pkg/libcontainer/cgroups/cgroups.go similarity index 100% rename from pkg/cgroups/cgroups.go rename to pkg/libcontainer/cgroups/cgroups.go diff --git a/pkg/cgroups/cgroups_test.go b/pkg/libcontainer/cgroups/cgroups_test.go similarity index 100% rename from pkg/cgroups/cgroups_test.go rename to pkg/libcontainer/cgroups/cgroups_test.go diff --git a/pkg/cgroups/fs/apply_raw.go b/pkg/libcontainer/cgroups/fs/apply_raw.go similarity index 75% rename from pkg/cgroups/fs/apply_raw.go rename to pkg/libcontainer/cgroups/fs/apply_raw.go index 5f9fc826b3..be500781ec 100644 --- a/pkg/cgroups/fs/apply_raw.go +++ b/pkg/libcontainer/cgroups/fs/apply_raw.go @@ -7,7 +7,7 @@ import ( "path/filepath" "strconv" - "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) var ( @@ -26,7 +26,7 @@ var ( type subsystem interface { Set(*data) error Remove(*data) error - Stats(*data) (map[string]float64, error) + Stats(*data) (map[string]int64, error) } type data struct { @@ -74,7 +74,7 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { return d, nil } -func GetStats(c *cgroups.Cgroup, subsystem string, pid int) (map[string]float64, error) { +func GetStats(c *cgroups.Cgroup, subsystem string, pid int) (map[string]int64, error) { cgroupRoot, err := cgroups.FindCgroupMountpoint("cpu") if err != nil { return nil, err @@ -103,12 +103,50 @@ func GetStats(c *cgroups.Cgroup, subsystem string, pid int) (map[string]float64, return sys.Stats(d) } -func (raw *data) path(subsystem string) (string, error) { +func GetPids(c *cgroups.Cgroup) ([]int, error) { + cgroupRoot, err := cgroups.FindCgroupMountpoint("cpu") + if err != nil { + return nil, err + } + cgroupRoot = filepath.Dir(cgroupRoot) + + if _, err := os.Stat(cgroupRoot); err != nil { + return nil, fmt.Errorf("cgroup root %s not found", cgroupRoot) + } + + cgroup := c.Name + if c.Parent != "" { + cgroup = filepath.Join(c.Parent, cgroup) + } + + d := &data{ + root: cgroupRoot, + cgroup: cgroup, + c: c, + } + + dir, err := d.path("devices") + if err != nil { + return nil, err + } + + return cgroups.ReadProcsFile(dir) +} + +func (raw *data) parent(subsystem string) (string, error) { initPath, err := cgroups.GetInitCgroupDir(subsystem) if err != nil { return "", err } - return filepath.Join(raw.root, subsystem, initPath, raw.cgroup), nil + return filepath.Join(raw.root, subsystem, initPath), nil +} + +func (raw *data) path(subsystem string) (string, error) { + parent, err := raw.parent(subsystem) + if err != nil { + return "", err + } + return filepath.Join(parent, raw.cgroup), nil } func (raw *data) join(subsystem string) (string, error) { diff --git a/pkg/cgroups/fs/blkio.go b/pkg/libcontainer/cgroups/fs/blkio.go similarity index 87% rename from pkg/cgroups/fs/blkio.go rename to pkg/libcontainer/cgroups/fs/blkio.go index 79e14fa2dc..5cbef69f55 100644 --- a/pkg/cgroups/fs/blkio.go +++ b/pkg/libcontainer/cgroups/fs/blkio.go @@ -9,7 +9,7 @@ import ( "strconv" "strings" - "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) type blkioGroup struct { @@ -57,9 +57,9 @@ examples: 8:0 Total 0 Total 0 */ -func (s *blkioGroup) Stats(d *data) 
(map[string]float64, error) { +func (s *blkioGroup) Stats(d *data) (map[string]int64, error) { var ( - paramData = make(map[string]float64) + paramData = make(map[string]int64) params = []string{ "io_service_bytes_recursive", "io_serviced_recursive", @@ -91,7 +91,7 @@ func (s *blkioGroup) Stats(d *data) (map[string]float64, error) { fields := strings.Fields(sc.Text()) switch len(fields) { case 3: - v, err := strconv.ParseFloat(fields[2], 64) + v, err := strconv.ParseInt(fields[2], 10, 64) if err != nil { return nil, err } @@ -106,7 +106,7 @@ func (s *blkioGroup) Stats(d *data) (map[string]float64, error) { return paramData, nil } -func (s *blkioGroup) getSectors(path string) (string, float64, error) { +func (s *blkioGroup) getSectors(path string) (string, int64, error) { f, err := os.Open(filepath.Join(path, "blkio.sectors_recursive")) if err != nil { return "", 0, err diff --git a/pkg/cgroups/fs/blkio_test.go b/pkg/libcontainer/cgroups/fs/blkio_test.go similarity index 84% rename from pkg/cgroups/fs/blkio_test.go rename to pkg/libcontainer/cgroups/fs/blkio_test.go index 5279ac437b..d0244ad716 100644 --- a/pkg/cgroups/fs/blkio_test.go +++ b/pkg/libcontainer/cgroups/fs/blkio_test.go @@ -43,29 +43,29 @@ func TestBlkioStats(t *testing.T) { } // Verify expected stats. - expectedStats := map[string]float64{ - "blkio.sectors_recursive:8:0": 1024.0, + expectedStats := map[string]int64{ + "blkio.sectors_recursive:8:0": 1024, // Serviced bytes. - "io_service_bytes_recursive:8:0:Read": 100.0, - "io_service_bytes_recursive:8:0:Write": 400.0, - "io_service_bytes_recursive:8:0:Sync": 200.0, - "io_service_bytes_recursive:8:0:Async": 300.0, - "io_service_bytes_recursive:8:0:Total": 500.0, + "io_service_bytes_recursive:8:0:Read": 100, + "io_service_bytes_recursive:8:0:Write": 400, + "io_service_bytes_recursive:8:0:Sync": 200, + "io_service_bytes_recursive:8:0:Async": 300, + "io_service_bytes_recursive:8:0:Total": 500, // Serviced requests. - "io_serviced_recursive:8:0:Read": 10.0, - "io_serviced_recursive:8:0:Write": 40.0, - "io_serviced_recursive:8:0:Sync": 20.0, - "io_serviced_recursive:8:0:Async": 30.0, - "io_serviced_recursive:8:0:Total": 50.0, + "io_serviced_recursive:8:0:Read": 10, + "io_serviced_recursive:8:0:Write": 40, + "io_serviced_recursive:8:0:Sync": 20, + "io_serviced_recursive:8:0:Async": 30, + "io_serviced_recursive:8:0:Total": 50, // Queued requests. 
- "io_queued_recursive:8:0:Read": 1.0, - "io_queued_recursive:8:0:Write": 4.0, - "io_queued_recursive:8:0:Sync": 2.0, - "io_queued_recursive:8:0:Async": 3.0, - "io_queued_recursive:8:0:Total": 5.0, + "io_queued_recursive:8:0:Read": 1, + "io_queued_recursive:8:0:Write": 4, + "io_queued_recursive:8:0:Sync": 2, + "io_queued_recursive:8:0:Async": 3, + "io_queued_recursive:8:0:Total": 5, } expectStats(t, expectedStats, stats) } diff --git a/pkg/cgroups/fs/cpu.go b/pkg/libcontainer/cgroups/fs/cpu.go similarity index 91% rename from pkg/cgroups/fs/cpu.go rename to pkg/libcontainer/cgroups/fs/cpu.go index 6a7f66c72d..ad3078b3b8 100644 --- a/pkg/cgroups/fs/cpu.go +++ b/pkg/libcontainer/cgroups/fs/cpu.go @@ -39,8 +39,8 @@ func (s *cpuGroup) Remove(d *data) error { return removePath(d.path("cpu")) } -func (s *cpuGroup) Stats(d *data) (map[string]float64, error) { - paramData := make(map[string]float64) +func (s *cpuGroup) Stats(d *data) (map[string]int64, error) { + paramData := make(map[string]int64) path, err := d.path("cpu") if err != nil { return nil, err diff --git a/pkg/cgroups/fs/cpu_test.go b/pkg/libcontainer/cgroups/fs/cpu_test.go similarity index 89% rename from pkg/cgroups/fs/cpu_test.go rename to pkg/libcontainer/cgroups/fs/cpu_test.go index 698ae921d8..cacf2f4ced 100644 --- a/pkg/cgroups/fs/cpu_test.go +++ b/pkg/libcontainer/cgroups/fs/cpu_test.go @@ -20,10 +20,10 @@ func TestCpuStats(t *testing.T) { t.Fatal(err) } - expected_stats := map[string]float64{ - "nr_periods": 2000.0, - "nr_throttled": 200.0, - "throttled_time": 42424242424.0, + expected_stats := map[string]int64{ + "nr_periods": 2000, + "nr_throttled": 200, + "throttled_time": 42424242424, } expectStats(t, expected_stats, stats) } diff --git a/pkg/cgroups/fs/cpuacct.go b/pkg/libcontainer/cgroups/fs/cpuacct.go similarity index 71% rename from pkg/cgroups/fs/cpuacct.go rename to pkg/libcontainer/cgroups/fs/cpuacct.go index 892b5ab6b1..c52049f3e9 100644 --- a/pkg/cgroups/fs/cpuacct.go +++ b/pkg/libcontainer/cgroups/fs/cpuacct.go @@ -10,13 +10,13 @@ import ( "strings" "time" - "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" "github.com/dotcloud/docker/pkg/system" ) var ( - cpuCount = float64(runtime.NumCPU()) - clockTicks = float64(system.GetClockTicks()) + cpuCount = int64(runtime.NumCPU()) + clockTicks = int64(system.GetClockTicks()) ) type cpuacctGroup struct { @@ -34,11 +34,11 @@ func (s *cpuacctGroup) Remove(d *data) error { return removePath(d.path("cpuacct")) } -func (s *cpuacctGroup) Stats(d *data) (map[string]float64, error) { +func (s *cpuacctGroup) Stats(d *data) (map[string]int64, error) { var ( - startCpu, lastCpu, startSystem, lastSystem, startUsage, lastUsage float64 - percentage float64 - paramData = make(map[string]float64) + startCpu, lastCpu, startSystem, lastSystem, startUsage, lastUsage int64 + percentage int64 + paramData = make(map[string]int64) ) path, err := d.path("cpuacct") if startCpu, err = s.getCpuUsage(d, path); err != nil { @@ -48,7 +48,7 @@ func (s *cpuacctGroup) Stats(d *data) (map[string]float64, error) { return nil, err } startUsageTime := time.Now() - if startUsage, err = getCgroupParamFloat64(path, "cpuacct.usage"); err != nil { + if startUsage, err = getCgroupParamInt(path, "cpuacct.usage"); err != nil { return nil, err } // sample for 100ms @@ -60,7 +60,7 @@ func (s *cpuacctGroup) Stats(d *data) (map[string]float64, error) { return nil, err } usageSampleDuration := time.Since(startUsageTime) - if lastUsage, err = getCgroupParamFloat64(path, 
"cpuacct.usage"); err != nil { + if lastUsage, err = getCgroupParamInt(path, "cpuacct.usage"); err != nil { return nil, err } @@ -77,19 +77,12 @@ func (s *cpuacctGroup) Stats(d *data) (map[string]float64, error) { paramData["percentage"] = percentage // Delta usage is in nanoseconds of CPU time so get the usage (in cores) over the sample time. - paramData["usage"] = deltaUsage / float64(usageSampleDuration.Nanoseconds()) + paramData["usage"] = deltaUsage / usageSampleDuration.Nanoseconds() return paramData, nil } -func (s *cpuacctGroup) getProcStarttime(d *data) (float64, error) { - rawStart, err := system.GetProcessStartTime(d.pid) - if err != nil { - return 0, err - } - return strconv.ParseFloat(rawStart, 64) -} - -func (s *cpuacctGroup) getSystemCpuUsage(d *data) (float64, error) { +// TODO(vmarmol): Use cgroups stats. +func (s *cpuacctGroup) getSystemCpuUsage(d *data) (int64, error) { f, err := os.Open("/proc/stat") if err != nil { @@ -106,11 +99,11 @@ func (s *cpuacctGroup) getSystemCpuUsage(d *data) (float64, error) { return 0, fmt.Errorf("invalid number of cpu fields") } - var total float64 + var total int64 for _, i := range parts[1:8] { - v, err := strconv.ParseFloat(i, 64) + v, err := strconv.ParseInt(i, 10, 64) if err != nil { - return 0.0, fmt.Errorf("Unable to convert value %s to float: %s", i, err) + return 0.0, fmt.Errorf("Unable to convert value %s to int: %s", i, err) } total += v } @@ -122,11 +115,11 @@ func (s *cpuacctGroup) getSystemCpuUsage(d *data) (float64, error) { return 0, fmt.Errorf("invalid stat format") } -func (s *cpuacctGroup) getCpuUsage(d *data, path string) (float64, error) { - cpuTotal := 0.0 +func (s *cpuacctGroup) getCpuUsage(d *data, path string) (int64, error) { + cpuTotal := int64(0) f, err := os.Open(filepath.Join(path, "cpuacct.stat")) if err != nil { - return 0.0, err + return 0, err } defer f.Close() @@ -134,7 +127,7 @@ func (s *cpuacctGroup) getCpuUsage(d *data, path string) (float64, error) { for sc.Scan() { _, v, err := getCgroupParamKeyValue(sc.Text()) if err != nil { - return 0.0, err + return 0, err } // set the raw data in map cpuTotal += v diff --git a/pkg/libcontainer/cgroups/fs/cpuset.go b/pkg/libcontainer/cgroups/fs/cpuset.go new file mode 100644 index 0000000000..af2dd528d0 --- /dev/null +++ b/pkg/libcontainer/cgroups/fs/cpuset.go @@ -0,0 +1,108 @@ +package fs + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "strconv" +) + +type cpusetGroup struct { +} + +func (s *cpusetGroup) Set(d *data) error { + // we don't want to join this cgroup unless it is specified + if d.c.CpusetCpus != "" { + dir, err := d.path("cpuset") + if err != nil { + return err + } + if err := s.ensureParent(dir); err != nil { + return err + } + + // because we are not using d.join we need to place the pid into the procs file + // unlike the other subsystems + if err := writeFile(dir, "cgroup.procs", strconv.Itoa(d.pid)); err != nil { + return err + } + if err := writeFile(dir, "cpuset.cpus", d.c.CpusetCpus); err != nil { + return err + } + } + return nil +} + +func (s *cpusetGroup) Remove(d *data) error { + return removePath(d.path("cpuset")) +} + +func (s *cpusetGroup) Stats(d *data) (map[string]int64, error) { + return nil, ErrNotSupportStat +} + +func (s *cpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) { + if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil { + return + } + if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil { + return + } + return 
cpus, mems, nil +} + +// ensureParent ensures that the parent directory of current is created +// with the proper cpus and mems files copied from its parent if the current +// values are empty (a file containing only a newline char) +func (s *cpusetGroup) ensureParent(current string) error { + parent := filepath.Dir(current) + + if _, err := os.Stat(parent); err != nil { + if !os.IsNotExist(err) { + return err + } + + if err := s.ensureParent(parent); err != nil { + return err + } + } + + if err := os.MkdirAll(current, 0755); err != nil && !os.IsExist(err) { + return err + } + return s.copyIfNeeded(current, parent) +} + +// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent +// directory to the current directory if the file's contents are empty +func (s *cpusetGroup) copyIfNeeded(current, parent string) error { + var ( + err error + currentCpus, currentMems []byte + parentCpus, parentMems []byte + ) + + if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil { + return err + } + if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil { + return err + } + + if s.isEmpty(currentCpus) { + if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil { + return err + } + } + if s.isEmpty(currentMems) { + if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil { + return err + } + } + return nil +} + +func (s *cpusetGroup) isEmpty(b []byte) bool { + return len(bytes.Trim(b, "\n")) == 0 +} diff --git a/pkg/cgroups/fs/devices.go b/pkg/libcontainer/cgroups/fs/devices.go similarity index 86% rename from pkg/cgroups/fs/devices.go rename to pkg/libcontainer/cgroups/fs/devices.go index a2f91eda14..00fea608f9 100644 --- a/pkg/cgroups/fs/devices.go +++ b/pkg/libcontainer/cgroups/fs/devices.go @@ -1,9 +1,5 @@ package fs -import ( - "os" -) - type devicesGroup struct { } @@ -12,11 +8,6 @@ func (s *devicesGroup) Set(d *data) error { if err != nil { return err } - defer func() { - if err != nil { - os.RemoveAll(dir) - } - }() if !d.c.DeviceAccess { if err := writeFile(dir, "devices.deny", "a"); err != nil { @@ -64,6 +55,6 @@ func (s *devicesGroup) Remove(d *data) error { return removePath(d.path("devices")) } -func (s *devicesGroup) Stats(d *data) (map[string]float64, error) { +func (s *devicesGroup) Stats(d *data) (map[string]int64, error) { return nil, ErrNotSupportStat } diff --git a/pkg/cgroups/fs/freezer.go b/pkg/libcontainer/cgroups/fs/freezer.go similarity index 76% rename from pkg/cgroups/fs/freezer.go rename to pkg/libcontainer/cgroups/fs/freezer.go index 70cfcdde72..0738ec1f09 100644 --- a/pkg/cgroups/fs/freezer.go +++ b/pkg/libcontainer/cgroups/fs/freezer.go @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) type freezerGroup struct { @@ -35,9 +35,9 @@ func (s *freezerGroup) Remove(d *data) error { return removePath(d.path("freezer")) } -func (s *freezerGroup) Stats(d *data) (map[string]float64, error) { +func (s *freezerGroup) Stats(d *data) (map[string]int64, error) { var ( - paramData = make(map[string]float64) + paramData = make(map[string]int64) params = []string{ "parent_freezing", "self_freezing", @@ -50,6 +50,7 @@ func (s *freezerGroup) Stats(d *data) (map[string]int64, error) { return nil, err } + // TODO(vmarmol): This currently outputs nothing since the output is a string, fix.
for _, param := range params { f, err := os.Open(filepath.Join(path, fmt.Sprintf("freezer.%s", param))) if err != nil { @@ -62,7 +63,7 @@ func (s *freezerGroup) Stats(d *data) (map[string]float64, error) { return nil, err } - v, err := strconv.ParseFloat(strings.TrimSuffix(string(data), "\n"), 64) + v, err := strconv.ParseInt(strings.TrimSuffix(string(data), "\n"), 10, 64) if err != nil { return nil, err } diff --git a/pkg/cgroups/fs/memory.go b/pkg/libcontainer/cgroups/fs/memory.go similarity index 91% rename from pkg/cgroups/fs/memory.go rename to pkg/libcontainer/cgroups/fs/memory.go index 837640c088..9964f83767 100644 --- a/pkg/cgroups/fs/memory.go +++ b/pkg/libcontainer/cgroups/fs/memory.go @@ -50,8 +50,8 @@ func (s *memoryGroup) Remove(d *data) error { return removePath(d.path("memory")) } -func (s *memoryGroup) Stats(d *data) (map[string]float64, error) { - paramData := make(map[string]float64) +func (s *memoryGroup) Stats(d *data) (map[string]int64, error) { + paramData := make(map[string]int64) path, err := d.path("memory") if err != nil { return nil, err @@ -79,7 +79,7 @@ func (s *memoryGroup) Stats(d *data) (map[string]float64, error) { "max_usage_in_bytes", } for _, param := range params { - value, err := getCgroupParamFloat64(path, fmt.Sprintf("memory.%s", param)) + value, err := getCgroupParamInt(path, fmt.Sprintf("memory.%s", param)) if err != nil { return nil, err } diff --git a/pkg/cgroups/fs/memory_test.go b/pkg/libcontainer/cgroups/fs/memory_test.go similarity index 96% rename from pkg/cgroups/fs/memory_test.go rename to pkg/libcontainer/cgroups/fs/memory_test.go index 6c1fb735e9..190d437b1c 100644 --- a/pkg/cgroups/fs/memory_test.go +++ b/pkg/libcontainer/cgroups/fs/memory_test.go @@ -25,7 +25,7 @@ func TestMemoryStats(t *testing.T) { if err != nil { t.Fatal(err) } - expectedStats := map[string]float64{"cache": 512.0, "rss": 1024.0, "usage_in_bytes": 2048.0, "max_usage_in_bytes": 4096.0} + expectedStats := map[string]int64{"cache": 512, "rss": 1024, "usage_in_bytes": 2048, "max_usage_in_bytes": 4096} expectStats(t, expectedStats, stats) } diff --git a/pkg/cgroups/fs/perf_event.go b/pkg/libcontainer/cgroups/fs/perf_event.go similarity index 76% rename from pkg/cgroups/fs/perf_event.go rename to pkg/libcontainer/cgroups/fs/perf_event.go index 789b3e59ad..1cf1aeef12 100644 --- a/pkg/cgroups/fs/perf_event.go +++ b/pkg/libcontainer/cgroups/fs/perf_event.go @@ -1,7 +1,7 @@ package fs import ( - "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) type perfEventGroup struct { @@ -19,6 +19,6 @@ func (s *perfEventGroup) Remove(d *data) error { return removePath(d.path("perf_event")) } -func (s *perfEventGroup) Stats(d *data) (map[string]float64, error) { +func (s *perfEventGroup) Stats(d *data) (map[string]int64, error) { return nil, ErrNotSupportStat } diff --git a/pkg/cgroups/fs/test_util.go b/pkg/libcontainer/cgroups/fs/test_util.go similarity index 96% rename from pkg/cgroups/fs/test_util.go rename to pkg/libcontainer/cgroups/fs/test_util.go index 11b90b21d6..333386c5de 100644 --- a/pkg/cgroups/fs/test_util.go +++ b/pkg/libcontainer/cgroups/fs/test_util.go @@ -61,7 +61,7 @@ func (c *cgroupTestUtil) writeFileContents(fileContents map[string]string) { } // Expect the specified stats. 
-func expectStats(t *testing.T, expected, actual map[string]float64) { +func expectStats(t *testing.T, expected, actual map[string]int64) { for stat, expectedValue := range expected { actualValue, ok := actual[stat] if !ok { diff --git a/pkg/cgroups/fs/utils.go b/pkg/libcontainer/cgroups/fs/utils.go similarity index 56% rename from pkg/cgroups/fs/utils.go rename to pkg/libcontainer/cgroups/fs/utils.go index 8be65c97ea..7213b5d6a0 100644 --- a/pkg/cgroups/fs/utils.go +++ b/pkg/libcontainer/cgroups/fs/utils.go @@ -16,25 +16,25 @@ var ( // Parses a cgroup param and returns as name, value // i.e. "io_service_bytes 1234" will return as io_service_bytes, 1234 -func getCgroupParamKeyValue(t string) (string, float64, error) { +func getCgroupParamKeyValue(t string) (string, int64, error) { parts := strings.Fields(t) switch len(parts) { case 2: - value, err := strconv.ParseFloat(parts[1], 64) + value, err := strconv.ParseInt(parts[1], 10, 64) if err != nil { - return "", 0.0, fmt.Errorf("Unable to convert param value to float: %s", err) + return "", 0, fmt.Errorf("Unable to convert param value to int: %s", err) } return parts[0], value, nil default: - return "", 0.0, ErrNotValidFormat + return "", 0, ErrNotValidFormat } } -// Gets a single float64 value from the specified cgroup file. -func getCgroupParamFloat64(cgroupPath, cgroupFile string) (float64, error) { +// Gets a single int64 value from the specified cgroup file. +func getCgroupParamInt(cgroupPath, cgroupFile string) (int64, error) { contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile)) if err != nil { - return -1.0, err + return -1, err } - return strconv.ParseFloat(strings.TrimSpace(string(contents)), 64) + return strconv.ParseInt(strings.TrimSpace(string(contents)), 10, 64) } diff --git a/pkg/cgroups/fs/utils_test.go b/pkg/libcontainer/cgroups/fs/utils_test.go similarity index 81% rename from pkg/cgroups/fs/utils_test.go rename to pkg/libcontainer/cgroups/fs/utils_test.go index c8f1b0172b..4dd2243efa 100644 --- a/pkg/cgroups/fs/utils_test.go +++ b/pkg/libcontainer/cgroups/fs/utils_test.go @@ -13,7 +13,7 @@ const ( floatString = "2048" ) -func TestGetCgroupParamsFloat64(t *testing.T) { +func TestGetCgroupParamsInt(t *testing.T) { // Setup tempdir. 
tempDir, err := ioutil.TempDir("", "cgroup_utils_test") if err != nil { @@ -27,7 +27,7 @@ func TestGetCgroupParamsFloat64(t *testing.T) { if err != nil { t.Fatal(err) } - value, err := getCgroupParamFloat64(tempDir, cgroupFile) + value, err := getCgroupParamInt(tempDir, cgroupFile) if err != nil { t.Fatal(err) } else if value != floatValue { @@ -39,7 +39,7 @@ func TestGetCgroupParamsFloat64(t *testing.T) { if err != nil { t.Fatal(err) } - value, err = getCgroupParamFloat64(tempDir, cgroupFile) + value, err = getCgroupParamInt(tempDir, cgroupFile) if err != nil { t.Fatal(err) } else if value != floatValue { @@ -51,7 +51,7 @@ func TestGetCgroupParamsFloat64(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = getCgroupParamFloat64(tempDir, cgroupFile) + _, err = getCgroupParamInt(tempDir, cgroupFile) if err == nil { t.Fatal("Expecting error, got none") } @@ -61,7 +61,7 @@ func TestGetCgroupParamsFloat64(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = getCgroupParamFloat64(tempDir, cgroupFile) + _, err = getCgroupParamInt(tempDir, cgroupFile) if err == nil { t.Fatal("Expecting error, got none") } diff --git a/pkg/libcontainer/cgroups/stats.go b/pkg/libcontainer/cgroups/stats.go new file mode 100644 index 0000000000..fbcd5dd234 --- /dev/null +++ b/pkg/libcontainer/cgroups/stats.go @@ -0,0 +1,59 @@ +package cgroups + +type ThrottlingData struct { + // Number of periods with throttling active + Periods int64 `json:"periods,omitempty"` + // Number of periods when the container hit its throttling limit. + ThrottledPeriods int64 `json:"throttled_periods,omitempty"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime int64 `json:"throttled_time,omitempty"` +} + +type CpuUsage struct { + // percentage of available CPUs currently being used. + PercentUsage int64 `json:"percent_usage,omitempty"` + // nanoseconds of cpu time consumed over the last 100 ms. + CurrentUsage int64 `json:"current_usage,omitempty"` +} + +type CpuStats struct { + CpuUsage CpuUsage `json:"cpu_usage,omitempty"` + ThrottlingData ThrottlingData `json:"throlling_data,omitempty"` +} + +type MemoryStats struct { + // current res_counter usage for memory + Usage int64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage int64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]int64 `json:"stats,omitempty"` +} + +type BlkioStatEntry struct { + Major int64 `json:"major,omitempty"` + Minor int64 `json:"minor,omitempty"` + Op string `json:"op,omitempty"` + Value int64 `json:"value,omitempty"` +} + +type BlockioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recusrive,omitempty"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"` +} + +// TODO(Vishh): Remove freezer from stats since it does not logically belong in stats.
+type FreezerStats struct { + ParentState string `json:"parent_state,omitempty"` + SelfState string `json:"self_state,omitempty"` +} + +type Stats struct { + CpuStats CpuStats `json:"cpu_stats,omitempty"` + MemoryStats MemoryStats `json:"memory_stats,omitempty"` + BlockioStats BlockioStats `json:"blockio_stats,omitempty"` + FreezerStats FreezerStats `json:"freezer_stats,omitempty"` +} diff --git a/pkg/libcontainer/cgroups/systemd/apply_nosystemd.go b/pkg/libcontainer/cgroups/systemd/apply_nosystemd.go new file mode 100644 index 0000000000..0fff3e4c6b --- /dev/null +++ b/pkg/libcontainer/cgroups/systemd/apply_nosystemd.go @@ -0,0 +1,21 @@ +// +build !linux + +package systemd + +import ( + "fmt" + + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" +) + +func UseSystemd() bool { + return false +} + +func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { + return nil, fmt.Errorf("Systemd not supported") +} + +func GetPids(c *cgroups.Cgroup) ([]int, error) { + return nil, fmt.Errorf("Systemd not supported") +} diff --git a/pkg/cgroups/systemd/apply_systemd.go b/pkg/libcontainer/cgroups/systemd/apply_systemd.go similarity index 88% rename from pkg/cgroups/systemd/apply_systemd.go rename to pkg/libcontainer/cgroups/systemd/apply_systemd.go index c4b0937b63..0f6beb658e 100644 --- a/pkg/cgroups/systemd/apply_systemd.go +++ b/pkg/libcontainer/cgroups/systemd/apply_systemd.go @@ -3,6 +3,7 @@ package systemd import ( + "fmt" "io/ioutil" "os" "path/filepath" @@ -11,7 +12,7 @@ import ( "sync" systemd1 "github.com/coreos/go-systemd/dbus" - "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" "github.com/dotcloud/docker/pkg/systemd" "github.com/godbus/dbus" ) @@ -78,7 +79,7 @@ type cgroupArg struct { func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { var ( - unitName = c.Parent + "-" + c.Name + ".scope" + unitName = getUnitName(c) slice = "system.slice" properties []systemd1.Property cpuArgs []cgroupArg @@ -174,13 +175,22 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { path := filepath.Join(mountpoint, cgroup) - // /dev/pts/* - if err := ioutil.WriteFile(filepath.Join(path, "devices.allow"), []byte("c 136:* rwm"), 0700); err != nil { - return nil, err + allow := []string{ + // allow mknod for any device + "c *:* m", + "b *:* m", + + // /dev/pts/ - pts namespaces are "coming soon" + "c 136:* rwm", + + // tuntap + "c 10:200 rwm", } - // tuntap - if err := ioutil.WriteFile(filepath.Join(path, "devices.allow"), []byte("c 10:200 rwm"), 0700); err != nil { - return nil, err + + for _, val := range allow { + if err := ioutil.WriteFile(filepath.Join(path, "devices.allow"), []byte(val), 0700); err != nil { + return nil, err + } } } @@ -294,3 +304,24 @@ func (c *systemdCgroup) Cleanup() error { return nil } + +func GetPids(c *cgroups.Cgroup) ([]int, error) { + unitName := getUnitName(c) + + mountpoint, err := cgroups.FindCgroupMountpoint("cpu") + if err != nil { + return nil, err + } + + props, err := theConn.GetUnitTypeProperties(unitName, getIfaceForUnit(unitName)) + if err != nil { + return nil, err + } + cgroup := props["ControlGroup"].(string) + + return cgroups.ReadProcsFile(filepath.Join(mountpoint, cgroup)) +} + +func getUnitName(c *cgroups.Cgroup) string { + return fmt.Sprintf("%s-%s.scope", c.Parent, c.Name) +}
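The typed structures above replace the flat map[string]float64 stats. A standalone sketch of how they serialize, using the tags exactly as declared; the fixture values echo the test data elsewhere in this patch, and the program itself is illustrative only:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/dotcloud/docker/pkg/libcontainer/cgroups"
)

// Populate the new typed stats and print them with the json tags
// declared above.
func main() {
	s := cgroups.Stats{
		CpuStats: cgroups.CpuStats{
			ThrottlingData: cgroups.ThrottlingData{
				Periods:          2000,
				ThrottledPeriods: 200,
				ThrottledTime:    42424242424,
			},
		},
		MemoryStats: cgroups.MemoryStats{Usage: 2048, MaxUsage: 4096},
	}
	out, err := json.MarshalIndent(s, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}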
diff --git a/pkg/cgroups/utils.go b/pkg/libcontainer/cgroups/utils.go similarity index 76% rename from pkg/cgroups/utils.go rename to pkg/libcontainer/cgroups/utils.go index 02a7f357f6..111c871477 100644 --- a/pkg/cgroups/utils.go +++ b/pkg/libcontainer/cgroups/utils.go @@ -4,6 +4,8 @@ import ( "bufio" "io" "os" + "path/filepath" + "strconv" "strings" "github.com/dotcloud/docker/pkg/mount" @@ -49,6 +51,30 @@ func GetInitCgroupDir(subsystem string) (string, error) { return parseCgroupFile(subsystem, f) } +func ReadProcsFile(dir string) ([]int, error) { + f, err := os.Open(filepath.Join(dir, "cgroup.procs")) + if err != nil { + return nil, err + } + defer f.Close() + + var ( + s = bufio.NewScanner(f) + out = []int{} + ) + + for s.Scan() { + if t := s.Text(); t != "" { + pid, err := strconv.Atoi(t) + if err != nil { + return nil, err + } + out = append(out, pid) + } + } + return out, nil +} + func parseCgroupFile(subsystem string, r io.Reader) (string, error) { s := bufio.NewScanner(r) for s.Scan() { diff --git a/pkg/libcontainer/console/console.go b/pkg/libcontainer/console/console.go index 5f06aea225..62edbfde8a 100644 --- a/pkg/libcontainer/console/console.go +++ b/pkg/libcontainer/console/console.go @@ -40,9 +40,6 @@ func Setup(rootfs, consolePath, mountLabel string) error { if err := label.SetFileLabel(consolePath, mountLabel); err != nil { return fmt.Errorf("set file label %s %s", dest, err) } - if err := system.Mount(consolePath, dest, "bind", syscall.MS_BIND, ""); err != nil { - return fmt.Errorf("bind %s to %s %s", consolePath, dest, err) - } return nil } diff --git a/pkg/libcontainer/container.go b/pkg/libcontainer/container.go index 5acdff3d29..6734bfd590 100644 --- a/pkg/libcontainer/container.go +++ b/pkg/libcontainer/container.go @@ -1,29 +1,72 @@ package libcontainer import ( - "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) -// Context is a generic key value pair that allows -// arbatrary data to be sent +// Context is a generic key value pair that allows arbitrary data to be sent type Context map[string]string -// Container defines configuration options for how a -// container is setup inside a directory and how a process should be executed +// Container defines configuration options for executing a process inside a contained environment type Container struct { - Hostname string `json:"hostname,omitempty"` // hostname - ReadonlyFs bool `json:"readonly_fs,omitempty"` // set the containers rootfs as readonly - NoPivotRoot bool `json:"no_pivot_root,omitempty"` // this can be enabled if you are running in ramdisk - User string `json:"user,omitempty"` // user to execute the process as - WorkingDir string `json:"working_dir,omitempty"` // current working directory - Env []string `json:"environment,omitempty"` // environment to set - Tty bool `json:"tty,omitempty"` // setup a proper tty or not - Namespaces map[string]bool `json:"namespaces,omitempty"` // namespaces to apply - CapabilitiesMask map[string]bool `json:"capabilities_mask,omitempty"` // capabilities to drop - Networks []*Network `json:"networks,omitempty"` // nil for host's network stack - Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` // cgroups - Context Context `json:"context,omitempty"` // generic context for specific options (apparmor, selinux) - Mounts Mounts `json:"mounts,omitempty"` + // Hostname optionally sets the container's hostname if provided + Hostname string `json:"hostname,omitempty"` + + // ReadonlyFs will remount the container's rootfs as readonly where only externally mounted + // bind mounts are writable + ReadonlyFs bool `json:"readonly_fs,omitempty"` + + // NoPivotRoot will use MS_MOVE and a chroot to jail the
process into the container's rootfs + // This is a common option when the container is running in ramdisk + NoPivotRoot bool `json:"no_pivot_root,omitempty"` + + // User will set the uid and gid of the executing process running inside the container + User string `json:"user,omitempty"` + + // WorkingDir will change the processes current working directory inside the container's rootfs + WorkingDir string `json:"working_dir,omitempty"` + + // Env will populate the processes environment with the provided values + // Any values from the parent processes will be cleared before the values + // provided in Env are provided to the process + Env []string `json:"environment,omitempty"` + + // Tty when true will allocate a pty slave on the host for access by the container's process + // and ensure that it is mounted inside the container's rootfs + Tty bool `json:"tty,omitempty"` + + // Namespaces specifies the container's namespaces that it should setup when cloning the init process + // If a namespace is not provided that namespace is shared from the container's parent process + Namespaces map[string]bool `json:"namespaces,omitempty"` + + // Capabilities specify the capabilities to keep when executing the process inside the container + // All capbilities not specified will be dropped from the processes capability mask + Capabilities []string `json:"capabilities,omitempty"` + + // Networks specifies the container's network setup to be created + Networks []*Network `json:"networks,omitempty"` + + // Cgroups specifies specific cgroup settings for the various subsystems that the container is + // placed into to limit the resources the container has available + Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` + + // Context is a generic key value format that allows for additional settings to be passed + // on the container's creation + // This is commonly used to specify apparmor profiles, selinux labels, and different restrictions + // placed on the container's processes + Context Context `json:"context,omitempty"` + + // Mounts specify additional source and destination paths that will be mounted inside the container's + // rootfs and mount namespace if specified + Mounts Mounts `json:"mounts,omitempty"` + + // RequiredDeviceNodes are a list of device nodes that will be mknod into the container's rootfs at /dev + // If the host system does not support the device that the container requests an error is returned + RequiredDeviceNodes []string `json:"required_device_nodes,omitempty"` + + // OptionalDeviceNodes are a list of device nodes that will be mknod into the container's rootfs at /dev + // If the host system does not support the device that the container requests the error is ignored + OptionalDeviceNodes []string `json:"optional_device_nodes,omitempty"` } // Network defines configuration for a container's networking stack @@ -31,9 +74,20 @@ type Container struct { // The network configuration can be omited from a container causing the // container to be setup with the host's networking stack type Network struct { - Type string `json:"type,omitempty"` // type of networking to setup i.e. 
veth, macvlan, etc - Context Context `json:"context,omitempty"` // generic context for type specific networking options - Address string `json:"address,omitempty"` - Gateway string `json:"gateway,omitempty"` - Mtu int `json:"mtu,omitempty"` + // Type sets the networks type, commonly veth and loopback + Type string `json:"type,omitempty"` + + // Context is a generic key value format for setting additional options that are specific to + // the network type + Context Context `json:"context,omitempty"` + + // Address contains the IP and mask to set on the network interface + Address string `json:"address,omitempty"` + + // Gateway sets the gateway address that is used as the default for the interface + Gateway string `json:"gateway,omitempty"` + + // Mtu sets the mtu value for the interface and will be mirrored on both the host and + // container's interfaces if a pair is created, specifically in the case of type veth + Mtu int `json:"mtu,omitempty"` } diff --git a/pkg/libcontainer/container.json b/pkg/libcontainer/container.json index 33d79600d4..ba8117091d 100644 --- a/pkg/libcontainer/container.json +++ b/pkg/libcontainer/container.json @@ -24,24 +24,9 @@ "mtu": 1500 } ], - "capabilities_mask": { - "SYSLOG": false, - "MKNOD": true, - "NET_ADMIN": false, - "MAC_ADMIN": false, - "MAC_OVERRIDE": false, - "AUDIT_CONTROL": false, - "AUDIT_WRITE": false, - "SYS_TTY_CONFIG": false, - "SETPCAP": false, - "SYS_MODULE": false, - "SYS_RAWIO": false, - "SYS_PACCT": false, - "SYS_ADMIN": false, - "SYS_NICE": false, - "SYS_RESOURCE": false, - "SYS_TIME": false - }, + "capabilities": [ + "MKNOD" + ], "cgroups": { "name": "docker-koye", "parent": "docker" @@ -58,5 +43,13 @@ { "type": "devtmpfs" } + ], + "required_device_nodes": [ + "/dev/null", + "/dev/zero", + "/dev/full", + "/dev/random", + "/dev/urandom", + "/dev/tty" ] } diff --git a/pkg/libcontainer/container_test.go b/pkg/libcontainer/container_test.go index c02385af3f..f6e991edf5 100644 --- a/pkg/libcontainer/container_test.go +++ b/pkg/libcontainer/container_test.go @@ -4,8 +4,20 @@ import ( "encoding/json" "os" "testing" + + "github.com/dotcloud/docker/pkg/libcontainer/mount/nodes" ) +// Checks whether the expected capability is specified in the capabilities. 
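The container.json hunk above inverts the capability model: the old capabilities_mask enumerated what to toggle off, while the new capabilities field lists only what survives, and device nodes move into explicit required/optional lists. A minimal sketch of the same configuration built programmatically (field names are taken from the struct comments above; /dev/fuse is a made-up optional node):

```go
package main

import (
	"encoding/json"
	"os"

	"github.com/dotcloud/docker/pkg/libcontainer"
)

func main() {
	container := &libcontainer.Container{
		// Everything absent from Capabilities is dropped, so the config
		// only needs to name what it actually keeps.
		Capabilities: []string{"MKNOD"},
		// Missing required nodes abort container setup; missing optional
		// nodes are silently skipped.
		RequiredDeviceNodes: []string{
			"/dev/null", "/dev/zero", "/dev/full",
			"/dev/random", "/dev/urandom", "/dev/tty",
		},
		OptionalDeviceNodes: []string{"/dev/fuse"},
	}
	// Round-trips through the same JSON tags as container.json.
	json.NewEncoder(os.Stdout).Encode(container)
}
```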
+func contains(expected string, values []string) bool { + for _, v := range values { + if v == expected { + return true + } + } + return false +} + func TestContainerJsonFormat(t *testing.T) { f, err := os.Open("container.json") if err != nil { @@ -37,23 +49,25 @@ func TestContainerJsonFormat(t *testing.T) { t.Fail() } - if _, exists := container.CapabilitiesMask["SYS_ADMIN"]; !exists { - t.Log("capabilities mask should contain SYS_ADMIN") - t.Fail() - } - - if container.CapabilitiesMask["SYS_ADMIN"] { + if contains("SYS_ADMIN", container.Capabilities) { t.Log("SYS_ADMIN should not be enabled in capabilities mask") t.Fail() } - if !container.CapabilitiesMask["MKNOD"] { + if !contains("MKNOD", container.Capabilities) { t.Log("MKNOD should be enabled in capabilities mask") t.Fail() } - if container.CapabilitiesMask["SYS_CHROOT"] { + if contains("SYS_CHROOT", container.Capabilities) { t.Log("capabilities mask should not contain SYS_CHROOT") t.Fail() } + + for _, n := range nodes.DefaultNodes { + if !contains(n, container.RequiredDeviceNodes) { + t.Logf("devices should contain %s", n) + t.Fail() + } + } } diff --git a/pkg/libcontainer/mount/init.go b/pkg/libcontainer/mount/init.go index cfe61d1532..82c76aad72 100644 --- a/pkg/libcontainer/mount/init.go +++ b/pkg/libcontainer/mount/init.go @@ -11,6 +11,7 @@ import ( "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/mount/nodes" + "github.com/dotcloud/docker/pkg/symlink" "github.com/dotcloud/docker/pkg/system" ) @@ -36,7 +37,7 @@ func InitializeMountNamespace(rootfs, console string, container *libcontainer.Co flag = syscall.MS_SLAVE } if err := system.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil { - return fmt.Errorf("mounting / as slave %s", err) + return fmt.Errorf("mounting / with flags %X %s", (flag | syscall.MS_REC), err) } if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil { return fmt.Errorf("mouting %s as bind %s", rootfs, err) @@ -47,12 +48,18 @@ func InitializeMountNamespace(rootfs, console string, container *libcontainer.Co if err := setupBindmounts(rootfs, container.Mounts); err != nil { return fmt.Errorf("bind mounts %s", err) } - if err := nodes.CopyN(rootfs, nodes.DefaultNodes); err != nil { - return fmt.Errorf("copy dev nodes %s", err) + if err := nodes.CopyN(rootfs, container.RequiredDeviceNodes, true); err != nil { + return fmt.Errorf("copy required dev nodes %s", err) + } + if err := nodes.CopyN(rootfs, container.OptionalDeviceNodes, false); err != nil { + return fmt.Errorf("copy optional dev nodes %s", err) } if err := SetupPtmx(rootfs, console, container.Context["mount_label"]); err != nil { return err } + if err := setupDevSymlinks(rootfs); err != nil { + return fmt.Errorf("dev symlinks %s", err) + } if err := system.Chdir(rootfs); err != nil { return fmt.Errorf("chdir into %s %s", rootfs, err) } @@ -91,6 +98,56 @@ func mountSystem(rootfs string, container *libcontainer.Container) error { return nil } +func createIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + } else { + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + } + return nil +} + +func setupDevSymlinks(rootfs string) error { + var links = 
[][2]string{ + {"/proc/self/fd", "/dev/fd"}, + {"/proc/self/fd/0", "/dev/stdin"}, + {"/proc/self/fd/1", "/dev/stdout"}, + {"/proc/self/fd/2", "/dev/stderr"}, + } + + // kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink + // in /dev if it exists in /proc. + if _, err := os.Stat("/proc/kcore"); err == nil { + links = append(links, [2]string{"/proc/kcore", "/dev/kcore"}) + } + + for _, link := range links { + var ( + src = link[0] + dst = filepath.Join(rootfs, link[1]) + ) + + if err := os.Symlink(src, dst); err != nil && !os.IsExist(err) { + return fmt.Errorf("symlink %s %s %s", src, dst, err) + } + } + + return nil +} + func setupBindmounts(rootfs string, bindMounts libcontainer.Mounts) error { for _, m := range bindMounts.OfType("bind") { var ( @@ -100,6 +157,21 @@ func setupBindmounts(rootfs string, bindMounts libcontainer.Mounts) error { if !m.Writable { flags = flags | syscall.MS_RDONLY } + + stat, err := os.Stat(m.Source) + if err != nil { + return err + } + + dest, err = symlink.FollowSymlinkInScope(dest, rootfs) + if err != nil { + return err + } + + if err := createIfNotExists(dest, stat.IsDir()); err != nil { + return fmt.Errorf("Creating new bind-mount target, %s", err) + } + if err := system.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil { return fmt.Errorf("mounting %s into %s %s", m.Source, dest, err) } @@ -123,12 +195,10 @@ func newSystemMounts(rootfs, mountLabel string, mounts libcontainer.Mounts) []mo systemMounts := []mount{ {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, {source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags}, + {source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)}, {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)}, {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}, } - if len(mounts.OfType("devtmpfs")) == 1 { - systemMounts = append([]mount{{source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)}}, systemMounts...) 
- } return systemMounts } diff --git a/pkg/libcontainer/mount/nodes/nodes.go b/pkg/libcontainer/mount/nodes/nodes.go index 5022f85b0b..f8e6e97450 100644 --- a/pkg/libcontainer/mount/nodes/nodes.go +++ b/pkg/libcontainer/mount/nodes/nodes.go @@ -4,46 +4,85 @@ package nodes import ( "fmt" - "github.com/dotcloud/docker/pkg/system" + "io/ioutil" "os" "path/filepath" "syscall" + + "github.com/dotcloud/docker/pkg/system" ) // Default list of device nodes to copy var DefaultNodes = []string{ - "null", - "zero", - "full", - "random", - "urandom", - "tty", + "/dev/null", + "/dev/zero", + "/dev/full", + "/dev/random", + "/dev/urandom", + "/dev/tty", } // CopyN copies the device node from the host into the rootfs -func CopyN(rootfs string, nodesToCopy []string) error { +func CopyN(rootfs string, nodesToCopy []string, shouldExist bool) error { oldMask := system.Umask(0000) defer system.Umask(oldMask) for _, node := range nodesToCopy { - if err := Copy(rootfs, node); err != nil { + if err := Copy(rootfs, node, shouldExist); err != nil { return err } } return nil } -func Copy(rootfs, node string) error { - stat, err := os.Stat(filepath.Join("/dev", node)) +// Copy copies the device node into the rootfs. If the node +// on the host system does not exist and shouldExist is true, +// an error will be returned +func Copy(rootfs, node string, shouldExist bool) error { + stat, err := os.Stat(node) if err != nil { + if os.IsNotExist(err) && !shouldExist { + return nil + } return err } + var ( - dest = filepath.Join(rootfs, "dev", node) - st = stat.Sys().(*syscall.Stat_t) + dest = filepath.Join(rootfs, node) + st = stat.Sys().(*syscall.Stat_t) + parent = filepath.Dir(dest) ) + + if err := os.MkdirAll(parent, 0755); err != nil { + return err + } + if err := system.Mknod(dest, st.Mode, int(st.Rdev)); err != nil && !os.IsExist(err) { - return fmt.Errorf("copy %s %s", node, err) + return fmt.Errorf("mknod %s %s", node, err) } return nil } + +func getNodes(path string) ([]string, error) { + out := []string{} + files, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + for _, f := range files { + if f.IsDir() && f.Name() != "pts" && f.Name() != "shm" { + sub, err := getNodes(filepath.Join(path, f.Name())) + if err != nil { + return nil, err + } + out = append(out, sub...)
+ } else if f.Mode()&os.ModeDevice == os.ModeDevice { + out = append(out, filepath.Join(path, f.Name())) + } + } + return out, nil +} + +func GetHostDeviceNodes() ([]string, error) { + return getNodes("/dev") +} diff --git a/pkg/libcontainer/mount/nodes/nodes_unsupported.go b/pkg/libcontainer/mount/nodes/nodes_unsupported.go new file mode 100644 index 0000000000..24409f411f --- /dev/null +++ b/pkg/libcontainer/mount/nodes/nodes_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux + +package nodes + +import "github.com/dotcloud/docker/pkg/libcontainer" + +var DefaultNodes = []string{} + +func GetHostDeviceNodes() ([]string, error) { + return nil, libcontainer.ErrUnsupported +} diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go index 5d0d772a0f..fbc7512047 100644 --- a/pkg/libcontainer/nsinit/exec.go +++ b/pkg/libcontainer/nsinit/exec.go @@ -7,10 +7,10 @@ import ( "os/exec" "syscall" - "github.com/dotcloud/docker/pkg/cgroups" - "github.com/dotcloud/docker/pkg/cgroups/fs" - "github.com/dotcloud/docker/pkg/cgroups/systemd" "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups/fs" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups/systemd" "github.com/dotcloud/docker/pkg/libcontainer/network" "github.com/dotcloud/docker/pkg/system" ) @@ -123,6 +123,7 @@ func DefaultCreateCommand(container *libcontainer.Container, console, rootfs, da command.Env = append(os.Environ(), env...) system.SetCloneFlags(command, uintptr(GetNamespaceFlags(container.Namespaces))) + command.SysProcAttr.Pdeathsig = syscall.SIGKILL command.ExtraFiles = []*os.File{pipe} return command diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 3bbcfcc654..3012106769 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -81,16 +81,57 @@ func Init(container *libcontainer.Container, uncleanRootfs, consolePath string, return fmt.Errorf("set process label %s", err) } if container.Context["restrictions"] != "" { - if err := restrict.Restrict("proc", "sys"); err != nil { + if err := restrict.Restrict("proc/sys", "proc/sysrq-trigger", "proc/irq", "proc/bus", "sys"); err != nil { return err } } + + pdeathSignal, err := system.GetParentDeathSignal() + if err != nil { + return fmt.Errorf("get parent death signal %s", err) + } + if err := FinalizeNamespace(container); err != nil { return fmt.Errorf("finalize namespace %s", err) } + + // FinalizeNamespace can change user/group which clears the parent death + // signal, so we restore it here. + if err := RestoreParentDeathSignal(pdeathSignal); err != nil { + return fmt.Errorf("restore parent death signal %s", err) + } + return system.Execv(args[0], args[0:], container.Env) } +// RestoreParentDeathSignal sets the parent death signal to old. +func RestoreParentDeathSignal(old int) error { + if old == 0 { + return nil + } + + current, err := system.GetParentDeathSignal() + if err != nil { + return fmt.Errorf("get parent death signal %s", err) + } + + if old == current { + return nil + } + + if err := system.ParentDeathSignal(uintptr(old)); err != nil { + return fmt.Errorf("set parent death signal %s", err) + } + + // Signal self if parent is already dead. Does nothing if running in a new + // PID namespace, as Getppid will always return 0. 
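+ // (FinalizeNamespace may call setuid/setgid, which clears the parent death signal; that is why Init saves it beforehand and restores it here. Signaling self below covers a parent that exited before the restore.)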
+ if syscall.Getppid() == 1 { + return syscall.Kill(syscall.Getpid(), syscall.Signal(old)) + } + + return nil +} + // SetupUser changes the groups, gid, and uid for the user inside the container func SetupUser(u string) error { uid, gid, suppGids, err := user.GetUserGroupSupplementary(u, syscall.Getuid(), syscall.Getgid()) @@ -152,6 +193,9 @@ func LoadContainerEnvironment(container *libcontainer.Container) error { os.Clearenv() for _, pair := range container.Env { p := strings.SplitN(pair, "=", 2) + if len(p) < 2 { + return fmt.Errorf("invalid environment '%v'", pair) + } if err := os.Setenv(p[0], p[1]); err != nil { return err } diff --git a/pkg/libcontainer/nsinit/nsinit/main.go b/pkg/libcontainer/nsinit/nsinit/main.go index b5325d40b3..b076b5c55c 100644 --- a/pkg/libcontainer/nsinit/nsinit/main.go +++ b/pkg/libcontainer/nsinit/nsinit/main.go @@ -5,6 +5,8 @@ import ( "io/ioutil" "log" "os" + "os/exec" + "os/signal" "path/filepath" "strconv" @@ -39,7 +41,7 @@ func main() { exitCode, err = nsinit.ExecIn(container, nspid, os.Args[2:]) } else { term := nsinit.NewTerminal(os.Stdin, os.Stdout, os.Stderr, container.Tty) - exitCode, err = nsinit.Exec(container, term, "", dataPath, os.Args[2:], nsinit.DefaultCreateCommand, nil) + exitCode, err = startContainer(container, term, dataPath, os.Args[2:]) } if err != nil { @@ -95,3 +97,31 @@ func readPid() (int, error) { } return pid, nil } + +// startContainer starts the container. Returns the exit status or -1 and an +// error. +// +// Signals sent to the current process will be forwarded to container. +func startContainer(container *libcontainer.Container, term nsinit.Terminal, dataPath string, args []string) (int, error) { + var ( + cmd *exec.Cmd + sigc = make(chan os.Signal, 10) + ) + + signal.Notify(sigc) + + createCommand := func(container *libcontainer.Container, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd { + cmd = nsinit.DefaultCreateCommand(container, console, rootfs, dataPath, init, pipe, args) + return cmd + } + + startCallback := func() { + go func() { + for sig := range sigc { + cmd.Process.Signal(sig) + } + }() + } + + return nsinit.Exec(container, term, "", dataPath, args, createCommand, startCallback) +} diff --git a/pkg/libcontainer/nsinit/unsupported.go b/pkg/libcontainer/nsinit/unsupported.go index 929b3dba5b..51509f79a2 100644 --- a/pkg/libcontainer/nsinit/unsupported.go +++ b/pkg/libcontainer/nsinit/unsupported.go @@ -3,8 +3,8 @@ package nsinit import ( - "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) { diff --git a/pkg/libcontainer/security/capabilities/capabilities.go b/pkg/libcontainer/security/capabilities/capabilities.go index ad13e672c7..ba72070f50 100644 --- a/pkg/libcontainer/security/capabilities/capabilities.go +++ b/pkg/libcontainer/security/capabilities/capabilities.go @@ -7,32 +7,32 @@ import ( "github.com/syndtr/gocapability/capability" ) -// DropCapabilities drops capabilities for the current process based -// on the container's configuration. -func DropCapabilities(container *libcontainer.Container) error { - if drop := getCapabilitiesMask(container); len(drop) > 0 { - c, err := capability.NewPid(os.Getpid()) - if err != nil { - return err - } - c.Unset(capability.CAPS|capability.BOUNDS, drop...) 
+const allCapabilityTypes = capability.CAPS | capability.BOUNDS - if err := c.Apply(capability.CAPS | capability.BOUNDS); err != nil { - return err - } +// DropCapabilities drops all capabilities for the current process except those specified in the container configuration. +func DropCapabilities(container *libcontainer.Container) error { + c, err := capability.NewPid(os.Getpid()) + if err != nil { + return err + } + + keep := getEnabledCapabilities(container) + c.Clear(allCapabilityTypes) + c.Set(allCapabilityTypes, keep...) + + if err := c.Apply(allCapabilityTypes); err != nil { + return err } return nil } -// getCapabilitiesMask returns the specific cap mask values for the libcontainer types -func getCapabilitiesMask(container *libcontainer.Container) []capability.Cap { - drop := []capability.Cap{} - for key, enabled := range container.CapabilitiesMask { - if !enabled { - if c := libcontainer.GetCapability(key); c != nil { - drop = append(drop, c.Value) - } +// getEnabledCapabilities returns the capabilities that should not be dropped by the container. +func getEnabledCapabilities(container *libcontainer.Container) []capability.Cap { + keep := []capability.Cap{} + for _, capability := range container.Capabilities { + if c := libcontainer.GetCapability(capability); c != nil { + keep = append(keep, c.Value) } } - return drop + return keep } diff --git a/pkg/libcontainer/security/restrict/restrict.go b/pkg/libcontainer/security/restrict/restrict.go index e1296b1d7f..a22a1aa73e 100644 --- a/pkg/libcontainer/security/restrict/restrict.go +++ b/pkg/libcontainer/security/restrict/restrict.go @@ -4,22 +4,42 @@ package restrict import ( "fmt" + "os" "syscall" "github.com/dotcloud/docker/pkg/system" ) +const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV + +func mountReadonly(path string) error { + if err := system.Mount("", path, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil { + if err == syscall.EINVAL { + // Probably not a mountpoint, use bind-mount + if err := system.Mount(path, path, "", syscall.MS_BIND, ""); err != nil { + return err + } + if err := system.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC|defaultMountFlags, ""); err != nil { + return err + } + } else { + return err + } + } + return nil +} + // This has to be called while the container still has CAP_SYS_ADMIN (to be able to perform mounts). // However, afterwards, CAP_SYS_ADMIN should be dropped (otherwise the user will be able to revert those changes).
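To make that ordering concrete, a standalone sketch (not the patch's own call path, which runs through nsinit's Init and FinalizeNamespace): harden the kernel interfaces while CAP_SYS_ADMIN is still held, then shrink to the configured whitelist.

```go
package main

import (
	"log"

	"github.com/dotcloud/docker/pkg/libcontainer"
	"github.com/dotcloud/docker/pkg/libcontainer/security/capabilities"
	"github.com/dotcloud/docker/pkg/libcontainer/security/restrict"
)

// hardenedInit must run as root inside the container's namespaces:
// the read-only remounts need CAP_SYS_ADMIN, so the capability drop
// has to come last, exactly as the comment above requires.
func hardenedInit(container *libcontainer.Container) error {
	if err := restrict.Restrict("proc/sys", "proc/sysrq-trigger", "proc/irq", "proc/bus", "sys"); err != nil {
		return err
	}
	return capabilities.DropCapabilities(container)
}

func main() {
	c := &libcontainer.Container{Capabilities: []string{"MKNOD"}}
	if err := hardenedInit(c); err != nil {
		log.Fatal(err)
	}
}
```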
func Restrict(mounts ...string) error { // remount proc and sys as readonly for _, dest := range mounts { - if err := system.Mount("", dest, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil { + if err := mountReadonly(dest); err != nil { return fmt.Errorf("unable to remount %s readonly: %s", dest, err) } } - if err := system.Mount("/dev/null", "/proc/kcore", "", syscall.MS_BIND, ""); err != nil { - return fmt.Errorf("unable to bind-mount /dev/null over /proc/kcore") + if err := system.Mount("/dev/null", "/proc/kcore", "", syscall.MS_BIND, ""); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to bind-mount /dev/null over /proc/kcore: %s", err) } return nil } diff --git a/pkg/libcontainer/types.go b/pkg/libcontainer/types.go index 8f056c817d..834201036f 100644 --- a/pkg/libcontainer/types.go +++ b/pkg/libcontainer/types.go @@ -55,6 +55,27 @@ {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN}, {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN}, {Key: "SYSLOG", Value: capability.CAP_SYSLOG}, + {Key: "SETUID", Value: capability.CAP_SETUID}, + {Key: "SETGID", Value: capability.CAP_SETGID}, + {Key: "CHOWN", Value: capability.CAP_CHOWN}, + {Key: "NET_RAW", Value: capability.CAP_NET_RAW}, + {Key: "DAC_OVERRIDE", Value: capability.CAP_DAC_OVERRIDE}, + {Key: "FOWNER", Value: capability.CAP_FOWNER}, + {Key: "DAC_READ_SEARCH", Value: capability.CAP_DAC_READ_SEARCH}, + {Key: "FSETID", Value: capability.CAP_FSETID}, + {Key: "KILL", Value: capability.CAP_KILL}, + {Key: "LINUX_IMMUTABLE", Value: capability.CAP_LINUX_IMMUTABLE}, + {Key: "NET_BIND_SERVICE", Value: capability.CAP_NET_BIND_SERVICE}, + {Key: "NET_BROADCAST", Value: capability.CAP_NET_BROADCAST}, + {Key: "IPC_LOCK", Value: capability.CAP_IPC_LOCK}, + {Key: "IPC_OWNER", Value: capability.CAP_IPC_OWNER}, + {Key: "SYS_CHROOT", Value: capability.CAP_SYS_CHROOT}, + {Key: "SYS_PTRACE", Value: capability.CAP_SYS_PTRACE}, + {Key: "SYS_BOOT", Value: capability.CAP_SYS_BOOT}, + {Key: "LEASE", Value: capability.CAP_LEASE}, + {Key: "SETFCAP", Value: capability.CAP_SETFCAP}, + {Key: "WAKE_ALARM", Value: capability.CAP_WAKE_ALARM}, + {Key: "BLOCK_SUSPEND", Value: capability.CAP_BLOCK_SUSPEND}, } ) @@ -118,6 +141,14 @@ func GetCapability(key string) *Capability { return nil } +func GetAllCapabilities() []string { + output := make([]string, len(capabilityList)) + for i, capability := range capabilityList { + output[i] = capability.String() + } + return output +} + // Contains returns true if the specified Capability is // in the slice func (c Capabilities) Contains(capp string) bool { diff --git a/pkg/cgroups/MAINTAINERS b/pkg/symlink/MAINTAINERS similarity index 59% rename from pkg/cgroups/MAINTAINERS rename to pkg/symlink/MAINTAINERS index 1e998f8ac1..68a97d2fc2 100644 --- a/pkg/cgroups/MAINTAINERS +++ b/pkg/symlink/MAINTAINERS @@ -1 +1,2 @@ Michael Crosby (@crosbymichael) +Victor Vieux (@vieux) diff --git a/utils/fs.go b/pkg/symlink/fs.go similarity index 62% rename from utils/fs.go rename to pkg/symlink/fs.go index e07ced75d7..257491f91b 100644 --- a/utils/fs.go +++ b/pkg/symlink/fs.go @@ -1,42 +1,14 @@ -package utils +package symlink import ( "fmt" "os" + "path" "path/filepath" "strings" - "syscall" ) -// TreeSize walks a directory tree and returns its total size in bytes.
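The fs.go hunk continuing below moves FollowSymlinkInScope out of utils and caps resolution at maxLoopCounter (100) hops, so a symlink cycle inside a rootfs fails instead of looping forever. A usage sketch; the paths are invented:

```go
package main

import (
	"fmt"

	"github.com/dotcloud/docker/pkg/symlink"
)

func main() {
	// Resolve a path without letting any symlink escape the given root;
	// this is the same helper buildfile.go now uses for ADD targets.
	resolved, err := symlink.FollowSymlinkInScope("/var/lib/docker/rootfs/etc/passwd", "/var/lib/docker/rootfs")
	if err != nil {
		fmt.Println("error:", err) // includes the loop-counter failure
		return
	}
	fmt.Println(resolved)
}
```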
-func TreeSize(dir string) (size int64, err error) { - data := make(map[uint64]struct{}) - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { - // Ignore directory sizes - if fileInfo == nil { - return nil - } - - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil - } - - // Check inode to handle hard links correctly - inode := fileInfo.Sys().(*syscall.Stat_t).Ino - // inode is not a uint64 on all platforms. Cast it to avoid issues. - if _, exists := data[uint64(inode)]; exists { - return nil - } - // inode is not a uint64 on all platforms. Cast it to avoid issues. - data[uint64(inode)] = struct{}{} - - size += s - - return nil - }) - return -} +const maxLoopCounter = 100 // FollowSymlink will follow an existing link and scope it to the root // path provided. @@ -61,7 +33,14 @@ func FollowSymlinkInScope(link, root string) (string, error) { prev = filepath.Join(prev, p) prev = filepath.Clean(prev) + loopCounter := 0 for { + loopCounter++ + + if loopCounter >= maxLoopCounter { + return "", fmt.Errorf("loopCounter reached MAX: %v", loopCounter) + } + if !strings.HasPrefix(prev, root) { // Don't resolve symlinks outside of root. For example, // we don't have to check /home in the below. @@ -84,10 +63,9 @@ func FollowSymlinkInScope(link, root string) (string, error) { return "", err } - switch dest[0] { - case '/': + if path.IsAbs(dest) { prev = filepath.Join(root, dest) - case '.': + } else { prev, _ = filepath.Abs(prev) if prev = filepath.Clean(filepath.Join(filepath.Dir(prev), dest)); len(prev) < len(root) { diff --git a/utils/fs_test.go b/pkg/symlink/fs_test.go similarity index 88% rename from utils/fs_test.go rename to pkg/symlink/fs_test.go index 9affc00e91..d85fd6da74 100644 --- a/utils/fs_test.go +++ b/pkg/symlink/fs_test.go @@ -1,4 +1,4 @@ -package utils +package symlink import ( "io/ioutil" @@ -28,6 +28,19 @@ func TestFollowSymLinkNormal(t *testing.T) { } } +func TestFollowSymLinkRelativePath(t *testing.T) { + link := "testdata/fs/i" + + rewrite, err := FollowSymlinkInScope(link, "testdata") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/fs/a"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } +} + func TestFollowSymLinkUnderLinkedDir(t *testing.T) { dir, err := ioutil.TempDir("", "docker-fs-test") if err != nil { diff --git a/utils/testdata/fs/a/d b/pkg/symlink/testdata/fs/a/d similarity index 100% rename from utils/testdata/fs/a/d rename to pkg/symlink/testdata/fs/a/d diff --git a/utils/testdata/fs/a/e b/pkg/symlink/testdata/fs/a/e similarity index 100% rename from utils/testdata/fs/a/e rename to pkg/symlink/testdata/fs/a/e diff --git a/utils/testdata/fs/a/f b/pkg/symlink/testdata/fs/a/f similarity index 100% rename from utils/testdata/fs/a/f rename to pkg/symlink/testdata/fs/a/f diff --git a/utils/testdata/fs/b/h b/pkg/symlink/testdata/fs/b/h similarity index 100% rename from utils/testdata/fs/b/h rename to pkg/symlink/testdata/fs/b/h diff --git a/utils/testdata/fs/g b/pkg/symlink/testdata/fs/g similarity index 100% rename from utils/testdata/fs/g rename to pkg/symlink/testdata/fs/g diff --git a/pkg/symlink/testdata/fs/i b/pkg/symlink/testdata/fs/i new file mode 120000 index 0000000000..2e65efe2a1 --- /dev/null +++ b/pkg/symlink/testdata/fs/i @@ -0,0 +1 @@ +a \ No newline at end of file diff --git a/pkg/sysinfo/sysinfo.go b/pkg/sysinfo/sysinfo.go index 27af37bb89..bdc192b913 100644 --- a/pkg/sysinfo/sysinfo.go +++ b/pkg/sysinfo/sysinfo.go @@ -1,11 +1,12 @@ package sysinfo import ( - 
"github.com/dotcloud/docker/pkg/cgroups" "io/ioutil" "log" "os" "path" + + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) type SysInfo struct { diff --git a/pkg/system/calls_linux.go b/pkg/system/calls_linux.go index cc4727aaa2..faead0114e 100644 --- a/pkg/system/calls_linux.go +++ b/pkg/system/calls_linux.go @@ -3,6 +3,7 @@ package system import ( "os/exec" "syscall" + "unsafe" ) func Chroot(dir string) error { @@ -122,6 +123,18 @@ func ParentDeathSignal(sig uintptr) error { return nil } +func GetParentDeathSignal() (int, error) { + var sig int + + _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0) + + if err != 0 { + return -1, err + } + + return sig, nil +} + func Setctty() error { if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 { return err diff --git a/pkg/units/MAINTAINERS b/pkg/units/MAINTAINERS new file mode 100644 index 0000000000..68a97d2fc2 --- /dev/null +++ b/pkg/units/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Victor Vieux (@vieux) diff --git a/pkg/units/duration.go b/pkg/units/duration.go new file mode 100644 index 0000000000..cd33121496 --- /dev/null +++ b/pkg/units/duration.go @@ -0,0 +1,31 @@ +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.) +func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours()); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*3 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%f years", d.Hours()/24/365) +} diff --git a/pkg/units/size.go b/pkg/units/size.go new file mode 100644 index 0000000000..99c8800965 --- /dev/null +++ b/pkg/units/size.go @@ -0,0 +1,56 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// HumanSize returns a human-readable approximation of a size +// using SI standard (eg. "44kB", "17MB") +func HumanSize(size int64) string { + i := 0 + var sizef float64 + sizef = float64(size) + units := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + for sizef >= 1000.0 { + sizef = sizef / 1000.0 + i++ + } + return fmt.Sprintf("%.4g %s", sizef, units[i]) +} + +// Parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes or gibibytes, and returns the +// number of bytes, or -1 if the string is unparseable. +// Units are case-insensitive, and the 'b' suffix is optional. 
+func RAMInBytes(size string) (bytes int64, err error) { + re, error := regexp.Compile("^(\\d+)([kKmMgG])?[bB]?$") + if error != nil { + return -1, error + } + + matches := re.FindStringSubmatch(size) + + if len(matches) != 3 { + return -1, fmt.Errorf("Invalid size: '%s'", size) + } + + memLimit, error := strconv.ParseInt(matches[1], 10, 0) + if error != nil { + return -1, error + } + + unit := strings.ToLower(matches[2]) + + if unit == "k" { + memLimit *= 1024 + } else if unit == "m" { + memLimit *= 1024 * 1024 + } else if unit == "g" { + memLimit *= 1024 * 1024 * 1024 + } + + return memLimit, nil +} diff --git a/pkg/units/size_test.go b/pkg/units/size_test.go new file mode 100644 index 0000000000..958a4ca13d --- /dev/null +++ b/pkg/units/size_test.go @@ -0,0 +1,54 @@ +package units + +import ( + "strings" + "testing" +) + +func TestHumanSize(t *testing.T) { + + size := strings.Trim(HumanSize(1000), " \t") + expect := "1 kB" + if size != expect { + t.Errorf("1000 -> expected '%s', got '%s'", expect, size) + } + + size = strings.Trim(HumanSize(1024), " \t") + expect = "1.024 kB" + if size != expect { + t.Errorf("1024 -> expected '%s', got '%s'", expect, size) + } +} + +func TestRAMInBytes(t *testing.T) { + assertRAMInBytes(t, "32", false, 32) + assertRAMInBytes(t, "32b", false, 32) + assertRAMInBytes(t, "32B", false, 32) + assertRAMInBytes(t, "32k", false, 32*1024) + assertRAMInBytes(t, "32K", false, 32*1024) + assertRAMInBytes(t, "32kb", false, 32*1024) + assertRAMInBytes(t, "32Kb", false, 32*1024) + assertRAMInBytes(t, "32Mb", false, 32*1024*1024) + assertRAMInBytes(t, "32Gb", false, 32*1024*1024*1024) + + assertRAMInBytes(t, "", true, -1) + assertRAMInBytes(t, "hello", true, -1) + assertRAMInBytes(t, "-32", true, -1) + assertRAMInBytes(t, " 32 ", true, -1) + assertRAMInBytes(t, "32 mb", true, -1) + assertRAMInBytes(t, "32m b", true, -1) + assertRAMInBytes(t, "32bm", true, -1) +} + +func assertRAMInBytes(t *testing.T, size string, expectError bool, expectedBytes int64) { + actualBytes, err := RAMInBytes(size) + if (err != nil) && !expectError { + t.Errorf("Unexpected error parsing '%s': %s", size, err) + } + if (err == nil) && expectError { + t.Errorf("Expected to get an error parsing '%s', but got none (bytes=%d)", size, actualBytes) + } + if actualBytes != expectedBytes { + t.Errorf("Expected '%s' to parse as %d bytes, got %d", size, expectedBytes, actualBytes) + } +} diff --git a/pkg/user/user.go b/pkg/user/user.go index 1672f7e679..df47101221 100644 --- a/pkg/user/user.go +++ b/pkg/user/user.go @@ -9,6 +9,15 @@ import ( "strings" ) +const ( + minId = 0 + maxId = 1<<31 - 1 //for 32-bit systems compatibility +) + +var ( + ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId) +) + type User struct { Name string Pass string @@ -194,6 +203,9 @@ func GetUserGroupSupplementary(userSpec string, defaultUid int, defaultGid int) // not numeric - we have to bail return 0, 0, nil, fmt.Errorf("Unable to find user %v", userArg) } + if uid < minId || uid > maxId { + return 0, 0, nil, ErrRange + } // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit } @@ -226,6 +238,9 @@ func GetUserGroupSupplementary(userSpec string, defaultUid int, defaultGid int) // not numeric - we have to bail return 0, 0, nil, fmt.Errorf("Unable to find group %v", groupArg) } + if gid < minId || gid > maxId { + return 0, 0, nil, ErrRange + } // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit } diff --git 
a/registry/MAINTAINERS b/registry/MAINTAINERS index bf3984f5f9..af791fb40c 100644 --- a/registry/MAINTAINERS +++ b/registry/MAINTAINERS @@ -1,3 +1,4 @@ Sam Alba (@samalba) Joffrey Fuhrer (@shin-) Ken Cochrane (@kencochrane) +Vincent Batts (@vbatts) diff --git a/runconfig/config.go b/runconfig/config.go index 33a7882b6f..8a069c64c7 100644 --- a/runconfig/config.go +++ b/runconfig/config.go @@ -12,9 +12,10 @@ type Config struct { Hostname string Domainname string User string - Memory int64 // Memory limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap - CpuShares int64 // CPU shares (relative weight vs. other containers) + Memory int64 // Memory limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap + CpuShares int64 // CPU shares (relative weight vs. other containers) + Cpuset string // Cpuset 0-2, 0,1 AttachStdin bool AttachStdout bool AttachStderr bool @@ -41,6 +42,7 @@ func ContainerConfigFromJob(job *engine.Job) *Config { Memory: job.GetenvInt64("Memory"), MemorySwap: job.GetenvInt64("MemorySwap"), CpuShares: job.GetenvInt64("CpuShares"), + Cpuset: job.Getenv("Cpuset"), AttachStdin: job.GetenvBool("AttachStdin"), AttachStdout: job.GetenvBool("AttachStdout"), AttachStderr: job.GetenvBool("AttachStderr"), diff --git a/runconfig/config_test.go b/runconfig/config_test.go index f71528ff8e..b426253b9e 100644 --- a/runconfig/config_test.go +++ b/runconfig/config_test.go @@ -1,9 +1,10 @@ package runconfig import ( - "github.com/dotcloud/docker/nat" "strings" "testing" + + "github.com/dotcloud/docker/nat" ) func parse(t *testing.T, args string) (*Config, *HostConfig, error) { @@ -93,32 +94,20 @@ func TestParseRunVolumes(t *testing.T) { t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes) } - if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/containerTmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) } - if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" { + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/containerTmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes) - } else if _, exists := config.Volumes["/containerVar"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. 
Received %v", config.Volumes) } - if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" { + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/containerTmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes) - } else if _, exists := config.Volumes["/containerVar"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) } if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/containerTmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes) } else if _, exists := config.Volumes["/containerVar"]; !exists { t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) } diff --git a/runconfig/merge.go b/runconfig/merge.go index 1240dbcacd..e30b4cec24 100644 --- a/runconfig/merge.go +++ b/runconfig/merge.go @@ -1,9 +1,10 @@ package runconfig import ( + "strings" + "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/utils" - "strings" ) func Merge(userConf, imageConf *Config) error { @@ -65,15 +66,6 @@ func Merge(userConf, imageConf *Config) error { } } - if !userConf.Tty { - userConf.Tty = imageConf.Tty - } - if !userConf.OpenStdin { - userConf.OpenStdin = imageConf.OpenStdin - } - if !userConf.StdinOnce { - userConf.StdinOnce = imageConf.StdinOnce - } if userConf.Env == nil || len(userConf.Env) == 0 { userConf.Env = imageConf.Env } else { @@ -91,6 +83,7 @@ func Merge(userConf, imageConf *Config) error { } } } + if userConf.Cmd == nil || len(userConf.Cmd) == 0 { userConf.Cmd = imageConf.Cmd } diff --git a/runconfig/parse.go b/runconfig/parse.go index 9142b175af..0fa287adb1 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -10,13 +10,15 @@ import ( "github.com/dotcloud/docker/opts" flag "github.com/dotcloud/docker/pkg/mflag" "github.com/dotcloud/docker/pkg/sysinfo" + "github.com/dotcloud/docker/pkg/units" "github.com/dotcloud/docker/utils" ) var ( - ErrInvalidWorikingDirectory = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.") + ErrInvalidWorkingDirectory = fmt.Errorf("The working directory is invalid. 
It needs to be an absolute path.") ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") + ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: -h and --net") ) //FIXME Only used in tests @@ -62,6 +64,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID") flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flCpuset = cmd.String([]string{"-cpuset"}, "", "CPUs in which to allow execution (0-3, 0,1)") flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container\n'bridge': creates a new network stack for the container on the docker bridge\n'none': no networking for this container\n'container:': reuses another container's network stack\n'host': use the host network stack inside the container") // For documentation purpose _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") @@ -95,12 +98,16 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf return nil, nil, cmd, ErrConflictAttachDetach } if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) { - return nil, nil, cmd, ErrInvalidWorikingDirectory + return nil, nil, cmd, ErrInvalidWorkingDirectory } if *flDetach && *flAutoRemove { return nil, nil, cmd, ErrConflictDetachAutoRemove } + if *flNetMode != "bridge" && *flHostname != "" { + return nil, nil, cmd, ErrConflictNetworkHostname + } + // If neither -d or -a are set, attach to everything by default if flAttach.Len() == 0 && !*flDetach { if !*flDetach { @@ -114,7 +121,7 @@ var flMemory int64 if *flMemoryString != "" { - parsedMemory, err := utils.RAMInBytes(*flMemoryString) + parsedMemory, err := units.RAMInBytes(*flMemoryString) if err != nil { return nil, nil, cmd, err } @@ -128,8 +135,8 @@ if arr[0] == "/" { return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'") } - dstDir := arr[1] - flVolumes.Set(dstDir) + // after creating the bind mount we want to delete it from the flVolumes values because + // we do not want bind mounts being committed to image configs binds = append(binds, bind) flVolumes.Delete(bind) } else if bind == "/" { @@ -214,6 +221,7 @@ OpenStdin: *flStdin, Memory: flMemory, CpuShares: *flCpuShares, + Cpuset: *flCpuset, AttachStdin: flAttach.Get("stdin"), AttachStdout: flAttach.Get("stdout"), AttachStderr: flAttach.Get("stderr"), diff --git a/server/MAINTAINERS b/server/MAINTAINERS index aee10c8421..3564d3db47 100644 --- a/server/MAINTAINERS +++ b/server/MAINTAINERS @@ -1 +1,2 @@ Solomon Hykes (@shykes) +Victor Vieux (@vieux) \ No newline at end of file diff --git a/server/buildfile.go b/server/buildfile.go index 24b0b58f25..d206664445 100644 --- a/server/buildfile.go +++ b/server/buildfile.go @@ -16,10 +16,13 @@ import ( "regexp" "sort" "strings" + "syscall" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/daemon" "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/pkg/symlink" + "github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" @@ -49,7 +52,9 @@ type buildFile struct { verbose bool utilizeCache bool rm bool + forceRm bool + authConfig *registry.AuthConfig configFile *registry.ConfigFile tmpContainers map[string]struct{} @@ -80,10 +85,20 @@ func (b *buildFile) CmdFrom(name string) error { if err != nil { if b.daemon.Graph().IsNotExist(err) { remote, tag := utils.ParseRepositoryTag(name) + pullRegistryAuth := b.authConfig + if len(b.configFile.Configs) > 0 { + // The request came with a full auth config file, we prefer to use that + endpoint, _, err := registry.ResolveRepositoryName(remote) + if err != nil { + return err + } + resolvedAuth := b.configFile.ResolveAuthConfig(endpoint) + pullRegistryAuth = &resolvedAuth + } job := b.srv.Eng.Job("pull", remote, tag) job.SetenvBool("json", b.sf.Json()) job.SetenvBool("parallel", true) - job.SetenvJson("auth", b.configFile) + job.SetenvJson("authConfig", pullRegistryAuth) job.Stdout.Add(b.outOld) if err := job.Run(); err != nil { return err @@ -386,22 +401,32 @@ func (b *buildFile) checkPathForAddition(orig string) error { func (b *buildFile) addContext(container *daemon.Container, orig, dest string, remote bool) error { var ( - err error - origPath = path.Join(b.contextPath, orig) - destPath = path.Join(container.RootfsPath(), dest) + err error + destExists = true + origPath = path.Join(b.contextPath, orig) + destPath = path.Join(container.RootfsPath(), dest) ) if destPath != container.RootfsPath() { - destPath, err = utils.FollowSymlinkInScope(destPath, container.RootfsPath()) + destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath()) if err != nil { return err } } // Preserve the trailing '/' - if strings.HasSuffix(dest, "/") { + if strings.HasSuffix(dest, "/") || dest == "." { destPath = destPath + "/" } + + destStat, err := os.Stat(destPath) + if err != nil { + if !os.IsNotExist(err) { + return err + } + destExists = false + } + fi, err := os.Stat(origPath) if err != nil { if os.IsNotExist(err) { @@ -410,45 +435,29 @@ func (b *buildFile) addContext(container *daemon.Container, orig, dest string, r return err } - chownR := func(destPath string, uid, gid int) error { - return filepath.Walk(destPath, func(path string, info os.FileInfo, err error) error { - if err := os.Lchown(path, uid, gid); err != nil { - return err - } - return nil - }) - } - if fi.IsDir() { - if err := archive.CopyWithTar(origPath, destPath); err != nil { - return err - } - if err := chownR(destPath, 0, 0); err != nil { - return err - } - return nil - } - - // First try to unpack the source as an archive - // to support the untar feature we need to clean up the path a little bit - // because tar is very forgiving. First we need to strip off the archive's - // filename from the path but this is only added if it does not end in / . - tarDest := destPath - if strings.HasSuffix(tarDest, "/") { - tarDest = filepath.Dir(destPath) + return copyAsDirectory(origPath, destPath, destExists) } // If we are adding a remote file, do not try to untar it if !remote { + // First try to unpack the source as an archive + // to support the untar feature we need to clean up the path a little bit + // because tar is very forgiving. First we need to strip off the archive's + // filename from the path but this is only added if it does not end in / . 
+ tarDest := destPath + if strings.HasSuffix(tarDest, "/") { + tarDest = filepath.Dir(destPath) + } + // try to successfully untar the orig if err := archive.UntarPath(origPath, tarDest); err == nil { return nil + } else if err != io.EOF { + utils.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err) } - utils.Debugf("Couldn't untar %s to %s: %s", origPath, destPath, err) } - // If that fails, just copy it as a regular file - // but do not use all the magic path handling for the tar path if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { return err } @@ -456,10 +465,12 @@ func (b *buildFile) addContext(container *daemon.Container, orig, dest string, r return err } - if err := chownR(destPath, 0, 0); err != nil { - return err + resPath := destPath + if destExists && destStat.IsDir() { + resPath = path.Join(destPath, path.Base(origPath)) } - return nil + + return fixPermissions(resPath, 0, 0) } func (b *buildFile) CmdAdd(args string) error { @@ -483,6 +494,7 @@ func (b *buildFile) CmdAdd(args string) error { cmd := b.config.Cmd b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", orig, dest)} + defer func(cmd []string) { b.config.Cmd = cmd }(cmd) b.config.Image = b.image var ( @@ -521,6 +533,11 @@ func (b *buildFile) CmdAdd(args string) error { } tmpFile.Close() + // Remove the mtime of the newly created tmp file + if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil { + return err + } + origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) // Process the checksum @@ -528,7 +545,10 @@ func (b *buildFile) CmdAdd(args string) error { if err != nil { return err } - tarSum := utils.TarSum{Reader: r, DisableCompression: true} + tarSum := &utils.TarSum{Reader: r, DisableCompression: true} + if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { + return err + } remoteHash = tarSum.Sum(nil) r.Close() @@ -599,7 +619,7 @@ func (b *buildFile) CmdAdd(args string) error { } } - // Create the container and start it + // Create the container container, _, err := b.daemon.Create(b.config, "") if err != nil { return err @@ -618,7 +638,6 @@ func (b *buildFile) CmdAdd(args string) error { if err := b.commit(container.ID, cmd, fmt.Sprintf("ADD %s in %s", orig, dest)); err != nil { return err } - b.config.Cmd = cmd return nil } @@ -628,7 +647,7 @@ func (b *buildFile) create() (*daemon.Container, error) { } b.config.Image = b.image - // Create the container and start it + // Create the container c, _, err := b.daemon.Create(b.config, "") if err != nil { return nil, err @@ -769,6 +788,9 @@ func (b *buildFile) Build(context io.Reader) (string, error) { continue } if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil { + if b.forceRm { + b.clearTmp(b.tmpContainers) + } return "", err } else if b.rm { b.clearTmp(b.tmpContainers) @@ -821,7 +843,38 @@ func stripComments(raw []byte) string { return strings.Join(out, "\n") } -func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, configFile *registry.ConfigFile) BuildFile { +func copyAsDirectory(source, destination string, destinationExists bool) error { + if err := archive.CopyWithTar(source, destination); err != nil { + return err + } + + if destinationExists { + files, err := ioutil.ReadDir(source) + if err != nil { + return err + } + + for _, file := range files { + if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil { + return err + } + } + 
return nil + } + + return fixPermissions(destination, 0, 0) +} + +func fixPermissions(destination string, uid, gid int) error { + return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error { + if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) { + return err + } + return nil + }) +} + +func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, forceRm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile { return &buildFile{ daemon: srv.daemon, srv: srv, @@ -833,8 +886,10 @@ func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeC verbose: verbose, utilizeCache: utilizeCache, rm: rm, + forceRm: forceRm, sf: sf, - configFile: configFile, + authConfig: auth, + configFile: authConfigFile, outOld: outOld, } } diff --git a/server/server.go b/server/server.go index 47565f0022..6d398807bd 100644 --- a/server/server.go +++ b/server/server.go @@ -34,14 +34,13 @@ import ( gosignal "os/signal" "path" "path/filepath" - goruntime "runtime" + "runtime" "strconv" "strings" "sync" "syscall" "time" - "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/daemon" "github.com/dotcloud/docker/daemonconfig" @@ -56,6 +55,17 @@ import ( "github.com/dotcloud/docker/utils" ) +func (srv *Server) handlerWrap(h engine.Handler) engine.Handler { + return func(job *engine.Job) engine.Status { + if !srv.IsRunning() { + return job.Errorf("Server is not running") + } + srv.tasks.Add(1) + defer srv.tasks.Done() + return h(job) + } +} + // jobInitApi runs the remote api server `srv` as a daemon, // Only one api server can run at the same time - this is enforced by a pidfile. // The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup. @@ -113,7 +123,7 @@ func InitServer(job *engine.Job) engine.Status { "start": srv.ContainerStart, "kill": srv.ContainerKill, "wait": srv.ContainerWait, - "tag": srv.ImageTag, + "tag": srv.ImageTag, // FIXME merge with "image_tag" "resize": srv.ContainerResize, "commit": srv.ContainerCommit, "info": srv.DockerInfo, @@ -128,21 +138,30 @@ func InitServer(job *engine.Job) engine.Status { "logs": srv.ContainerLogs, "changes": srv.ContainerChanges, "top": srv.ContainerTop, - "version": srv.DockerVersion, "load": srv.ImageLoad, "build": srv.Build, "pull": srv.ImagePull, "import": srv.ImageImport, "image_delete": srv.ImageDelete, - "inspect": srv.JobInspect, "events": srv.Events, "push": srv.ImagePush, "containers": srv.Containers, } { - if err := job.Eng.Register(name, handler); err != nil { + if err := job.Eng.Register(name, srv.handlerWrap(handler)); err != nil { return job.Error(err) } } + // Install image-related commands from the image subsystem. + // See `graph/service.go` + if err := srv.daemon.Repositories().Install(job.Eng); err != nil { + return job.Error(err) + } + // Install daemon-related commands from the daemon subsystem. 
+ // See `daemon/` + if err := srv.daemon.Install(job.Eng); err != nil { + return job.Error(err) + } + srv.SetRunning(true) return engine.StatusOK } @@ -195,13 +214,22 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status { return engine.StatusOK } +func (srv *Server) EvictListener(from int64) { + srv.Lock() + if old, ok := srv.listeners[from]; ok { + delete(srv.listeners, from) + close(old) + } + srv.Unlock() +} + func (srv *Server) Events(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s FROM", job.Name) + if len(job.Args) != 0 { + return job.Errorf("Usage: %s", job.Name) } var ( - from = job.Args[0] + from = time.Now().UTC().UnixNano() since = job.GetenvInt64("since") until = job.GetenvInt64("until") timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now())) @@ -212,15 +240,7 @@ func (srv *Server) Events(job *engine.Job) engine.Status { return fmt.Errorf("JSON error") } _, err = job.Stdout.Write(b) - if err != nil { - // On error, evict the listener - utils.Errorf("%s", err) - srv.Lock() - delete(srv.listeners, from) - srv.Unlock() - return err - } - return nil + return err } listener := make(chan utils.JSONMessage) @@ -241,8 +261,9 @@ func (srv *Server) Events(job *engine.Job) engine.Status { continue } if err != nil { - job.Error(err) - return engine.StatusErr + // On error, evict the listener + srv.EvictListener(from) + return job.Error(err) } } } @@ -254,12 +275,17 @@ func (srv *Server) Events(job *engine.Job) engine.Status { } for { select { - case event := <-listener: + case event, ok := <-listener: + if !ok { // Channel is closed: listener was evicted + return engine.StatusOK + } err := sendEvent(&event) if err != nil && err.Error() == "JSON error" { continue } if err != nil { + // On error, evict the listener + srv.EvictListener(from) return job.Error(err) } case <-timeout.C: @@ -317,12 +343,7 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status { } if rootRepo != nil { for _, id := range rootRepo { - image, err := srv.ImageInspect(id) - if err != nil { - return job.Error(err) - } - - if err := srv.exportImage(image, tempdir); err != nil { + if err := srv.exportImage(job.Eng, id, tempdir); err != nil { return job.Error(err) } } @@ -336,11 +357,7 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status { return job.Error(err) } } else { - image, err := srv.ImageInspect(name) - if err != nil { - return job.Error(err) - } - if err := srv.exportImage(image, tempdir); err != nil { + if err := srv.exportImage(job.Eng, name, tempdir); err != nil { return job.Error(err) } } @@ -354,13 +371,14 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status { if _, err := io.Copy(job.Stdout, fs); err != nil { return job.Error(err) } + utils.Debugf("End Serializing %s", name) return engine.StatusOK } -func (srv *Server) exportImage(img *image.Image, tempdir string) error { - for i := img; i != nil; { +func (srv *Server) exportImage(eng *engine.Engine, name, tempdir string) error { + for n := name; n != ""; { // temporary directory - tmpImageDir := path.Join(tempdir, i.ID) + tmpImageDir := path.Join(tempdir, n) if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil { if os.IsExist(err) { return nil @@ -376,44 +394,34 @@ func (srv *Server) exportImage(img *image.Image, tempdir string) error { } // serialize json - b, err := json.Marshal(i) + json, err := os.Create(path.Join(tmpImageDir, "json")) if err != nil { return err } - if err := ioutil.WriteFile(path.Join(tmpImageDir, "json"), b, os.FileMode(0644)); 
err != nil { + job := eng.Job("image_inspect", n) + job.Stdout.Add(json) + if err := job.Run(); err != nil { return err } // serialize filesystem - fs, err := i.TarLayer() - if err != nil { - return err - } - defer fs.Close() - fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar")) if err != nil { return err } - if written, err := io.Copy(fsTar, fs); err != nil { - return err - } else { - utils.Debugf("rendered layer for %s of [%d] size", i.ID, written) - } - - if err = fsTar.Close(); err != nil { + job = eng.Job("image_tarlayer", n) + job.Stdout.Add(fsTar) + if err := job.Run(); err != nil { return err } // find parent - if i.Parent != "" { - i, err = srv.ImageInspect(i.Parent) - if err != nil { - return err - } - } else { - i = nil + job = eng.Job("image_get", n) + info, _ := job.Stdout.AddEnv() + if err := job.Run(); err != nil { + return err } + n = info.Get("Parent") } return nil } @@ -428,11 +436,14 @@ func (srv *Server) Build(job *engine.Job) engine.Status { suppressOutput = job.GetenvBool("q") noCache = job.GetenvBool("nocache") rm = job.GetenvBool("rm") + forceRm = job.GetenvBool("forcerm") + authConfig = ®istry.AuthConfig{} configFile = ®istry.ConfigFile{} tag string context io.ReadCloser ) - job.GetenvJson("auth", configFile) + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("configFile", configFile) repoName, tag = utils.ParseRepositoryTag(repoName) if remoteURL == "" { @@ -484,7 +495,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status { Writer: job.Stdout, StreamFormatter: sf, }, - !suppressOutput, !noCache, rm, job.Stdout, sf, configFile) + !suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile) id, err := b.Build(context) if err != nil { return job.Error(err) @@ -536,7 +547,7 @@ func (srv *Server) ImageLoad(job *engine.Job) engine.Status { for _, d := range dirs { if d.IsDir() { - if err := srv.recursiveLoad(d.Name(), tmpImageDir); err != nil { + if err := srv.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil { return job.Error(err) } } @@ -563,8 +574,8 @@ func (srv *Server) ImageLoad(job *engine.Job) engine.Status { return engine.StatusOK } -func (srv *Server) recursiveLoad(address, tmpImageDir string) error { - if _, err := srv.ImageInspect(address); err != nil { +func (srv *Server) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error { + if err := eng.Job("image_get", address).Run(); err != nil { utils.Debugf("Loading %s", address) imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json")) @@ -585,7 +596,7 @@ func (srv *Server) recursiveLoad(address, tmpImageDir string) error { } if img.Parent != "" { if !srv.daemon.Graph().Exists(img.Parent) { - if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil { + if err := srv.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil { return err } } @@ -787,7 +798,7 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { v.SetBool("IPv4Forwarding", !srv.daemon.SystemConfig().IPv4ForwardingDisabled) v.SetBool("Debug", os.Getenv("DEBUG") != "") v.SetInt("NFd", utils.GetTotalUsedFds()) - v.SetInt("NGoroutines", goruntime.NumGoroutine()) + v.SetInt("NGoroutines", runtime.NumGoroutine()) v.Set("ExecutionDriver", srv.daemon.ExecutionDriver().Name()) v.SetInt("NEventsListener", len(srv.listeners)) v.Set("KernelVersion", kernelVersion) @@ -800,23 +811,6 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { return engine.StatusOK } -func (srv *Server) DockerVersion(job *engine.Job) engine.Status { - v := 
&engine.Env{} - v.Set("Version", dockerversion.VERSION) - v.SetJson("ApiVersion", api.APIVERSION) - v.Set("GitCommit", dockerversion.GITCOMMIT) - v.Set("GoVersion", goruntime.Version()) - v.Set("Os", goruntime.GOOS) - v.Set("Arch", goruntime.GOARCH) - if kernelVersion, err := utils.GetKernelVersion(); err == nil { - v.Set("KernelVersion", kernelVersion.String()) - } - if _, err := v.WriteTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - func (srv *Server) ImageHistory(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { return job.Errorf("Usage: %s IMAGE", job.Name) @@ -1055,8 +1049,12 @@ func (srv *Server) ContainerCommit(job *engine.Job) engine.Status { if container == nil { return job.Errorf("No such container: %s", name) } - var config = container.Config - var newConfig runconfig.Config + + var ( + config = container.Config + newConfig runconfig.Config + ) + if err := job.GetenvJson("config", &newConfig); err != nil { return job.Error(err) } @@ -1341,23 +1339,16 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status { localName = job.Args[0] tag string sf = utils.NewStreamFormatter(job.GetenvBool("json")) - authConfig registry.AuthConfig - configFile = ®istry.ConfigFile{} + authConfig = ®istry.AuthConfig{} metaHeaders map[string][]string ) if len(job.Args) > 1 { tag = job.Args[1] } - job.GetenvJson("auth", configFile) + job.GetenvJson("authConfig", authConfig) job.GetenvJson("metaHeaders", metaHeaders) - endpoint, _, err := registry.ResolveRepositoryName(localName) - if err != nil { - return job.Error(err) - } - authConfig = configFile.ResolveAuthConfig(endpoint) - c, err := srv.poolAdd("pull", localName+":"+tag) if err != nil { if c != nil { @@ -1376,12 +1367,12 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status { return job.Error(err) } - endpoint, err = registry.ExpandAndVerifyRegistryUrl(hostname) + endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname) if err != nil { return job.Error(err) } - r, err := registry.NewRegistry(&authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint) + r, err := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint) if err != nil { return job.Error(err) } @@ -2048,37 +2039,6 @@ func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*imag return match, nil } -func (srv *Server) RegisterLinks(container *daemon.Container, hostConfig *runconfig.HostConfig) error { - daemon := srv.daemon - - if hostConfig != nil && hostConfig.Links != nil { - for _, l := range hostConfig.Links { - parts, err := utils.PartParser("name:alias", l) - if err != nil { - return err - } - child, err := srv.daemon.GetByName(parts["name"]) - if err != nil { - return err - } - if child == nil { - return fmt.Errorf("Could not get container for %s", parts["name"]) - } - if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil { - return err - } - } - - // After we load all the links into the daemon - // set them to nil on the hostconfig - hostConfig.Links = nil - if err := container.WriteHostConfig(); err != nil { - return err - } - } - return nil -} - func (srv *Server) ContainerStart(job *engine.Job) engine.Status { if len(job.Args) < 1 { return job.Errorf("Usage: %s container_id", job.Name) @@ -2119,7 +2079,7 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status { } } // Register any links from the host config before starting the container - if err := srv.RegisterLinks(container, hostConfig); err != nil { + if err := 
srv.daemon.RegisterLinks(container, hostConfig); err != nil { return job.Error(err) } container.SetHostConfig(hostConfig) @@ -2380,64 +2340,6 @@ func (srv *Server) ContainerAttach(job *engine.Job) engine.Status { return engine.StatusOK } -func (srv *Server) ContainerInspect(name string) (*daemon.Container, error) { - if container := srv.daemon.Get(name); container != nil { - return container, nil - } - return nil, fmt.Errorf("No such container: %s", name) -} - -func (srv *Server) ImageInspect(name string) (*image.Image, error) { - if image, err := srv.daemon.Repositories().LookupImage(name); err == nil && image != nil { - return image, nil - } - return nil, fmt.Errorf("No such image: %s", name) -} - -func (srv *Server) JobInspect(job *engine.Job) engine.Status { - // TODO: deprecate KIND/conflict - if n := len(job.Args); n != 2 { - return job.Errorf("Usage: %s CONTAINER|IMAGE KIND", job.Name) - } - var ( - name = job.Args[0] - kind = job.Args[1] - object interface{} - conflict = job.GetenvBool("conflict") //should the job detect conflict between containers and images - image, errImage = srv.ImageInspect(name) - container, errContainer = srv.ContainerInspect(name) - ) - - if conflict && image != nil && container != nil { - return job.Errorf("Conflict between containers and images") - } - - switch kind { - case "image": - if errImage != nil { - return job.Error(errImage) - } - object = image - case "container": - if errContainer != nil { - return job.Error(errContainer) - } - object = &struct { - *daemon.Container - HostConfig *runconfig.HostConfig - }{container, container.HostConfig()} - default: - return job.Errorf("Unknown kind: %s", kind) - } - - b, err := json.Marshal(object) - if err != nil { - return job.Error(err) - } - job.Stdout.Write(b) - return engine.StatusOK -} - func (srv *Server) ContainerCopy(job *engine.Job) engine.Status { if len(job.Args) != 2 { return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name) @@ -2475,8 +2377,7 @@ func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) pullingPool: make(map[string]chan struct{}), pushingPool: make(map[string]chan struct{}), events: make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events - listeners: make(map[string]chan utils.JSONMessage), - running: true, + listeners: make(map[int64]chan utils.JSONMessage), } daemon.SetServer(srv) return srv, nil @@ -2525,6 +2426,16 @@ func (srv *Server) Close() error { return nil } srv.SetRunning(false) + done := make(chan struct{}) + go func() { + srv.tasks.Wait() + close(done) + }() + select { + // Waiting server jobs for 15 seconds, shutdown immediately after that time + case <-time.After(time.Second * 15): + case <-done: + } if srv.daemon == nil { return nil } @@ -2537,7 +2448,8 @@ type Server struct { pullingPool map[string]chan struct{} pushingPool map[string]chan struct{} events []utils.JSONMessage - listeners map[string]chan utils.JSONMessage + listeners map[int64]chan utils.JSONMessage Eng *engine.Engine running bool + tasks sync.WaitGroup } diff --git a/server/server_unit_test.go b/server/server_unit_test.go index b471c5c581..47e4be8280 100644 --- a/server/server_unit_test.go +++ b/server/server_unit_test.go @@ -1,9 +1,10 @@ package server import ( - "github.com/dotcloud/docker/utils" "testing" "time" + + "github.com/dotcloud/docker/utils" ) func TestPools(t *testing.T) { @@ -47,14 +48,14 @@ func TestPools(t *testing.T) { func TestLogEvent(t *testing.T) { srv := &Server{ events: make([]utils.JSONMessage, 0, 64), - listeners: make(map[string]chan 
utils.JSONMessage), + listeners: make(map[int64]chan utils.JSONMessage), } srv.LogEvent("fakeaction", "fakeid", "fakeimage") listener := make(chan utils.JSONMessage) srv.Lock() - srv.listeners["test"] = listener + srv.listeners[1337] = listener srv.Unlock() srv.LogEvent("fakeaction2", "fakeid", "fakeimage") diff --git a/sysinit/README.md b/sysinit/README.md new file mode 100644 index 0000000000..c28d0298b8 --- /dev/null +++ b/sysinit/README.md @@ -0,0 +1,4 @@ +Sys Init code + +This code is run INSIDE the container and is responsible for setting +up the environment before running the actual process diff --git a/utils/http.go b/utils/http.go index 68e93d8eb9..e193633792 100644 --- a/utils/http.go +++ b/utils/http.go @@ -1,7 +1,6 @@ package utils import ( - "bytes" "io" "net/http" "strings" @@ -15,11 +14,13 @@ type VersionInfo interface { } func validVersion(version VersionInfo) bool { - stopChars := " \t\r\n/" - if strings.ContainsAny(version.Name(), stopChars) { + const stopChars = " \t\r\n/" + name := version.Name() + vers := version.Version() + if len(name) == 0 || strings.ContainsAny(name, stopChars) { return false } - if strings.ContainsAny(version.Version(), stopChars) { + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { return false } return true @@ -36,27 +37,18 @@ func appendVersions(base string, versions ...VersionInfo) string { return base } - var buf bytes.Buffer + verstrs := make([]string, 0, 1+len(versions)) if len(base) > 0 { - buf.Write([]byte(base)) + verstrs = append(verstrs, base) } for _, v := range versions { - name := []byte(v.Name()) - version := []byte(v.Version()) - - if len(name) == 0 || len(version) == 0 { - continue - } if !validVersion(v) { continue } - buf.Write([]byte(v.Name())) - buf.Write([]byte("/")) - buf.Write([]byte(v.Version())) - buf.Write([]byte(" ")) + verstrs = append(verstrs, v.Name()+"/"+v.Version()) } - return buf.String() + return strings.Join(verstrs, " ") } // HTTPRequestDecorator is used to change an instance of diff --git a/utils/jsonmessage.go b/utils/jsonmessage.go index 6be421be94..d6546e3ee6 100644 --- a/utils/jsonmessage.go +++ b/utils/jsonmessage.go @@ -3,10 +3,12 @@ package utils import ( "encoding/json" "fmt" - "github.com/dotcloud/docker/pkg/term" "io" "strings" "time" + + "github.com/dotcloud/docker/pkg/term" + "github.com/dotcloud/docker/pkg/units" ) type JSONError struct { @@ -41,11 +43,11 @@ func (p *JSONProgress) String() string { if p.Current <= 0 && p.Total <= 0 { return "" } - current := HumanSize(int64(p.Current)) + current := units.HumanSize(int64(p.Current)) if p.Total <= 0 { return fmt.Sprintf("%8v", current) } - total := HumanSize(int64(p.Total)) + total := units.HumanSize(int64(p.Total)) percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 if width > 110 { pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", 50-percentage)) diff --git a/utils/signal_freebsd.go b/utils/signal_freebsd.go deleted file mode 100644 index 65a700e894..0000000000 --- a/utils/signal_freebsd.go +++ /dev/null @@ -1,42 +0,0 @@ -package utils - -import ( - "os" - "os/signal" - "syscall" -) - -func CatchAll(sigc chan os.Signal) { - signal.Notify(sigc, - syscall.SIGABRT, - syscall.SIGALRM, - syscall.SIGBUS, - syscall.SIGCHLD, - syscall.SIGCONT, - syscall.SIGFPE, - syscall.SIGHUP, - syscall.SIGILL, - syscall.SIGINT, - syscall.SIGIO, - syscall.SIGIOT, - syscall.SIGKILL, - syscall.SIGPIPE, - syscall.SIGPROF, - syscall.SIGQUIT, - syscall.SIGSEGV, - syscall.SIGSTOP, - syscall.SIGSYS, - syscall.SIGTERM, - 
syscall.SIGTRAP, - syscall.SIGTSTP, - syscall.SIGTTIN, - syscall.SIGTTOU, - syscall.SIGURG, - syscall.SIGUSR1, - syscall.SIGUSR2, - syscall.SIGVTALRM, - syscall.SIGWINCH, - syscall.SIGXCPU, - syscall.SIGXFSZ, - ) -} diff --git a/utils/tarsum_test.go b/utils/tarsum_test.go new file mode 100644 index 0000000000..52ddd64590 --- /dev/null +++ b/utils/tarsum_test.go @@ -0,0 +1,224 @@ +package utils + +import ( + "bytes" + "crypto/rand" + "fmt" + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "io" + "io/ioutil" + "os" + "testing" +) + +type testLayer struct { + filename string + options *sizedOptions + jsonfile string + gzip bool + tarsum string +} + +var testLayers = []testLayer{ + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + tarsum: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + gzip: true, + tarsum: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"}, + { + filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", + jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", + tarsum: "tarsum+sha256:ac672ee85da9ab7f9667ae3c32841d3e42f33cc52c273c23341dabba1c8b0c8b"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, +} + +type sizedOptions struct { + num int64 + size int64 + isRand bool + realFile bool +} + +// make a tar: +// * num is the number of files the tar should have +// * size is the bytes per file +// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) +// * realFile will write to a TempFile, instead of an in memory buffer +func sizedTar(opts sizedOptions) io.Reader { + var ( + fh io.ReadWriter + err error + ) + if opts.realFile { + fh, err = ioutil.TempFile("", "tarsum") + if err != nil { + return nil + } + } else { + fh = bytes.NewBuffer([]byte{}) + } + tarW := tar.NewWriter(fh) + for i := int64(0); i < opts.num; i++ { + err := tarW.WriteHeader(&tar.Header{ + Name: fmt.Sprintf("/testdata%d", i), + Mode: 0755, + Uid: 0, + Gid: 0, + Size: opts.size, + }) + if err != nil { + return nil + } + var rBuf []byte + if opts.isRand { + rBuf = make([]byte, 8) + _, err = rand.Read(rBuf) + if err != nil { + return nil + } + } else { + rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} + } + + for i := int64(0); i < opts.size/int64(8); i++ { + tarW.Write(rBuf) + } + } + return fh +} + +func TestTarSums(t *testing.T) { + for _, layer := range testLayers { + var ( + fh io.Reader + err error + ) + if len(layer.filename) > 0 { + fh, err = os.Open(layer.filename) + if err != nil { + t.Errorf("failed to open %s: %s", layer.filename, err) + continue + } + } else if layer.options != nil { + fh = sizedTar(*layer.options) + } else { + // What else is there to test? + t.Errorf("what to do with %#V", layer) + continue + } + if file, ok := fh.(*os.File); ok { + defer file.Close() + } + + // double negatives! 
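Aside: TarSum, exercised both here and in the CmdAdd hunk earlier, computes its digest as a side effect of reading, which is why every caller drains the stream before asking for the sum. A minimal standalone sketch of that consumption pattern; the file name is illustrative and the import path assumes this tree:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"

	"github.com/dotcloud/docker/utils"
)

func main() {
	f, err := os.Open("layer.tar") // any uncompressed tar stream
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// DisableCompression: true means "the input is NOT gzipped", hence the
	// !layer.gzip inversion the test above flags as a double negative.
	ts := &utils.TarSum{Reader: f, DisableCompression: true}
	if _, err := io.Copy(ioutil.Discard, ts); err != nil { // drain to compute the sum
		log.Fatal(err)
	}
	fmt.Println(ts.Sum(nil)) // e.g. "tarsum+sha256:..."
}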
+ ts := &TarSum{Reader: fh, DisableCompression: !layer.gzip} + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to copy from %s: %s", layer.filename, err) + continue + } + var gotSum string + if len(layer.jsonfile) > 0 { + jfh, err := os.Open(layer.jsonfile) + if err != nil { + t.Errorf("failed to open %s: %s", layer.jsonfile, err) + continue + } + buf, err := ioutil.ReadAll(jfh) + if err != nil { + t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) + continue + } + gotSum = ts.Sum(buf) + } else { + gotSum = ts.Sum(nil) + } + + if layer.tarsum != gotSum { + t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) + } + } +} + +func Benchmark9kTar(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts := &TarSum{Reader: buf, DisableCompression: true} + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +func Benchmark9kTarGzip(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts := &TarSum{Reader: buf, DisableCompression: false} + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) +} + +func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { + var fh *os.File + tarReader := sizedTar(opts) + if br, ok := tarReader.(*os.File); ok { + fh = br + } + defer os.Remove(fh.Name()) + defer fh.Close() + + b.SetBytes(opts.size * opts.num) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts := &TarSum{Reader: fh, DisableCompression: !isGzip} + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + fh.Seek(0, 0) + } +} diff --git a/utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json new file mode 100644 index 0000000000..0f0ba4974d --- /dev/null +++ b/utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json @@ -0,0 +1 @@ 
+{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' /etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar new file mode 100644 index 0000000000..dfd5c204ae Binary files /dev/null and b/utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar differ diff --git a/utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json b/utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json new file mode 100644 index 0000000000..12c18a076f --- /dev/null +++ b/utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json @@ -0,0 +1 @@ +{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0} \ No newline at end of file diff --git a/utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar b/utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar new file mode 100644 index 0000000000..880b3f2c56 Binary files /dev/null and b/utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar differ diff --git a/utils/utils.go b/utils/utils.go index 4ef44b5617..1a140f650d 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -16,11 +16,11 @@ import ( "os" "os/exec" "path/filepath" - "regexp" "runtime" "strconv" "strings" "sync" + "syscall" "time" "github.com/dotcloud/docker/dockerversion" @@ -83,79 +83,6 @@ func Errorf(format string, a ...interface{}) { logf("error", 
format, a...) } -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.) -func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours()); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*3 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%f years", d.Hours()/24/365) -} - -// HumanSize returns a human-readable approximation of a size -// using SI standard (eg. "44kB", "17MB") -func HumanSize(size int64) string { - i := 0 - var sizef float64 - sizef = float64(size) - units := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - for sizef >= 1000.0 { - sizef = sizef / 1000.0 - i++ - } - return fmt.Sprintf("%.4g %s", sizef, units[i]) -} - -// Parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes or gibibytes, and returns the -// number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. -func RAMInBytes(size string) (bytes int64, err error) { - re, error := regexp.Compile("^(\\d+)([kKmMgG])?[bB]?$") - if error != nil { - return -1, error - } - - matches := re.FindStringSubmatch(size) - - if len(matches) != 3 { - return -1, fmt.Errorf("Invalid size: '%s'", size) - } - - memLimit, error := strconv.ParseInt(matches[1], 10, 0) - if error != nil { - return -1, error - } - - unit := strings.ToLower(matches[2]) - - if unit == "k" { - memLimit *= 1024 - } else if unit == "m" { - memLimit *= 1024 * 1024 - } else if unit == "g" { - memLimit *= 1024 * 1024 * 1024 - } - - return memLimit, nil -} - func Trunc(s string, maxlen int) string { if len(s) <= maxlen { return s @@ -492,9 +419,7 @@ func NewTruncIndex(ids []string) (idx *TruncIndex) { return } -func (idx *TruncIndex) Add(id string) error { - idx.Lock() - defer idx.Unlock() +func (idx *TruncIndex) addId(id string) error { if strings.Contains(id, " ") { return fmt.Errorf("Illegal character: ' '") } @@ -503,10 +428,31 @@ func (idx *TruncIndex) Add(id string) error { } idx.ids[id] = true idx.bytes = append(idx.bytes, []byte(id+" ")...) 
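Aside: the point of splitting addId out of Add here is that the suffixarray rebuild is the expensive part; the patch adds AddWithoutSuffixarrayUpdate and UpdateSuffixarray just below so callers can batch insertions. A hypothetical bulk loader built on those two methods:

// Hypothetical bulk loader: one O(n) suffixarray rebuild instead of one per ID.
func bulkAdd(idx *TruncIndex, ids []string) error {
	for _, id := range ids {
		if err := idx.AddWithoutSuffixarrayUpdate(id); err != nil {
			return err
		}
	}
	idx.UpdateSuffixarray()
	return nil
}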
+ return nil +} + +func (idx *TruncIndex) Add(id string) error { + idx.Lock() + defer idx.Unlock() + if err := idx.addId(id); err != nil { + return err + } idx.index = suffixarray.New(idx.bytes) return nil } +func (idx *TruncIndex) AddWithoutSuffixarrayUpdate(id string) error { + idx.Lock() + defer idx.Unlock() + return idx.addId(id) +} + +func (idx *TruncIndex) UpdateSuffixarray() { + idx.Lock() + defer idx.Unlock() + idx.index = suffixarray.New(idx.bytes) +} + func (idx *TruncIndex) Delete(id string) error { idx.Lock() defer idx.Unlock() @@ -875,22 +821,6 @@ func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) { return fmt.Sprintf("%s://%s:%d", proto, host, port), nil } -func GetReleaseVersion() string { - resp, err := http.Get("https://get.docker.io/latest") - if err != nil { - return "" - } - defer resp.Body.Close() - if resp.ContentLength > 24 || resp.StatusCode != 200 { - return "" - } - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "" - } - return strings.TrimSpace(string(body)) -} - // Get a repos name and returns the right reposName + tag // The tag can be confusing because of a port in a repository name. // Ex: localhost.localdomain:5000/samalba/hipache:latest @@ -1091,3 +1021,70 @@ func ParseKeyValueOpt(opt string) (string, string, error) { } return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil } + +// TreeSize walks a directory tree and returns its total size in bytes. +func TreeSize(dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + // Check inode to handle hard links correctly + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[uint64(inode)]; exists { + return nil + } + // inode is not a uint64 on all platforms. Cast it to avoid issues. 
+ data[uint64(inode)] = struct{}{} + + size += s + + return nil + }) + return +} + +// ValidateContextDirectory checks if all the contents of the directory +// can be read and returns an error if some files can't be read +// symlinks which point to non-existing files don't trigger an error +func ValidateContextDirectory(srcPath string) error { + var finalError error + + filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error { + // skip this directory/file if it's not in the path, it won't get added to the context + _, err = filepath.Rel(srcPath, filePath) + if err != nil && os.IsPermission(err) { + return nil + } + + if _, err := os.Stat(filePath); err != nil && os.IsPermission(err) { + finalError = fmt.Errorf("can't stat '%s'", filePath) + return err + } + // skip checking if symlinks point to non-existing files, such symlinks can be useful + lstat, _ := os.Lstat(filePath) + if lstat.Mode()&os.ModeSymlink == os.ModeSymlink { + return err + } + + if !f.IsDir() { + currentFile, err := os.Open(filePath) + if err != nil && os.IsPermission(err) { + finalError = fmt.Errorf("no permission to read from '%s'", filePath) + return err + } else { + currentFile.Close() + } + } + return nil + }) + return finalError +} diff --git a/utils/utils_test.go b/utils/utils_test.go index ccd212202c..83164c68dd 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -6,7 +6,6 @@ import ( "io" "io/ioutil" "os" - "strings" "testing" ) @@ -271,54 +270,6 @@ func TestCompareKernelVersion(t *testing.T) { -1) } -func TestHumanSize(t *testing.T) { - - size := strings.Trim(HumanSize(1000), " \t") - expect := "1 kB" - if size != expect { - t.Errorf("1000 -> expected '%s', got '%s'", expect, size) - } - - size = strings.Trim(HumanSize(1024), " \t") - expect = "1.024 kB" - if size != expect { - t.Errorf("1024 -> expected '%s', got '%s'", expect, size) - } -} - -func TestRAMInBytes(t *testing.T) { - assertRAMInBytes(t, "32", false, 32) - assertRAMInBytes(t, "32b", false, 32) - assertRAMInBytes(t, "32B", false, 32) - assertRAMInBytes(t, "32k", false, 32*1024) - assertRAMInBytes(t, "32K", false, 32*1024) - assertRAMInBytes(t, "32kb", false, 32*1024) - assertRAMInBytes(t, "32Kb", false, 32*1024) - assertRAMInBytes(t, "32Mb", false, 32*1024*1024) - assertRAMInBytes(t, "32Gb", false, 32*1024*1024*1024) - - assertRAMInBytes(t, "", true, -1) - assertRAMInBytes(t, "hello", true, -1) - assertRAMInBytes(t, "-32", true, -1) - assertRAMInBytes(t, " 32 ", true, -1) - assertRAMInBytes(t, "32 mb", true, -1) - assertRAMInBytes(t, "32m b", true, -1) - assertRAMInBytes(t, "32bm", true, -1) -} - -func assertRAMInBytes(t *testing.T, size string, expectError bool, expectedBytes int64) { - actualBytes, err := RAMInBytes(size) - if (err != nil) && !expectError { - t.Errorf("Unexpected error parsing '%s': %s", size, err) - } - if (err == nil) && expectError { - t.Errorf("Expected to get an error parsing '%s', but got none (bytes=%d)", size, actualBytes) - } - if actualBytes != expectedBytes { - t.Errorf("Expected '%s' to parse as %d bytes, got %d", size, expectedBytes, actualBytes) - } -} - func TestParseHost(t *testing.T) { var ( defaultHttpHost = "127.0.0.1" diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go index e8b973c1fa..e363aa793e 100644 --- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go @@ -38,6 +38,7 @@ const ( 
TypeXGlobalHeader = 'g' // global extended header TypeGNULongName = 'L' // Next file has a long name TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name + TypeGNUSparse = 'S' // sparse file ) // A Header represents a single header in a tar archive. diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go index 7cb6e649c7..920a9b08f9 100644 --- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go @@ -29,12 +29,57 @@ const maxNanoSecondIntSize = 9 // The Next method advances to the next file in the archive (including the first), // and then it can be treated as an io.Reader to access the file's data. type Reader struct { - r io.Reader - err error - nb int64 // number of unread bytes for current file entry - pad int64 // amount of padding (ignored) after current file entry + r io.Reader + err error + pad int64 // amount of padding (ignored) after current file entry + curr numBytesReader // reader for current file entry } +// A numBytesReader is an io.Reader with a numBytes method, returning the number +// of bytes remaining in the underlying encoded data. +type numBytesReader interface { + io.Reader + numBytes() int64 +} + +// A regFileReader is a numBytesReader for reading file data from a tar archive. +type regFileReader struct { + r io.Reader // underlying reader + nb int64 // number of unread bytes for current file entry +} + +// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive. +type sparseFileReader struct { + rfr *regFileReader // reads the sparse-encoded file data + sp []sparseEntry // the sparse map for the file + pos int64 // keeps track of file position + tot int64 // total size of the file +} + +// Keywords for GNU sparse files in a PAX extended header +const ( + paxGNUSparseNumBlocks = "GNU.sparse.numblocks" + paxGNUSparseOffset = "GNU.sparse.offset" + paxGNUSparseNumBytes = "GNU.sparse.numbytes" + paxGNUSparseMap = "GNU.sparse.map" + paxGNUSparseName = "GNU.sparse.name" + paxGNUSparseMajor = "GNU.sparse.major" + paxGNUSparseMinor = "GNU.sparse.minor" + paxGNUSparseSize = "GNU.sparse.size" + paxGNUSparseRealSize = "GNU.sparse.realsize" +) + +// Keywords for old GNU sparse headers +const ( + oldGNUSparseMainHeaderOffset = 386 + oldGNUSparseMainHeaderIsExtendedOffset = 482 + oldGNUSparseMainHeaderNumEntries = 4 + oldGNUSparseExtendedHeaderIsExtendedOffset = 504 + oldGNUSparseExtendedHeaderNumEntries = 21 + oldGNUSparseOffsetSize = 12 + oldGNUSparseNumBytesSize = 12 +) + // NewReader creates a new Reader reading from r. func NewReader(r io.Reader) *Reader { return &Reader{r: r} } @@ -64,6 +109,18 @@ func (tr *Reader) Next() (*Header, error) { tr.skipUnread() hdr = tr.readHeader() mergePAX(hdr, headers) + + // Check for a PAX format sparse file + sp, err := tr.checkForGNUSparsePAXHeaders(hdr, headers) + if err != nil { + tr.err = err + return nil, err + } + if sp != nil { + // Current file is a PAX format GNU sparse file. + // Set the current file reader to a sparse file reader. + tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size} + } return hdr, nil case TypeGNULongName: // We have a GNU long name header. Its contents are the real file name. @@ -87,6 +144,67 @@ func (tr *Reader) Next() (*Header, error) { return hdr, tr.err } +// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. 
If they are found, then +// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to +// be treated as a regular file. +func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) { + var sparseFormat string + + // Check for sparse format indicators + major, majorOk := headers[paxGNUSparseMajor] + minor, minorOk := headers[paxGNUSparseMinor] + sparseName, sparseNameOk := headers[paxGNUSparseName] + _, sparseMapOk := headers[paxGNUSparseMap] + sparseSize, sparseSizeOk := headers[paxGNUSparseSize] + sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize] + + // Identify which, if any, sparse format applies from which PAX headers are set + if majorOk && minorOk { + sparseFormat = major + "." + minor + } else if sparseNameOk && sparseMapOk { + sparseFormat = "0.1" + } else if sparseSizeOk { + sparseFormat = "0.0" + } else { + // Not a PAX format GNU sparse file. + return nil, nil + } + + // Check for unknown sparse format + if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" { + return nil, nil + } + + // Update hdr from GNU sparse PAX headers + if sparseNameOk { + hdr.Name = sparseName + } + if sparseSizeOk { + realSize, err := strconv.ParseInt(sparseSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } else if sparseRealSizeOk { + realSize, err := strconv.ParseInt(sparseRealSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } + + // Set up the sparse map, according to the particular sparse format in use + var sp []sparseEntry + var err error + switch sparseFormat { + case "0.0", "0.1": + sp, err = readGNUSparseMap0x1(headers) + case "1.0": + sp, err = readGNUSparseMap1x0(tr.curr) + } + return sp, err +} + // mergePAX merges well known headers according to PAX standard. // In general headers with the same name as those found // in the header struct overwrite those found in the header @@ -194,6 +312,11 @@ func parsePAX(r io.Reader) (map[string]string, error) { if err != nil { return nil, err } + + // For GNU PAX sparse format 0.0 support. + // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers. + var sparseMap bytes.Buffer + headers := make(map[string]string) // Each record is constructed as // "%d %s=%s\n", length, keyword, value @@ -211,7 +334,7 @@ func parsePAX(r io.Reader) (map[string]string, error) { return nil, ErrHeader } // Extract everything between the decimal and the n -1 on the - // beginning to to eat the ' ', -1 on the end to skip the newline. + // beginning to eat the ' ', -1 on the end to skip the newline. var record []byte record, buf = buf[sp+1:n-1], buf[n:] // The first equals is guaranteed to mark the end of the key. @@ -221,7 +344,21 @@ func parsePAX(r io.Reader) (map[string]string, error) { return nil, ErrHeader } key, value := record[:eq], record[eq+1:] - headers[string(key)] = string(value) + + keyStr := string(key) + if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes { + // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map. + sparseMap.Write(value) + sparseMap.Write([]byte{','}) + } else { + // Normal key. Set the value in the headers map. 
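For reference, a PAX record is framed as "%d %s=%s\n", where the leading decimal counts the entire record, its own digits included. A quick worked check (the timestamp value is the same one used in TestParsePAXTime later in this patch):

package main

import "fmt"

func main() {
	// "30" counts itself (2), the space (1), "mtime=1350244992.023960108" (26),
	// and the trailing newline (1): 2 + 1 + 26 + 1 = 30.
	rec := "30 mtime=1350244992.023960108\n"
	fmt.Println(len(rec)) // 30
}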
+ headers[keyStr] = string(value) + } + } + if sparseMap.Len() != 0 { + // Add sparse info to headers, chopping off the extra comma + sparseMap.Truncate(sparseMap.Len() - 1) + headers[paxGNUSparseMap] = sparseMap.String() } return headers, nil } @@ -268,8 +405,8 @@ func (tr *Reader) octal(b []byte) int64 { // skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding. func (tr *Reader) skipUnread() { - nr := tr.nb + tr.pad // number of bytes to skip - tr.nb, tr.pad = 0, 0 + nr := tr.numBytes() + tr.pad // number of bytes to skip + tr.curr, tr.pad = nil, 0 if sr, ok := tr.r.(io.Seeker); ok { if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil { return @@ -331,14 +468,14 @@ func (tr *Reader) readHeader() *Header { // so its magic bytes, like the rest of the block, are NULs. magic := string(s.next(8)) // contains version field as well. var format string - switch magic { - case "ustar\x0000": // POSIX tar (1003.1-1988) + switch { + case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988) if string(header[508:512]) == "tar\x00" { format = "star" } else { format = "posix" } - case "ustar \x00": // old GNU tar + case magic == "ustar \x00": // old GNU tar format = "gnu" } @@ -373,30 +510,308 @@ func (tr *Reader) readHeader() *Header { // Maximum value of hdr.Size is 64 GB (12 octal digits), // so there's no risk of int64 overflowing. - tr.nb = int64(hdr.Size) - tr.pad = -tr.nb & (blockSize - 1) // blockSize is a power of two + nb := int64(hdr.Size) + tr.pad = -nb & (blockSize - 1) // blockSize is a power of two + + // Set the current file reader. + tr.curr = ®FileReader{r: tr.r, nb: nb} + + // Check for old GNU sparse format entry. + if hdr.Typeflag == TypeGNUSparse { + // Get the real size of the file. + hdr.Size = tr.octal(header[483:495]) + + // Read the sparse map. + sp := tr.readOldGNUSparseMap(header) + if tr.err != nil { + return nil + } + // Current file is a GNU sparse file. Update the current file reader. + tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size} + } return hdr } +// A sparseEntry holds a single entry in a sparse file's sparse map. +// A sparse entry indicates the offset and size in a sparse file of a +// block of data. +type sparseEntry struct { + offset int64 + numBytes int64 +} + +// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format. +// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries, +// then one or more extension headers are used to store the rest of the sparse map. +func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry { + isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0 + spCap := oldGNUSparseMainHeaderNumEntries + if isExtended { + spCap += oldGNUSparseExtendedHeaderNumEntries + } + sp := make([]sparseEntry, 0, spCap) + s := slicer(header[oldGNUSparseMainHeaderOffset:]) + + // Read the four entries from the main tar header + for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ { + offset := tr.octal(s.next(oldGNUSparseOffsetSize)) + numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize)) + if tr.err != nil { + tr.err = ErrHeader + return nil + } + if offset == 0 && numBytes == 0 { + break + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + for isExtended { + // There are more entries. Read an extension header and parse its entries. 
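The magic offsets used by the old GNU sparse parser line up once you note that each entry is 24 bytes (a 12-byte octal offset plus a 12-byte octal length). An editorial sanity check of the constants introduced above; the real-size field readHeader consumes at header[483:495] sits in the 12 bytes immediately after the main header's isExtended flag:

package main

import "fmt"

func main() {
	const entry = 12 + 12 // octal offset field + octal length field per sparse entry

	// The isExtended flag follows the four inline entries of the main header.
	fmt.Println(386+4*entry == 482) // true

	// An extension block holds 21 entries, its own flag at byte 504,
	// leaving 7 of the 512 block bytes unused.
	fmt.Println(21*entry == 504) // true
}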
+ sparseHeader := make([]byte, blockSize) + if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil { + return nil + } + isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0 + s = slicer(sparseHeader) + for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ { + offset := tr.octal(s.next(oldGNUSparseOffsetSize)) + numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize)) + if tr.err != nil { + tr.err = ErrHeader + return nil + } + if offset == 0 && numBytes == 0 { + break + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + } + return sp +} + +// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format version 1.0. +// The sparse map is stored just before the file data and padded out to the nearest block boundary. +func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { + buf := make([]byte, 2*blockSize) + sparseHeader := buf[:blockSize] + + // readDecimal is a helper function to read a decimal integer from the sparse map + // while making sure to read from the file in blocks of size blockSize + readDecimal := func() (int64, error) { + // Look for newline + nl := bytes.IndexByte(sparseHeader, '\n') + if nl == -1 { + if len(sparseHeader) >= blockSize { + // This is an error + return 0, ErrHeader + } + oldLen := len(sparseHeader) + newLen := oldLen + blockSize + if cap(sparseHeader) < newLen { + // There's more header, but we need to make room for the next block + copy(buf, sparseHeader) + sparseHeader = buf[:newLen] + } else { + // There's more header, and we can just reslice + sparseHeader = sparseHeader[:newLen] + } + + // Now that sparseHeader is large enough, read next block + if _, err := io.ReadFull(r, sparseHeader[oldLen:newLen]); err != nil { + return 0, err + } + + // Look for a newline in the new data + nl = bytes.IndexByte(sparseHeader[oldLen:newLen], '\n') + if nl == -1 { + // This is an error + return 0, ErrHeader + } + nl += oldLen // We want the position from the beginning + } + // Now that we've found a newline, read a number + n, err := strconv.ParseInt(string(sparseHeader[:nl]), 10, 0) + if err != nil { + return 0, ErrHeader + } + + // Update sparseHeader to consume this number + sparseHeader = sparseHeader[nl+1:] + return n, nil + } + + // Read the first block + if _, err := io.ReadFull(r, sparseHeader); err != nil { + return nil, err + } + + // The first line contains the number of entries + numEntries, err := readDecimal() + if err != nil { + return nil, err + } + + // Read all the entries + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + // Read the offset + offset, err := readDecimal() + if err != nil { + return nil, err + } + // Read numBytes + numBytes, err := readDecimal() + if err != nil { + return nil, err + } + + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + return sp, nil +} + +// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1. +// The sparse map is stored in the PAX headers. 
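To make the two PAX encodings concrete, here is a hedged, hypothetical in-package helper that round-trips a two-fragment map, {0,5} and {10,5}, through the format 1.0 parser defined above. The format 0.1 encoding of the same map would instead be the header pair GNU.sparse.numblocks="2", GNU.sparse.map="0,5,10,5":

// Hypothetical in-package sketch (relies on the unexported blockSize and
// readGNUSparseMap1x0 from this file).
func exampleSparse1x0() ([]sparseEntry, error) {
	raw := []byte("2\n0\n5\n10\n5\n") // entry count, then offset/numBytes lines
	block := make([]byte, blockSize)  // NUL-padded to the 512-byte block boundary
	copy(block, raw)
	return readGNUSparseMap1x0(bytes.NewReader(block))
}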
+func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) { + // Get number of entries + numEntriesStr, ok := headers[paxGNUSparseNumBlocks] + if !ok { + return nil, ErrHeader + } + numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) + if err != nil { + return nil, ErrHeader + } + + sparseMap := strings.Split(headers[paxGNUSparseMap], ",") + + // There should be two numbers in sparseMap for each entry + if int64(len(sparseMap)) != 2*numEntries { + return nil, ErrHeader + } + + // Loop through the entries in the sparse map + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + return sp, nil +} + +// numBytes returns the number of bytes left to read in the current file's entry +// in the tar archive, or 0 if there is no current file. +func (tr *Reader) numBytes() int64 { + if tr.curr == nil { + // No current file, so no bytes + return 0 + } + return tr.curr.numBytes() +} + // Read reads from the current entry in the tar archive. // It returns 0, io.EOF when it reaches the end of that entry, // until Next is called to advance to the next entry. func (tr *Reader) Read(b []byte) (n int, err error) { - if tr.nb == 0 { + if tr.curr == nil { + return 0, io.EOF + } + n, err = tr.curr.Read(b) + if err != nil && err != io.EOF { + tr.err = err + } + return +} + +func (rfr *regFileReader) Read(b []byte) (n int, err error) { + if rfr.nb == 0 { // file consumed return 0, io.EOF } - - if int64(len(b)) > tr.nb { - b = b[0:tr.nb] + if int64(len(b)) > rfr.nb { + b = b[0:rfr.nb] } - n, err = tr.r.Read(b) - tr.nb -= int64(n) + n, err = rfr.r.Read(b) + rfr.nb -= int64(n) - if err == io.EOF && tr.nb > 0 { + if err == io.EOF && rfr.nb > 0 { err = io.ErrUnexpectedEOF } - tr.err = err return } + +// numBytes returns the number of bytes left to read in the file's data in the tar archive. +func (rfr *regFileReader) numBytes() int64 { + return rfr.nb +} + +// readHole reads a sparse file hole ending at offset toOffset +func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int { + n64 := toOffset - sfr.pos + if n64 > int64(len(b)) { + n64 = int64(len(b)) + } + n := int(n64) + for i := 0; i < n; i++ { + b[i] = 0 + } + sfr.pos += n64 + return n +} + +// Read reads the sparse file data in expanded form. +func (sfr *sparseFileReader) Read(b []byte) (n int, err error) { + if len(sfr.sp) == 0 { + // No more data fragments to read from. 
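Taken together, these reader changes mean client code needs no sparse-awareness at all. A hedged consumer sketch, assuming f is an already-open sparse-format tar stream and trimming error handling to log.Fatal:

tr := tar.NewReader(f)
hdr, err := tr.Next() // for sparse entries, hdr.Size is the expanded (real) size
if err != nil {
	log.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, tr) // holes come back as runs of zero bytes
if err != nil {
	log.Fatal(err)
}
fmt.Println(n == hdr.Size) // true: Read yields the file in expanded form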
+ if sfr.pos < sfr.tot { + // We're in the last hole + n = sfr.readHole(b, sfr.tot) + return + } + // Otherwise, we're at the end of the file + return 0, io.EOF + } + if sfr.pos < sfr.sp[0].offset { + // We're in a hole + n = sfr.readHole(b, sfr.sp[0].offset) + return + } + + // We're not in a hole, so we'll read from the next data fragment + posInFragment := sfr.pos - sfr.sp[0].offset + bytesLeft := sfr.sp[0].numBytes - posInFragment + if int64(len(b)) > bytesLeft { + b = b[0:bytesLeft] + } + + n, err = sfr.rfr.Read(b) + sfr.pos += int64(n) + + if int64(n) == bytesLeft { + // We're done with this fragment + sfr.sp = sfr.sp[1:] + } + + if err == io.EOF && sfr.pos < sfr.tot { + // We reached the end of the last fragment's data, but there's a final hole + err = nil + } + return +} + +// numBytes returns the number of bytes left to read in the sparse file's +// sparse-encoded data in the tar archive. +func (sfr *sparseFileReader) numBytes() int64 { + return sfr.rfr.nb +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go index f84dbebe98..9601ffe459 100644 --- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go @@ -9,6 +9,7 @@ import ( "crypto/md5" "fmt" "io" + "io/ioutil" "os" "reflect" "strings" @@ -54,8 +55,92 @@ var gnuTarTest = &untarTest{ }, } +var sparseTarTest = &untarTest{ + file: "testdata/sparse-formats.tar", + headers: []*Header{ + { + Name: "sparse-gnu", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392395740, 0), + Typeflag: 0x53, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "sparse-posix-0.0", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392342187, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "sparse-posix-0.1", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392340456, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "sparse-posix-1.0", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392337404, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "end", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 4, + ModTime: time.Unix(1392398319, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + }, + cksums: []string{ + "6f53234398c2449fe67c1812d993012f", + "6f53234398c2449fe67c1812d993012f", + "6f53234398c2449fe67c1812d993012f", + "6f53234398c2449fe67c1812d993012f", + "b0061974914468de549a2af8ced10316", + }, +} + var untarTests = []*untarTest{ gnuTarTest, + sparseTarTest, { file: "testdata/star.tar", headers: []*Header{ @@ -386,7 +471,7 @@ func TestParsePAXHeader(t *testing.T) { func TestParsePAXTime(t *testing.T) { // Some valid PAX time values timestamps := map[string]time.Time{ - "1350244992.023960108": time.Unix(1350244992, 23960108), // The commoon case + "1350244992.023960108": time.Unix(1350244992, 23960108), // The common case "1350244992.02396010": time.Unix(1350244992, 23960100), // Lower precision value "1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value "1350244992": time.Unix(1350244992, 0), // Low precision value @@ -423,3 
+508,236 @@ func TestMergePAX(t *testing.T) { t.Errorf("incorrect merge: got %+v, want %+v", hdr, want) } } + +func TestSparseEndToEnd(t *testing.T) { + test := sparseTarTest + f, err := os.Open(test.file) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + tr := NewReader(f) + + headers := test.headers + cksums := test.cksums + nread := 0 + + // loop over all files + for ; ; nread++ { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + break + } + + // check the header + if !reflect.DeepEqual(*hdr, *headers[nread]) { + t.Errorf("Incorrect header:\nhave %+v\nwant %+v", + *hdr, headers[nread]) + } + + // read and checksum the file data + h := md5.New() + _, err = io.Copy(h, tr) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + // verify checksum + have := fmt.Sprintf("%x", h.Sum(nil)) + want := cksums[nread] + if want != have { + t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want) + } + } + if nread != len(headers) { + t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread) + } +} + +type sparseFileReadTest struct { + sparseData []byte + sparseMap []sparseEntry + realSize int64 + expected []byte +} + +var sparseFileReadTests = []sparseFileReadTest{ + { + sparseData: []byte("abcde"), + sparseMap: []sparseEntry{ + {offset: 0, numBytes: 2}, + {offset: 5, numBytes: 3}, + }, + realSize: 8, + expected: []byte("ab\x00\x00\x00cde"), + }, + { + sparseData: []byte("abcde"), + sparseMap: []sparseEntry{ + {offset: 0, numBytes: 2}, + {offset: 5, numBytes: 3}, + }, + realSize: 10, + expected: []byte("ab\x00\x00\x00cde\x00\x00"), + }, + { + sparseData: []byte("abcde"), + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 2}, + }, + realSize: 8, + expected: []byte("\x00abc\x00\x00de"), + }, + { + sparseData: []byte("abcde"), + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 2}, + }, + realSize: 10, + expected: []byte("\x00abc\x00\x00de\x00\x00"), + }, + { + sparseData: []byte(""), + sparseMap: nil, + realSize: 2, + expected: []byte("\x00\x00"), + }, +} + +func TestSparseFileReader(t *testing.T) { + for i, test := range sparseFileReadTests { + r := bytes.NewReader(test.sparseData) + nb := int64(r.Len()) + sfr := &sparseFileReader{ + rfr: ®FileReader{r: r, nb: nb}, + sp: test.sparseMap, + pos: 0, + tot: test.realSize, + } + if sfr.numBytes() != nb { + t.Errorf("test %d: Before reading, sfr.numBytes() = %d, want %d", i, sfr.numBytes(), nb) + } + buf, err := ioutil.ReadAll(sfr) + if err != nil { + t.Errorf("test %d: Unexpected error: %v", i, err) + } + if e := test.expected; !bytes.Equal(buf, e) { + t.Errorf("test %d: Contents = %v, want %v", i, buf, e) + } + if sfr.numBytes() != 0 { + t.Errorf("test %d: After draining the reader, numBytes() was nonzero", i) + } + } +} + +func TestSparseIncrementalRead(t *testing.T) { + sparseMap := []sparseEntry{{10, 2}} + sparseData := []byte("Go") + expected := "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Go\x00\x00\x00\x00\x00\x00\x00\x00" + + r := bytes.NewReader(sparseData) + nb := int64(r.Len()) + sfr := &sparseFileReader{ + rfr: ®FileReader{r: r, nb: nb}, + sp: sparseMap, + pos: 0, + tot: int64(len(expected)), + } + + // We'll read the data 6 bytes at a time, with a hole of size 10 at + // the beginning and one of size 8 at the end. 
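An editorial hand trace of what those chunked reads return, derived from the sparseFileReader.Read logic above:

// With the map {offset 10, numBytes 2}, data "Go" and real size 20,
// successive 6-byte reads yield:
//   read 1: 6 zero bytes  (hole,           pos  0 -> 6)
//   read 2: 4 zero bytes  (hole remainder, pos  6 -> 10)
//   read 3: "Go"          (data fragment,  pos 10 -> 12)
//   read 4: 6 zero bytes  (trailing hole,  pos 12 -> 18)
//   read 5: 2 zero bytes  (trailing hole,  pos 18 -> 20)
//   read 6: 0, io.EOF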
+	var outputBuf bytes.Buffer
+	buf := make([]byte, 6)
+	for {
+		n, err := sfr.Read(buf)
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Errorf("Read: unexpected error %v\n", err)
+		}
+		if n > 0 {
+			_, err := outputBuf.Write(buf[:n])
+			if err != nil {
+				t.Errorf("Write: unexpected error %v\n", err)
+			}
+		}
+	}
+	got := outputBuf.String()
+	if got != expected {
+		t.Errorf("Contents = %v, want %v", got, expected)
+	}
+}
+
+func TestReadGNUSparseMap0x1(t *testing.T) {
+	headers := map[string]string{
+		paxGNUSparseNumBlocks: "4",
+		paxGNUSparseMap:       "0,5,10,5,20,5,30,5",
+	}
+	expected := []sparseEntry{
+		{offset: 0, numBytes: 5},
+		{offset: 10, numBytes: 5},
+		{offset: 20, numBytes: 5},
+		{offset: 30, numBytes: 5},
+	}
+
+	sp, err := readGNUSparseMap0x1(headers)
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	if !reflect.DeepEqual(sp, expected) {
+		t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
+	}
+}
+
+func TestReadGNUSparseMap1x0(t *testing.T) {
+	// This test uses lots of holes so the sparse header takes up more than two blocks
+	numEntries := 100
+	expected := make([]sparseEntry, 0, numEntries)
+	sparseMap := new(bytes.Buffer)
+
+	fmt.Fprintf(sparseMap, "%d\n", numEntries)
+	for i := 0; i < numEntries; i++ {
+		offset := int64(2048 * i)
+		numBytes := int64(1024)
+		expected = append(expected, sparseEntry{offset: offset, numBytes: numBytes})
+		fmt.Fprintf(sparseMap, "%d\n%d\n", offset, numBytes)
+	}
+
+	// Make the header the smallest multiple of blockSize that fits the sparseMap
+	headerBlocks := (sparseMap.Len() + blockSize - 1) / blockSize
+	bufLen := blockSize * headerBlocks
+	buf := make([]byte, bufLen)
+	copy(buf, sparseMap.Bytes())
+
+	// Get a reader to read the sparse map
+	r := bytes.NewReader(buf)
+
+	// Read the sparse map
+	sp, err := readGNUSparseMap1x0(r)
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	if !reflect.DeepEqual(sp, expected) {
+		t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
+	}
+}
+
+func TestUninitializedRead(t *testing.T) {
+	test := gnuTarTest
+	f, err := os.Open(test.file)
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	defer f.Close()
+
+	tr := NewReader(f)
+	_, err = tr.Read([]byte{})
+	if err == nil || err != io.EOF {
+		t.Errorf("Unexpected error: %v, wanted %v", err, io.EOF)
+	}
+
+}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar
new file mode 100644
index 0000000000..8bd4e74d50
Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar
new file mode 100644
index 0000000000..5960ee8247
Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
index 9ee9499297..6eff6f6f84 100644
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
@@ -218,8 +218,8 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
 			tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)
 
 			// Use the ustar magic if we used ustar long names.
- if len(prefix) > 0 { - copy(header[257:265], []byte("ustar\000")) + if len(prefix) > 0 && !tw.usedBinary { + copy(header[257:265], []byte("ustar\x00")) } } } diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go index 2b9ea658db..512fab1a6f 100644 --- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go @@ -103,6 +103,29 @@ var writerTests = []*writerTest{ }, }, }, + // The truncated test file was produced using these commands: + // dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt + // tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar + { + file: "testdata/writer-big-long.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: strings.Repeat("longname/", 15) + "16gig.txt", + Mode: 0644, + Uid: 1000, + Gid: 1000, + Size: 16 << 30, + ModTime: time.Unix(1399583047, 0), + Typeflag: '0', + Uname: "guillaume", + Gname: "guillaume", + }, + // fake contents + contents: strings.Repeat("\x00", 4<<10), + }, + }, + }, // This file was produced using gnu tar 1.17 // gnutar -b 4 --format=ustar (longname/)*15 + file.txt { diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/methods.go b/vendor/src/github.com/coreos/go-systemd/dbus/methods.go index 11d5cda945..a60de059e6 100644 --- a/vendor/src/github.com/coreos/go-systemd/dbus/methods.go +++ b/vendor/src/github.com/coreos/go-systemd/dbus/methods.go @@ -204,7 +204,7 @@ func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]i // to modify. properties are the settings to set, encoded as an array of property // name and value pairs. func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { - return c.sysobj.Call("SetUnitProperties", 0, name, runtime, properties).Store() + return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() } func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { @@ -253,6 +253,48 @@ type UnitStatus struct { JobPath dbus.ObjectPath // The job object path } +type LinkUnitFileChange EnableUnitFileChange + +// LinkUnitFiles() links unit files (that are located outside of the +// usual unit search paths) into the unit search path. +// +// It takes a list of absolute paths to unit files to link and two +// booleans. The first boolean controls whether the unit shall be +// enabled for runtime only (true, /run), or persistently (false, +// /etc). +// The second controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns a list of the changes made. The list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. 
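Before the implementation below, a hedged usage sketch for the new call. It assumes a reachable systemd system bus and the package's New() constructor; the unit path /opt/myapp/myapp.service is hypothetical, and the Type/Filename/Destination field names come from EnableUnitFileChange, which LinkUnitFileChange aliases.

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical unit file living outside the normal search paths.
	// runtime=true links it under /run; force=true replaces stale symlinks.
	changes, err := conn.LinkUnitFiles([]string{"/opt/myapp/myapp.service"}, true, true)
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range changes {
		fmt.Printf("%s: %s -> %s\n", c.Type, c.Filename, c.Destination)
	}
}
```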
+func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]LinkUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + // EnableUnitFiles() may be used to enable one or more units in the system (by // creating symlinks to them in /etc or /run). // @@ -317,7 +359,7 @@ type EnableUnitFileChange struct { // symlink. func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { result := make([][]interface{}, 0) - err := c.sysobj.Call("DisableUnitFiles", 0, files, runtime).Store(&result) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) if err != nil { return nil, err } diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go index d943e7ebfc..8c7ab93eb3 100644 --- a/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go +++ b/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go @@ -36,36 +36,38 @@ func setupConn(t *testing.T) *Conn { return conn } +func findFixture(target string, t *testing.T) string { + abs, err := filepath.Abs("../fixtures/" + target) + if err != nil { + t.Fatal(err) + } + return abs +} + func setupUnit(target string, conn *Conn, t *testing.T) { // Blindly stop the unit in case it is running conn.StopUnit(target, "replace") // Blindly remove the symlink in case it exists targetRun := filepath.Join("/run/systemd/system/", target) - err := os.Remove(targetRun) - - // 1. Enable the unit - abs, err := filepath.Abs("../fixtures/" + target) - if err != nil { - t.Fatal(err) - } + os.Remove(targetRun) +} +func linkUnit(target string, conn *Conn, t *testing.T) { + abs := findFixture(target, t) fixture := []string{abs} - install, changes, err := conn.EnableUnitFiles(fixture, true, true) + changes, err := conn.LinkUnitFiles(fixture, true, true) if err != nil { t.Fatal(err) } - if install != false { - t.Fatal("Install was true") - } - if len(changes) < 1 { t.Fatalf("Expected one change, got %v", changes) } - if changes[0].Filename != targetRun { + runPath := filepath.Join("/run/systemd/system/", target) + if changes[0].Filename != runPath { t.Fatal("Unexpected target filename") } } @@ -76,6 +78,7 @@ func TestStartStopUnit(t *testing.T) { conn := setupConn(t) setupUnit(target, conn, t) + linkUnit(target, conn, t) // 2. Start the unit job, err := conn.StartUnit(target, "replace") @@ -84,7 +87,7 @@ func TestStartStopUnit(t *testing.T) { } if job != "done" { - t.Fatal("Job is not done, %v", job) + t.Fatal("Job is not done:", job) } units, err := conn.ListUnits() @@ -130,28 +133,41 @@ func TestEnableDisableUnit(t *testing.T) { conn := setupConn(t) setupUnit(target, conn, t) + abs := findFixture(target, t) + runPath := filepath.Join("/run/systemd/system/", target) - abs, err := filepath.Abs("../fixtures/" + target) + // 1. 
Enable the unit + install, changes, err := conn.EnableUnitFiles([]string{abs}, true, true) if err != nil { t.Fatal(err) } - path := filepath.Join("/run/systemd/system/", target) + if install != false { + t.Fatal("Install was true") + } + + if len(changes) < 1 { + t.Fatalf("Expected one change, got %v", changes) + } + + if changes[0].Filename != runPath { + t.Fatal("Unexpected target filename") + } // 2. Disable the unit - changes, err := conn.DisableUnitFiles([]string{abs}, true) + dChanges, err := conn.DisableUnitFiles([]string{abs}, true) if err != nil { t.Fatal(err) } - if len(changes) != 1 { - t.Fatalf("Changes should include the path, %v", changes) + if len(dChanges) != 1 { + t.Fatalf("Changes should include the path, %v", dChanges) } - if changes[0].Filename != path { - t.Fatalf("Change should include correct filename, %+v", changes[0]) + if dChanges[0].Filename != runPath { + t.Fatalf("Change should include correct filename, %+v", dChanges[0]) } - if changes[0].Destination != "" { - t.Fatalf("Change destination should be empty, %+v", changes[0]) + if dChanges[0].Destination != "" { + t.Fatalf("Change destination should be empty, %+v", dChanges[0]) } } @@ -230,7 +246,7 @@ func TestSetUnitProperties(t *testing.T) { value := info["CPUShares"].(uint64) if value != 1023 { - t.Fatal("CPUShares of unit is not 1023, %s", value) + t.Fatal("CPUShares of unit is not 1023:", value) } } @@ -250,7 +266,7 @@ func TestStartStopTransientUnit(t *testing.T) { } if job != "done" { - t.Fatal("Job is not done, %v", job) + t.Fatal("Job is not done:", job) } units, err := conn.ListUnits() @@ -295,6 +311,7 @@ func TestConnJobListener(t *testing.T) { conn := setupConn(t) setupUnit(target, conn, t) + linkUnit(target, conn, t) jobSize := len(conn.jobListener.jobs) diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go index 3d896d896f..fcd29b6e8f 100644 --- a/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go +++ b/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go @@ -40,7 +40,6 @@ func (c *Conn) Subscribe() error { err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() if err != nil { - c.sysconn.Close() return err } @@ -51,7 +50,6 @@ func (c *Conn) Subscribe() error { func (c *Conn) Unsubscribe() error { err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() if err != nil { - c.sysconn.Close() return err } @@ -69,7 +67,11 @@ func (c *Conn) initDispatch() { go func() { for { - signal := <-ch + signal, ok := <-ch + if !ok { + return + } + switch signal.Name { case "org.freedesktop.systemd1.Manager.JobRemoved": c.jobComplete(signal) diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go index db600850c2..4ecd15376d 100644 --- a/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go +++ b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go @@ -25,6 +25,7 @@ func TestSubscriptionSetUnit(t *testing.T) { subSet.Add(target) setupUnit(target, conn, t) + linkUnit(target, conn, t) job, err := conn.StartUnit(target, "replace") if err != nil { @@ -47,7 +48,7 @@ func TestSubscriptionSetUnit(t *testing.T) { tCh, ok := changes[target] if !ok { - t.Fatal("Unexpected event %v", changes) + t.Fatal("Unexpected event:", changes) } if tCh.ActiveState == "active" && tCh.Name == target { @@ -63,5 +64,3 @@ func TestSubscriptionSetUnit(t *testing.T) { 
 success:
 	return
 }
-
-
diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go
index 6f4d0b32a6..f2b5dfc28c 100644
--- a/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go
+++ b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go
@@ -47,6 +47,7 @@ func TestSubscribeUnit(t *testing.T) {
 	evChan, errChan := conn.SubscribeUnits(time.Second)
 
 	setupUnit(target, conn, t)
+	linkUnit(target, conn, t)
 
 	job, err := conn.StartUnit(target, "replace")
 	if err != nil {
diff --git a/vendor/src/github.com/coreos/go-systemd/journal/send.go b/vendor/src/github.com/coreos/go-systemd/journal/send.go
index a29bcbf0fa..b52e120988 100644
--- a/vendor/src/github.com/coreos/go-systemd/journal/send.go
+++ b/vendor/src/github.com/coreos/go-systemd/journal/send.go
@@ -119,7 +119,7 @@ func appendVariable(w io.Writer, name, value string) {
 		fmt.Fprintln(w, value)
 	} else {
 		/* just write the variable and value all on one line */
-		fmt.Fprintln(w, "%s=%s", name, value)
+		fmt.Fprintf(w, "%s=%s\n", name, value)
 	}
 }
diff --git a/vendor/src/github.com/coreos/go-systemd/login1/dbus.go b/vendor/src/github.com/coreos/go-systemd/login1/dbus.go
new file mode 100644
index 0000000000..d00dd110b5
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/login1/dbus.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2014 CoreOS Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Integration with the systemd logind API. See http://www.freedesktop.org/wiki/Software/systemd/logind/
+package login1
+
+import (
+	"os"
+	"strconv"
+
+	"github.com/godbus/dbus"
+)
+
+const (
+	dbusInterface = "org.freedesktop.login1.Manager"
+	dbusPath      = "/org/freedesktop/login1"
+)
+
+// Conn is a connection to systemd's D-Bus endpoint.
+type Conn struct {
+	conn   *dbus.Conn
+	object *dbus.Object
+}
+
+// New() establishes a connection to the system bus and authenticates.
+func New() (*Conn, error) {
+	c := new(Conn)
+
+	if err := c.initConnection(); err != nil {
+		return nil, err
+	}
+
+	return c, nil
+}
+
+func (c *Conn) initConnection() error {
+	var err error
+	c.conn, err = dbus.SystemBusPrivate()
+	if err != nil {
+		return err
+	}
+
+	// Only use EXTERNAL method, and hardcode the uid (not username)
+	// to avoid a username lookup (which requires a dynamically linked
+	// libc)
+	methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
+
+	err = c.conn.Auth(methods)
+	if err != nil {
+		c.conn.Close()
+		return err
+	}
+
+	err = c.conn.Hello()
+	if err != nil {
+		c.conn.Close()
+		return err
+	}
+
+	c.object = c.conn.Object("org.freedesktop.login1", dbus.ObjectPath(dbusPath))
+
+	return nil
+}
+
+// Reboot asks logind for a reboot, optionally asking for auth.
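A minimal sketch of driving the new login1 package, assuming a systemd host with logind running; note that Reboot (declared just below) returns nothing, so D-Bus errors are not surfaced to the caller.

```go
package main

import (
	"log"

	"github.com/coreos/go-systemd/login1"
)

func main() {
	conn, err := login1.New()
	if err != nil {
		log.Fatal(err) // no system bus reachable, or authentication failed
	}

	// true asks logind to attempt interactive (polkit) authorization
	// rather than failing outright when the caller lacks privileges.
	conn.Reboot(true)
}
```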
+func (c *Conn) Reboot(askForAuth bool) { + c.object.Call(dbusInterface+".Reboot", 0, askForAuth) +} diff --git a/vendor/src/github.com/coreos/go-systemd/login1/dbus_test.go b/vendor/src/github.com/coreos/go-systemd/login1/dbus_test.go new file mode 100644 index 0000000000..4439d37380 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/login1/dbus_test.go @@ -0,0 +1,30 @@ +/* +Copyright 2014 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package login1 + +import ( + "testing" +) + +// TestNew ensures that New() works without errors. +func TestNew(t *testing.T) { + _, err := New() + + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/src/github.com/gorilla/context/.travis.yml b/vendor/src/github.com/gorilla/context/.travis.yml new file mode 100644 index 0000000000..d87d465768 --- /dev/null +++ b/vendor/src/github.com/gorilla/context/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip diff --git a/vendor/src/github.com/gorilla/context/README.md b/vendor/src/github.com/gorilla/context/README.md index 8ee62b4263..c60a31b053 100644 --- a/vendor/src/github.com/gorilla/context/README.md +++ b/vendor/src/github.com/gorilla/context/README.md @@ -1,5 +1,6 @@ context ======= +[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) gorilla/context is a general purpose registry for global request variables. diff --git a/vendor/src/github.com/gorilla/context/context.go b/vendor/src/github.com/gorilla/context/context.go index 35d65561f3..a7f7d85bb4 100644 --- a/vendor/src/github.com/gorilla/context/context.go +++ b/vendor/src/github.com/gorilla/context/context.go @@ -11,7 +11,7 @@ import ( ) var ( - mutex sync.Mutex + mutex sync.RWMutex data = make(map[*http.Request]map[interface{}]interface{}) datat = make(map[*http.Request]int64) ) @@ -19,42 +19,64 @@ var ( // Set stores a value for a given key in a given request. func Set(r *http.Request, key, val interface{}) { mutex.Lock() - defer mutex.Unlock() if data[r] == nil { data[r] = make(map[interface{}]interface{}) datat[r] = time.Now().Unix() } data[r][key] = val + mutex.Unlock() } // Get returns a value stored for a given key in a given request. func Get(r *http.Request, key interface{}) interface{} { - mutex.Lock() - defer mutex.Unlock() + mutex.RLock() if data[r] != nil { + mutex.RUnlock() return data[r][key] } + mutex.RUnlock() return nil } // GetOk returns stored value and presence state like multi-value return of map access. func GetOk(r *http.Request, key interface{}) (interface{}, bool) { - mutex.Lock() - defer mutex.Unlock() + mutex.RLock() if _, ok := data[r]; ok { value, ok := data[r][key] + mutex.RUnlock() return value, ok } + mutex.RUnlock() return nil, false } +// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. 
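A short usage sketch for the new accessors defined below, assuming the vendored gorilla/context import path; the keys and values are arbitrary examples.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/context"
)

func main() {
	r, _ := http.NewRequest("GET", "http://example.com/", nil)

	context.Set(r, "user", "alice")
	context.Set(r, "reqID", 42)

	// GetAll returns every value registered for this request (or nil).
	for k, v := range context.GetAll(r) {
		fmt.Printf("%v = %v\n", k, v)
	}

	// GetAllOk additionally reports whether the request was ever registered.
	if _, ok := context.GetAllOk(r); ok {
		fmt.Println("request has a context")
	}

	// Per-request state must be cleared eventually to avoid leaks.
	context.Clear(r)
}
```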
+func GetAll(r *http.Request) map[interface{}]interface{} {
+	mutex.RLock()
+	if context, ok := data[r]; ok {
+		mutex.RUnlock()
+		return context
+	}
+	mutex.RUnlock()
+	return nil
+}
+
+// GetAllOk returns all stored values for the request as a map. It returns not
+// ok if the request was never registered.
+func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
+	mutex.RLock()
+	context, ok := data[r]
+	mutex.RUnlock()
+	return context, ok
+}
+
 // Delete removes a value stored for a given key in a given request.
 func Delete(r *http.Request, key interface{}) {
 	mutex.Lock()
-	defer mutex.Unlock()
 	if data[r] != nil {
 		delete(data[r], key)
 	}
+	mutex.Unlock()
 }
 
 // Clear removes all values stored for a given request.
@@ -63,8 +85,8 @@ func Delete(r *http.Request, key interface{}) {
 // variables at the end of a request lifetime. See ClearHandler().
 func Clear(r *http.Request) {
 	mutex.Lock()
-	defer mutex.Unlock()
 	clear(r)
+	mutex.Unlock()
 }
 
 // clear is Clear without the lock.
@@ -84,7 +106,6 @@ func clear(r *http.Request) {
 // periodically until the problem is fixed.
 func Purge(maxAge int) int {
 	mutex.Lock()
-	defer mutex.Unlock()
 	count := 0
 	if maxAge <= 0 {
 		count = len(data)
@@ -92,13 +113,14 @@ func Purge(maxAge int) int {
 		datat = make(map[*http.Request]int64)
 	} else {
 		min := time.Now().Unix() - int64(maxAge)
-		for r, _ := range data {
+		for r := range data {
 			if datat[r] < min {
 				clear(r)
 				count++
 			}
 		}
 	}
+	mutex.Unlock()
 	return count
}
diff --git a/vendor/src/github.com/gorilla/context/context_test.go b/vendor/src/github.com/gorilla/context/context_test.go
index ff9e2ad5fc..6ada8ec31f 100644
--- a/vendor/src/github.com/gorilla/context/context_test.go
+++ b/vendor/src/github.com/gorilla/context/context_test.go
@@ -24,6 +24,7 @@ func TestContext(t *testing.T) {
 	}
 
 	r, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
+	emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
 
 	// Get()
 	assertEqual(Get(r, key1), nil)
@@ -51,6 +52,26 @@ func TestContext(t *testing.T) {
 	assertEqual(value, nil)
 	assertEqual(ok, true)
 
+	// GetAll()
+	values := GetAll(r)
+	assertEqual(len(values), 3)
+
+	// GetAll() for empty request
+	values = GetAll(emptyR)
+	if values != nil {
+		t.Error("GetAll didn't return nil value for invalid request")
+	}
+
+	// GetAllOk()
+	values, ok = GetAllOk(r)
+	assertEqual(len(values), 3)
+	assertEqual(ok, true)
+
+	// GetAllOk() for empty request
+	values, ok = GetAllOk(emptyR)
+	assertEqual(len(values), 0)
+	assertEqual(ok, false)
+
 	// Delete()
 	Delete(r, key1)
 	assertEqual(Get(r, key1), nil)
@@ -64,3 +85,77 @@ func TestContext(t *testing.T) {
 	Clear(r)
 	assertEqual(len(data), 0)
 }
+
+func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) {
+	<-wait
+	for i := 0; i < iterations; i++ {
+		Get(r, key)
+	}
+	done <- struct{}{}
+
+}
+
+func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) {
+	<-wait
+	for i := 0; i < iterations; i++ {
+		Set(r, key, value)
+	}
+	done <- struct{}{}
+
+}
+
+func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) {
+
+	b.StopTimer()
+	r, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
+	done := make(chan struct{})
+	b.StartTimer()
+
+	for i := 0; i < b.N; i++ {
+		wait := make(chan struct{})
+
+		for i := 0; i < numReaders; i++ {
+			go parallelReader(r, "test", iterations, wait, done)
+		}
+
+		for i := 0; i < numWriters; i++ {
+			go parallelWriter(r, "test", "123", iterations, wait, done)
+		}
+
+		close(wait)
+
+		for i := 0; i < 
numReaders+numWriters; i++ { + <-done + } + + } + +} + +func BenchmarkMutexSameReadWrite1(b *testing.B) { + benchmarkMutex(b, 1, 1, 32) +} +func BenchmarkMutexSameReadWrite2(b *testing.B) { + benchmarkMutex(b, 2, 2, 32) +} +func BenchmarkMutexSameReadWrite4(b *testing.B) { + benchmarkMutex(b, 4, 4, 32) +} +func BenchmarkMutex1(b *testing.B) { + benchmarkMutex(b, 2, 8, 32) +} +func BenchmarkMutex2(b *testing.B) { + benchmarkMutex(b, 16, 4, 64) +} +func BenchmarkMutex3(b *testing.B) { + benchmarkMutex(b, 1, 2, 128) +} +func BenchmarkMutex4(b *testing.B) { + benchmarkMutex(b, 128, 32, 256) +} +func BenchmarkMutex5(b *testing.B) { + benchmarkMutex(b, 1024, 2048, 64) +} +func BenchmarkMutex6(b *testing.B) { + benchmarkMutex(b, 2048, 1024, 512) +} diff --git a/vendor/src/github.com/gorilla/context/doc.go b/vendor/src/github.com/gorilla/context/doc.go index 297606455c..73c7400311 100644 --- a/vendor/src/github.com/gorilla/context/doc.go +++ b/vendor/src/github.com/gorilla/context/doc.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. /* -Package gorilla/context stores values shared during a request lifetime. +Package context stores values shared during a request lifetime. For example, a router can set variables extracted from the URL and later application handlers can access those values, or it can be used to store diff --git a/vendor/src/github.com/gorilla/mux/.travis.yml b/vendor/src/github.com/gorilla/mux/.travis.yml new file mode 100644 index 0000000000..d87d465768 --- /dev/null +++ b/vendor/src/github.com/gorilla/mux/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip diff --git a/vendor/src/github.com/gorilla/mux/README.md b/vendor/src/github.com/gorilla/mux/README.md index f6db41ad81..e60301b033 100644 --- a/vendor/src/github.com/gorilla/mux/README.md +++ b/vendor/src/github.com/gorilla/mux/README.md @@ -1,5 +1,6 @@ mux === +[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) gorilla/mux is a powerful URL router and dispatcher. diff --git a/vendor/src/github.com/gorilla/mux/doc.go b/vendor/src/github.com/gorilla/mux/doc.go index 8ee5540a4f..b2deed34c4 100644 --- a/vendor/src/github.com/gorilla/mux/doc.go +++ b/vendor/src/github.com/gorilla/mux/doc.go @@ -134,7 +134,7 @@ the inner routes use it as base for their paths: // "/products/{key}/" s.HandleFunc("/{key}/", ProductHandler) // "/products/{key}/details" - s.HandleFunc("/{key}/details"), ProductDetailsHandler) + s.HandleFunc("/{key}/details", ProductDetailsHandler) Now let's see how to build registered URLs. diff --git a/vendor/src/github.com/gorilla/mux/mux.go b/vendor/src/github.com/gorilla/mux/mux.go index 385717394c..8b23c39d39 100644 --- a/vendor/src/github.com/gorilla/mux/mux.go +++ b/vendor/src/github.com/gorilla/mux/mux.go @@ -14,7 +14,7 @@ import ( // NewRouter returns a new router instance. func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route)} + return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} } // Router registers routes to be matched and dispatches a handler. @@ -46,6 +46,8 @@ type Router struct { namedRoutes map[string]*Route // See Router.StrictSlash(). This defines the flag for new routes. strictSlash bool + // If true, do not clear the request context after handling the request + KeepContext bool } // Match matches registered routes against the request. 
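The new KeepContext knob exists so that wrapping middleware can still read request-scoped values after the router has run. A hedged sketch under that assumption; clearAfter is a hypothetical wrapper, not part of gorilla/mux, and whoever sets KeepContext owns the cleanup:

```go
package main

import (
	"net/http"

	"github.com/gorilla/context"
	"github.com/gorilla/mux"
)

// clearAfter inspects context values after routing, then clears them itself.
func clearAfter(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		defer context.Clear(req) // cleanup moved out of the router
		h.ServeHTTP(w, req)
	})
}

func main() {
	r := mux.NewRouter()
	// Keep request-scoped values alive after the handler returns so the
	// middleware above can still see them.
	r.KeepContext = true

	r.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
		context.Set(req, "handled", true)
	})

	http.ListenAndServe(":8080", clearAfter(r))
}
```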
@@ -65,6 +67,14 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
 func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 	// Clean path to canonical form and redirect.
 	if p := cleanPath(req.URL.Path); p != req.URL.Path {
+
+		// Added 3 lines (Philip Schlump) - It was dropping the query string and fragment from the URL.
+		// This matches the fix in Go 1.2 rc4 for the same problem. Go Issue:
+		// http://code.google.com/p/go/issues/detail?id=5252
+		url := *req.URL
+		url.Path = p
+		p = url.String()
+
 		w.Header().Set("Location", p)
 		w.WriteHeader(http.StatusMovedPermanently)
 		return
@@ -82,7 +92,9 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 		}
 		handler = r.NotFoundHandler
 	}
-	defer context.Clear(req)
+	if !r.KeepContext {
+		defer context.Clear(req)
+	}
 	handler.ServeHTTP(w, req)
 }
 
@@ -97,14 +109,20 @@ func (r *Router) GetRoute(name string) *Route {
 	return r.getNamedRoutes()[name]
 }
 
-// StrictSlash defines the slash behavior for new routes.
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
 //
 // When true, if the route path is "/path/", accessing "/path" will redirect
-// to the former and vice versa.
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
 //
-// Special case: when a route sets a path prefix, strict slash is
-// automatically set to false for that route because the redirect behavior
-// can't be determined for prefixes.
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
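To make the documented redirect concrete before the function below, a small in-process demonstration using httptest; the route and host are arbitrary examples.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.StrictSlash(true)
	r.HandleFunc("/articles/", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprint(w, "articles index")
	})

	// The route is registered with a trailing slash, so the bare path
	// should come back as a permanent redirect to the slashed form.
	req, _ := http.NewRequest("GET", "http://example.com/articles", nil)
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)

	fmt.Println(rec.Code)                     // expect 301
	fmt.Println(rec.Header().Get("Location")) // expect a URL ending in /articles/
}
```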
func (r *Router) StrictSlash(value bool) *Router { r.strictSlash = value return r diff --git a/vendor/src/github.com/gorilla/mux/mux_test.go b/vendor/src/github.com/gorilla/mux/mux_test.go index 55159bd10d..0e2e48067a 100644 --- a/vendor/src/github.com/gorilla/mux/mux_test.go +++ b/vendor/src/github.com/gorilla/mux/mux_test.go @@ -8,16 +8,19 @@ import ( "fmt" "net/http" "testing" + + "github.com/gorilla/context" ) type routeTest struct { - title string // title of the test - route *Route // the route being tested - request *http.Request // a request to test the route - vars map[string]string // the expected vars of the match - host string // the expected host of the match - path string // the expected path of the match - shouldMatch bool // whether the request is expected to match the route at all + title string // title of the test + route *Route // the route being tested + request *http.Request // a request to test the route + vars map[string]string // the expected vars of the match + host string // the expected host of the match + path string // the expected path of the match + shouldMatch bool // whether the request is expected to match the route at all + shouldRedirect bool // whether the request should result in a redirect } func TestHost(t *testing.T) { @@ -149,6 +152,33 @@ func TestPath(t *testing.T) { path: "/111/222/333", shouldMatch: true, }, + { + title: "Path route, match with trailing slash in request and path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + }, + { + title: "Path route, do not match with trailing slash in path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "Path route, do not match with trailing slash in request", + route: new(Route).Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: false, + }, { title: "Path route, wrong path in request in request URL", route: new(Route).Path("/111/222/333"), @@ -212,6 +242,15 @@ func TestPathPrefix(t *testing.T) { path: "/111", shouldMatch: true, }, + { + title: "PathPrefix route, match substring", + route: new(Route).PathPrefix("/1"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/1", + shouldMatch: true, + }, { title: "PathPrefix route, URL prefix in request does not match", route: new(Route).PathPrefix("/111"), @@ -414,6 +453,15 @@ func TestQueries(t *testing.T) { path: "", shouldMatch: true, }, + { + title: "Queries route, match with a query string", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, { title: "Queries route, bad query", route: new(Route).Queries("foo", "bar", "baz", "ding"), @@ -568,26 +616,74 @@ func TestNamedRoutes(t *testing.T) { } func TestStrictSlash(t *testing.T) { - var r *Router - var req *http.Request - var route *Route - var match *RouteMatch - var matched bool - - // StrictSlash should be ignored for path prefix. - // So we register a route ending in slash but it doesn't attempt to add - // the slash for a path not ending in slash. 
- r = NewRouter() + r := NewRouter() r.StrictSlash(true) - route = r.NewRoute().PathPrefix("/static/") - req, _ = http.NewRequest("GET", "http://localhost/static/logo.png", nil) - match = new(RouteMatch) - matched = r.Match(req, match) - if !matched { - t.Errorf("Should match request %q -- %v", req.URL.Path, getRouteTemplate(route)) + + tests := []routeTest{ + { + title: "Redirect path without slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path with slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Redirect path with slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path without slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Propagate StrictSlash to subrouters", + route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), + request: newRequest("GET", "http://localhost/static/images"), + vars: map[string]string{}, + host: "", + path: "/static/images/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Ignore StrictSlash for path prefix", + route: r.NewRoute().PathPrefix("/static/"), + request: newRequest("GET", "http://localhost/static/logo.png"), + vars: map[string]string{}, + host: "", + path: "/static/", + shouldMatch: true, + shouldRedirect: false, + }, } - if match.Handler != nil { - t.Errorf("Should not redirect") + + for _, test := range tests { + testRoute(t, test) } } @@ -616,6 +712,7 @@ func testRoute(t *testing.T, test routeTest) { host := test.host path := test.path url := test.host + test.path + shouldRedirect := test.shouldRedirect var match RouteMatch ok := route.Match(request, &match) @@ -653,6 +750,84 @@ func testRoute(t *testing.T, test routeTest) { return } } + if shouldRedirect && match.Handler == nil { + t.Errorf("(%v) Did not redirect", test.title) + return + } + if !shouldRedirect && match.Handler != nil { + t.Errorf("(%v) Unexpected redirect", test.title) + return + } + } +} + +// Tests that the context is cleared or not cleared properly depending on +// the configuration of the router +func TestKeepContext(t *testing.T) { + func1 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + res := new(http.ResponseWriter) + r.ServeHTTP(*res, req) + + if _, ok := context.GetOk(req, "t"); ok { + t.Error("Context should have been cleared at end of request") + } + + r.KeepContext = true + + req, _ = http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + r.ServeHTTP(*res, req) + if _, ok := context.GetOk(req, "t"); !ok { + t.Error("Context should NOT have been cleared at end of request") + } + +} + +type TestA301ResponseWriter struct { + hh http.Header + status int +} + +func (ho TestA301ResponseWriter) Header() http.Header { + return http.Header(ho.hh) +} + +func (ho 
TestA301ResponseWriter) Write(b []byte) (int, error) { + return 0, nil +} + +func (ho TestA301ResponseWriter) WriteHeader(code int) { + ho.status = code +} + +func Test301Redirect(t *testing.T) { + m := make(http.Header) + + func1 := func(w http.ResponseWriter, r *http.Request) {} + func2 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/api/", func2).Name("func2") + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) + + res := TestA301ResponseWriter{ + hh: m, + status: 0, + } + r.ServeHTTP(&res, req) + + if "http://localhost/api/?abc=def" != res.hh["Location"][0] { + t.Errorf("Should have complete URL with query string") } } diff --git a/vendor/src/github.com/gorilla/mux/old_test.go b/vendor/src/github.com/gorilla/mux/old_test.go index 7e266bb695..42530590e7 100644 --- a/vendor/src/github.com/gorilla/mux/old_test.go +++ b/vendor/src/github.com/gorilla/mux/old_test.go @@ -96,8 +96,8 @@ func TestRouteMatchers(t *testing.T) { method = "GET" headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} resultVars = map[bool]map[string]string{ - true: map[string]string{"var1": "www", "var2": "product", "var3": "42"}, - false: map[string]string{}, + true: {"var1": "www", "var2": "product", "var3": "42"}, + false: {}, } } @@ -110,8 +110,8 @@ func TestRouteMatchers(t *testing.T) { method = "POST" headers = map[string]string{"Content-Type": "application/json"} resultVars = map[bool]map[string]string{ - true: map[string]string{"var4": "google", "var5": "product", "var6": "42"}, - false: map[string]string{}, + true: {"var4": "google", "var5": "product", "var6": "42"}, + false: {}, } } diff --git a/vendor/src/github.com/gorilla/mux/regexp.go b/vendor/src/github.com/gorilla/mux/regexp.go index 4c3482bfbd..925f268abe 100644 --- a/vendor/src/github.com/gorilla/mux/regexp.go +++ b/vendor/src/github.com/gorilla/mux/regexp.go @@ -98,12 +98,13 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, strictSlash bool) (*rout } // Done! return &routeRegexp{ - template: template, - matchHost: matchHost, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, + template: template, + matchHost: matchHost, + strictSlash: strictSlash, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, }, nil } @@ -114,6 +115,8 @@ type routeRegexp struct { template string // True for host match, false for path match. matchHost bool + // The strictSlash value defined on the route, but disabled if PathPrefix was used. + strictSlash bool // Expanded regexp. regexp *regexp.Regexp // Reverse template. @@ -216,7 +219,7 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) m.Vars[v] = pathVars[k+1] } // Check if we should redirect. - if r.strictSlash { + if v.path.strictSlash { p1 := strings.HasSuffix(req.URL.Path, "/") p2 := strings.HasSuffix(v.path.template, "/") if p1 != p2 { diff --git a/vendor/src/github.com/gorilla/mux/route.go b/vendor/src/github.com/gorilla/mux/route.go index cb538ea4ec..5cb2526d61 100644 --- a/vendor/src/github.com/gorilla/mux/route.go +++ b/vendor/src/github.com/gorilla/mux/route.go @@ -259,7 +259,8 @@ func (r *Route) Methods(methods ...string) *Route { // Path ----------------------------------------------------------------------- // Path adds a matcher for the URL path. -// It accepts a template with zero or more URL variables enclosed by {}. +// It accepts a template with zero or more URL variables enclosed by {}. 
The
+// template must start with a "/".
 // Variables can define an optional regexp pattern to be matched:
 //
 // - {name} matches anything until the next slash.
@@ -283,9 +284,16 @@ func (r *Route) Path(tpl string) *Route {
 
 // PathPrefix -----------------------------------------------------------------
 
-// PathPrefix adds a matcher for the URL path prefix.
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
 func (r *Route) PathPrefix(tpl string) *Route {
-	r.strictSlash = false
 	r.err = r.addRegexpMatcher(tpl, false, true)
 	return r
 }
@@ -328,7 +336,7 @@ func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
 }
 
 // Schemes adds a matcher for URL schemes.
-// It accepts a sequence schemes to be matched, e.g.: "http", "https".
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
 func (r *Route) Schemes(schemes ...string) *Route {
 	for k, v := range schemes {
 		schemes[k] = strings.ToLower(v)
diff --git a/vendor/src/github.com/kr/pty/ioctl.go b/vendor/src/github.com/kr/pty/ioctl.go
new file mode 100644
index 0000000000..5b856e8711
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/ioctl.go
@@ -0,0 +1,11 @@
+package pty
+
+import "syscall"
+
+func ioctl(fd, cmd, ptr uintptr) error {
+	_, _, e := syscall.Syscall(syscall.SYS_IOCTL, fd, cmd, ptr)
+	if e != 0 {
+		return e
+	}
+	return nil
+}
diff --git a/vendor/src/github.com/kr/pty/ioctl_bsd.go b/vendor/src/github.com/kr/pty/ioctl_bsd.go
new file mode 100644
index 0000000000..73b12c53cf
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/ioctl_bsd.go
@@ -0,0 +1,39 @@
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package pty
+
+// from <sys/ioccom.h>
+const (
+	_IOC_VOID    uintptr = 0x20000000
+	_IOC_OUT     uintptr = 0x40000000
+	_IOC_IN      uintptr = 0x80000000
+	_IOC_IN_OUT  uintptr = _IOC_OUT | _IOC_IN
+	_IOC_DIRMASK         = _IOC_VOID | _IOC_OUT | _IOC_IN
+
+	_IOC_PARAM_SHIFT = 13
+	_IOC_PARAM_MASK  = (1 << _IOC_PARAM_SHIFT) - 1
+)
+
+func _IOC_PARM_LEN(ioctl uintptr) uintptr {
+	return (ioctl >> 16) & _IOC_PARAM_MASK
+}
+
+func _IOC(inout uintptr, group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+	return inout | (param_len&_IOC_PARAM_MASK)<<16 | uintptr(group)<<8 | ioctl_num
+}
+
+func _IO(group byte, ioctl_num uintptr) uintptr {
+	return _IOC(_IOC_VOID, group, ioctl_num, 0)
+}
+
+func _IOR(group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+	return _IOC(_IOC_OUT, group, ioctl_num, param_len)
+}
+
+func _IOW(group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+	return _IOC(_IOC_IN, group, ioctl_num, param_len)
+}
+
+func _IOWR(group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+	return _IOC(_IOC_IN_OUT, group, ioctl_num, param_len)
+}
diff --git a/vendor/src/github.com/kr/pty/ioctl_linux.go b/vendor/src/github.com/kr/pty/ioctl_linux.go
new file mode 100644
index 0000000000..9fe7b0b0f9
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/ioctl_linux.go
@@ -0,0 +1,42 @@
+package pty
+
+// from <asm-generic/ioctl.h>
+const (
+	_IOC_NRBITS   = 8
+	_IOC_TYPEBITS = 8
+
+	_IOC_SIZEBITS = 14
+	_IOC_DIRBITS  = 2
+
+	_IOC_NRSHIFT   = 0
+	_IOC_TYPESHIFT = _IOC_NRSHIFT + _IOC_NRBITS
+	_IOC_SIZESHIFT = _IOC_TYPESHIFT + _IOC_TYPEBITS
+	_IOC_DIRSHIFT  = 
_IOC_SIZESHIFT + _IOC_SIZEBITS + + _IOC_NONE uint = 0 + _IOC_WRITE uint = 1 + _IOC_READ uint = 2 +) + +func _IOC(dir uint, ioctl_type byte, nr byte, size uintptr) uintptr { + return (uintptr(dir)<<_IOC_DIRSHIFT | + uintptr(ioctl_type)<<_IOC_TYPESHIFT | + uintptr(nr)<<_IOC_NRSHIFT | + size<<_IOC_SIZESHIFT) +} + +func _IO(ioctl_type byte, nr byte) uintptr { + return _IOC(_IOC_NONE, ioctl_type, nr, 0) +} + +func _IOR(ioctl_type byte, nr byte, size uintptr) uintptr { + return _IOC(_IOC_READ, ioctl_type, nr, size) +} + +func _IOW(ioctl_type byte, nr byte, size uintptr) uintptr { + return _IOC(_IOC_WRITE, ioctl_type, nr, size) +} + +func _IOWR(ioctl_type byte, nr byte, size uintptr) uintptr { + return _IOC(_IOC_READ|_IOC_WRITE, ioctl_type, nr, size) +} diff --git a/vendor/src/github.com/kr/pty/mktypes.bash b/vendor/src/github.com/kr/pty/mktypes.bash new file mode 100755 index 0000000000..9952c88838 --- /dev/null +++ b/vendor/src/github.com/kr/pty/mktypes.bash @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +GOOSARCH="${GOOS}_${GOARCH}" +case "$GOOSARCH" in +_* | *_ | _) + echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2 + exit 1 + ;; +esac + +GODEFS="go tool cgo -godefs" + +$GODEFS types.go |gofmt > ztypes_$GOARCH.go + +case $GOOS in +freebsd) + $GODEFS types_$GOOS.go |gofmt > ztypes_$GOOSARCH.go + ;; +esac diff --git a/vendor/src/github.com/kr/pty/pty_darwin.go b/vendor/src/github.com/kr/pty/pty_darwin.go index 597bb03e57..4f4d5ca26e 100644 --- a/vendor/src/github.com/kr/pty/pty_darwin.go +++ b/vendor/src/github.com/kr/pty/pty_darwin.go @@ -7,9 +7,6 @@ import ( "unsafe" ) -// see ioccom.h -const sys_IOCPARM_MASK = 0x1fff - func open() (pty, tty *os.File, err error) { p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0) if err != nil { @@ -39,9 +36,13 @@ func open() (pty, tty *os.File, err error) { } func ptsname(f *os.File) (string, error) { - var n [(syscall.TIOCPTYGNAME >> 16) & sys_IOCPARM_MASK]byte + n := make([]byte, _IOC_PARM_LEN(syscall.TIOCPTYGNAME)) + + err := ioctl(f.Fd(), syscall.TIOCPTYGNAME, uintptr(unsafe.Pointer(&n[0]))) + if err != nil { + return "", err + } - ioctl(f.Fd(), syscall.TIOCPTYGNAME, uintptr(unsafe.Pointer(&n))) for i, c := range n { if c == 0 { return string(n[:i]), nil @@ -51,19 +52,9 @@ func ptsname(f *os.File) (string, error) { } func grantpt(f *os.File) error { - var u int - return ioctl(f.Fd(), syscall.TIOCPTYGRANT, uintptr(unsafe.Pointer(&u))) + return ioctl(f.Fd(), syscall.TIOCPTYGRANT, 0) } func unlockpt(f *os.File) error { - var u int - return ioctl(f.Fd(), syscall.TIOCPTYUNLK, uintptr(unsafe.Pointer(&u))) -} - -func ioctl(fd, cmd, ptr uintptr) error { - _, _, e := syscall.Syscall(syscall.SYS_IOCTL, fd, cmd, ptr) - if e != 0 { - return syscall.ENOTTY - } - return nil + return ioctl(f.Fd(), syscall.TIOCPTYUNLK, 0) } diff --git a/vendor/src/github.com/kr/pty/pty_freebsd.go b/vendor/src/github.com/kr/pty/pty_freebsd.go index 13b64d722e..b341babd05 100644 --- a/vendor/src/github.com/kr/pty/pty_freebsd.go +++ b/vendor/src/github.com/kr/pty/pty_freebsd.go @@ -1,53 +1,73 @@ package pty import ( + "errors" "os" - "strconv" "syscall" "unsafe" ) -const ( - sys_TIOCGPTN = 0x4004740F - sys_TIOCSPTLCK = 0x40045431 -) +func posix_openpt(oflag int) (fd int, err error) { + r0, _, e1 := syscall.Syscall(syscall.SYS_POSIX_OPENPT, uintptr(oflag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return +} func open() (pty, tty *os.File, err error) { - p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0) + fd, err := posix_openpt(syscall.O_RDWR | syscall.O_CLOEXEC) if err != nil { return 
nil, nil, err } + p := os.NewFile(uintptr(fd), "/dev/pts") sname, err := ptsname(p) if err != nil { return nil, nil, err } - t, err := os.OpenFile(sname, os.O_RDWR|syscall.O_NOCTTY, 0) + t, err := os.OpenFile("/dev/"+sname, os.O_RDWR, 0) if err != nil { return nil, nil, err } return p, t, nil } +func isptmaster(fd uintptr) (bool, error) { + err := ioctl(fd, syscall.TIOCPTMASTER, 0) + return err == nil, err +} + +var ( + emptyFiodgnameArg fiodgnameArg + ioctl_FIODGNAME = _IOW('f', 120, unsafe.Sizeof(emptyFiodgnameArg)) +) + func ptsname(f *os.File) (string, error) { - var n int - err := ioctl(f.Fd(), sys_TIOCGPTN, &n) + master, err := isptmaster(f.Fd()) if err != nil { return "", err } - return "/dev/pts/" + strconv.Itoa(n), nil -} - -func ioctl(fd uintptr, cmd uintptr, data *int) error { - _, _, e := syscall.Syscall( - syscall.SYS_IOCTL, - fd, - cmd, - uintptr(unsafe.Pointer(data)), - ) - if e != 0 { - return syscall.ENOTTY + if !master { + return "", syscall.EINVAL } - return nil + + const n = _C_SPECNAMELEN + 1 + var ( + buf = make([]byte, n) + arg = fiodgnameArg{Len: n, Buf: (*byte)(unsafe.Pointer(&buf[0]))} + ) + err = ioctl(f.Fd(), ioctl_FIODGNAME, uintptr(unsafe.Pointer(&arg))) + if err != nil { + return "", err + } + + for i, c := range buf { + if c == 0 { + return string(buf[:i]), nil + } + } + return "", errors.New("FIODGNAME string not NUL-terminated") } diff --git a/vendor/src/github.com/kr/pty/pty_linux.go b/vendor/src/github.com/kr/pty/pty_linux.go index a5edfbb394..6e5a04241c 100644 --- a/vendor/src/github.com/kr/pty/pty_linux.go +++ b/vendor/src/github.com/kr/pty/pty_linux.go @@ -7,9 +7,9 @@ import ( "unsafe" ) -const ( - sys_TIOCGPTN = 0x80045430 - sys_TIOCSPTLCK = 0x40045431 +var ( + ioctl_TIOCGPTN = _IOR('T', 0x30, unsafe.Sizeof(_C_uint(0))) /* Get Pty Number (of pty-mux device) */ + ioctl_TIOCSPTLCK = _IOW('T', 0x31, unsafe.Sizeof(_C_int(0))) /* Lock/unlock Pty */ ) func open() (pty, tty *os.File, err error) { @@ -36,28 +36,16 @@ func open() (pty, tty *os.File, err error) { } func ptsname(f *os.File) (string, error) { - var n int - err := ioctl(f.Fd(), sys_TIOCGPTN, &n) + var n _C_uint + err := ioctl(f.Fd(), ioctl_TIOCGPTN, uintptr(unsafe.Pointer(&n))) if err != nil { return "", err } - return "/dev/pts/" + strconv.Itoa(n), nil + return "/dev/pts/" + strconv.Itoa(int(n)), nil } func unlockpt(f *os.File) error { - var u int - return ioctl(f.Fd(), sys_TIOCSPTLCK, &u) -} - -func ioctl(fd uintptr, cmd uintptr, data *int) error { - _, _, e := syscall.Syscall( - syscall.SYS_IOCTL, - fd, - cmd, - uintptr(unsafe.Pointer(data)), - ) - if e != 0 { - return syscall.ENOTTY - } - return nil + var u _C_int + // use TIOCSPTLCK with a zero valued arg to clear the slave pty lock + return ioctl(f.Fd(), ioctl_TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) } diff --git a/vendor/src/github.com/kr/pty/pty_unsupported.go b/vendor/src/github.com/kr/pty/pty_unsupported.go index d4958b3583..898c7303c4 100644 --- a/vendor/src/github.com/kr/pty/pty_unsupported.go +++ b/vendor/src/github.com/kr/pty/pty_unsupported.go @@ -9,19 +9,3 @@ import ( func open() (pty, tty *os.File, err error) { return nil, nil, ErrUnsupported } - -func ptsname(f *os.File) (string, error) { - return "", ErrUnsupported -} - -func grantpt(f *os.File) error { - return ErrUnsupported -} - -func unlockpt(f *os.File) error { - return ErrUnsupported -} - -func ioctl(fd, cmd, ptr uintptr) error { - return ErrUnsupported -} diff --git a/vendor/src/github.com/kr/pty/types.go b/vendor/src/github.com/kr/pty/types.go new file mode 100644 index 
0000000000..5aecb6bcdc
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/types.go
@@ -0,0 +1,10 @@
+// +build ignore
+
+package pty
+
+import "C"
+
+type (
+	_C_int  C.int
+	_C_uint C.uint
+)
diff --git a/vendor/src/github.com/kr/pty/types_freebsd.go b/vendor/src/github.com/kr/pty/types_freebsd.go
new file mode 100644
index 0000000000..ce3eb95181
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/types_freebsd.go
@@ -0,0 +1,15 @@
+// +build ignore
+
+package pty
+
+/*
+#include <sys/types.h>
+#include <sys/filio.h>
+*/
+import "C"
+
+const (
+	_C_SPECNAMELEN = C.SPECNAMELEN /* max length of devicename */
+)
+
+type fiodgnameArg C.struct_fiodgname_arg
diff --git a/vendor/src/github.com/kr/pty/ztypes_386.go b/vendor/src/github.com/kr/pty/ztypes_386.go
new file mode 100644
index 0000000000..ff0b8fd838
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/ztypes_386.go
@@ -0,0 +1,9 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+package pty
+
+type (
+	_C_int  int32
+	_C_uint uint32
+)
diff --git a/vendor/src/github.com/kr/pty/ztypes_amd64.go b/vendor/src/github.com/kr/pty/ztypes_amd64.go
new file mode 100644
index 0000000000..ff0b8fd838
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/ztypes_amd64.go
@@ -0,0 +1,9 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+package pty
+
+type (
+	_C_int  int32
+	_C_uint uint32
+)
diff --git a/vendor/src/github.com/kr/pty/ztypes_arm.go b/vendor/src/github.com/kr/pty/ztypes_arm.go
new file mode 100644
index 0000000000..ff0b8fd838
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/ztypes_arm.go
@@ -0,0 +1,9 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+package pty
+
+type (
+	_C_int  int32
+	_C_uint uint32
+)
diff --git a/vendor/src/github.com/kr/pty/ztypes_freebsd_386.go b/vendor/src/github.com/kr/pty/ztypes_freebsd_386.go
new file mode 100644
index 0000000000..d9975374e3
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/ztypes_freebsd_386.go
@@ -0,0 +1,13 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_freebsd.go
+
+package pty
+
+const (
+	_C_SPECNAMELEN = 0x3f
+)
+
+type fiodgnameArg struct {
+	Len int32
+	Buf *byte
+}
diff --git a/vendor/src/github.com/kr/pty/ztypes_freebsd_amd64.go b/vendor/src/github.com/kr/pty/ztypes_freebsd_amd64.go
new file mode 100644
index 0000000000..5fa102fcdf
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/ztypes_freebsd_amd64.go
@@ -0,0 +1,14 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_freebsd.go
+
+package pty
+
+const (
+	_C_SPECNAMELEN = 0x3f
+)
+
+type fiodgnameArg struct {
+	Len       int32
+	Pad_cgo_0 [4]byte
+	Buf       *byte
+}
diff --git a/vendor/src/github.com/kr/pty/ztypes_freebsd_arm.go b/vendor/src/github.com/kr/pty/ztypes_freebsd_arm.go
new file mode 100644
index 0000000000..d9975374e3
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/ztypes_freebsd_arm.go
@@ -0,0 +1,13 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_freebsd.go
+
+package pty
+
+const (
+	_C_SPECNAMELEN = 0x3f
+)
+
+type fiodgnameArg struct {
+	Len int32
+	Buf *byte
+}
diff --git a/vendor/src/github.com/syndtr/gocapability/capability/capability_linux.go b/vendor/src/github.com/syndtr/gocapability/capability/capability_linux.go
index 3aaae5973a..c5f335f7fb 100644
--- a/vendor/src/github.com/syndtr/gocapability/capability/capability_linux.go
+++ b/vendor/src/github.com/syndtr/gocapability/capability/capability_linux.go
@@ -388,6 +388,11 @@ func (c *capsV3) Apply(kind CapType) (err error) {
 		}
 		err = prctl(syscall.PR_CAPBSET_DROP, uintptr(i), 0, 0, 0)
 		if err != nil {
+			// 
Ignore EINVAL since the capability may not be supported in this system. + if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL { + err = nil + continue + } return } }
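The tolerated-EINVAL pattern above generalizes to any feature probe against older kernels: attempt the operation, and treat "the kernel doesn't know this capability" as skippable rather than fatal. A Linux-only sketch using raw prctl(2) via syscall, rather than the package's internal prctl helper:

```go
package main

import (
	"fmt"
	"syscall"
)

// dropBoundingCap mirrors the pattern above: attempt the drop, but treat
// EINVAL as "capability unknown to this kernel" rather than a hard error,
// so binaries built against newer capability lists still run on old kernels.
func dropBoundingCap(capNum uintptr) error {
	_, _, e := syscall.Syscall6(syscall.SYS_PRCTL, syscall.PR_CAPBSET_DROP, capNum, 0, 0, 0, 0)
	switch e {
	case 0, syscall.EINVAL: // EINVAL: not supported here; skip it
		return nil
	default:
		return e
	}
}

func main() {
	// 9999 is a deliberately bogus capability number: privileged callers
	// get EINVAL (swallowed by design); unprivileged callers get EPERM.
	fmt.Println(dropBoundingCap(9999))
}
```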