diff --git a/.mailmap b/.mailmap index a34fc4823c..683758650e 100644 --- a/.mailmap +++ b/.mailmap @@ -6,14 +6,16 @@ Guillaume J. Charmes + - -Thatcher Peskens dhrp -Thatcher Peskens dhrp +Thatcher Peskens +Thatcher Peskens +Thatcher Peskens dhrp Jérôme Petazzoni jpetazzo Jérôme Petazzoni -Joffrey F - +Joffrey F +Joffrey F +Joffrey F Tim Terhorst Andy Smith @@ -23,7 +25,6 @@ Andy Smith -Thatcher Peskens Walter Stanish @@ -54,7 +55,26 @@ Jean-Baptiste Dalido - - -Sven Dowideit ¨Sven <¨SvenDowideit@home.org.au¨> + + +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit <¨SvenDowideit@home.org.au¨> unclejack + +Alexandr Morozov + +O.S. Tezer + +Roberto G. Hashioka + + + + + +Sridhar Ratnakumar +Sridhar Ratnakumar +Liang-Chi Hsieh +Aleksa Sarai +Will Weaver diff --git a/AUTHORS b/AUTHORS index adfcfaa851..10f01fb589 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,44 +1,62 @@ # This file lists all individuals having contributed content to the repository. -# If you're submitting a patch, please add your name here in alphabetical order as part of the patch. -# -# For a list of active project maintainers, see the MAINTAINERS file. -# +# For how it is generated, see `.mailmap`. + Aanand Prasad Aaron Feng +Aaron Huslage Abel Muiño +Adam Miller +Adam Singer +Aditya +Adrian Mouat +alambike +Aleksa Sarai Alexander Larsson +Alexandr Morozov +Alexey Kotlyarov Alexey Shamrin Alex Gaynor Alexis THOMAS +almoehi Al Tobey +amangoel Andrea Luzzardi Andreas Savvides Andreas Tiefenthaler +Andrea Turli Andrew Duckworth Andrew Macgregor Andrew Munsell Andrews Medina +Andrew Williams Andy Chambers andy diller Andy Goldstein +Andy Kipp Andy Rothfusz Andy Smith Anthony Bishopric Anton Nikitin Antony Messerli apocas +Arnaud Porterie Asbjørn Enge +Barnaby Gray Barry Allard Bartłomiej Piotrowski +Benjamin Atkin Benoit Chesneau Ben Sargent Ben Toews Ben Wiklund +Bernerd Schaefer Bhiraj Butala +bin liu Bouke Haarsma Brandon Liu Brandon Philips Brian Dorsey +Brian Flad Brian Goff Brian McCallister Brian Olsen @@ -46,11 +64,15 @@ Brian Shumate Briehan Lombaard Bruno Bigras Bryan Matsuo +Bryan Murphy Caleb Spare Calen Pennington +Cameron Boehmer Carl X. Su Charles Hooper Charles Lindsay +Charles Merriam +Charlie Lewis Chia-liang Kao Chris St. 
Pierre Christopher Currie @@ -61,6 +83,7 @@ Colin Dunklau Colin Rice Cory Forsyth cressie176 +Dafydd Crosby Dan Buch Dan Hirsch Daniel Exner @@ -72,30 +95,45 @@ Daniel Nordberg Daniel Robinson Daniel Von Fange Daniel YC Lin +Dan Keder +Dan McPherson +Danny Berger Danny Yates +Dan Stine +Dan Walsh +Dan Williams Darren Coxall +Darren Shepherd David Anderson David Calavera +David Gageot David Mcanulty +David Röthlisberger David Sissitka Deni Bertovic Dinesh Subhraveti +Djibril Koné dkumor Dmitry Demeshchuk +Dolph Mathews Dominik Honnef Don Spaulding Dražen Lučanin Dr Nic Williams Dustin Sallings Edmund Wagner +Eiichi Tsukata +Eivind Uggedal Elias Probst Emil Hernvall Emily Rose Eric Hanchrow Eric Lee Eric Myhre +Erik Hollensbe Erno Hopearuoho eugenkrizo +Evan Hazlett Evan Krall Evan Phoenix Evan Wies @@ -106,6 +144,7 @@ Fabio Rehm Fabrizio Regini Faiz Khan Fareed Dudhia +Felix Rabe Fernando Flavio Castelli Francisco Souza @@ -117,8 +156,11 @@ Gabe Rosenhouse Gabriel Monroy Galen Sampson Gareth Rushgrove +Geoffrey Bachelet Gereon Frey +German DZ Gert van Valkenhoef +Goffert van Gool Graydon Hoare Greg Thornton grunny @@ -127,28 +169,40 @@ Gurjeet Singh Guruprasad Harley Laue Hector Castro +Hobofan Hunter Blanks +Ian Truslove +ILYA Khlopotov inglesp Isaac Dupree +Isabel Jimenez Isao Jonas +Jack Danger Canty +jakedt Jake Moshenko James Allen James Carr +James DeFelice +James Harrison Fisher James Mills James Turnbull jaseg Jason McVetta +Jason Plum Jean-Baptiste Barth Jean-Baptiste Dalido Jeff Lindsay Jeremy Grosser Jérôme Petazzoni Jesse Dubay +Jilles Oldenbeuving Jim Alateras Jimmy Cuadra Joe Beda +Joel Handwell +Joe Shaw Joe Van Dyk -Joffrey F +Joffrey F Johan Euphrosine Johannes 'fish' Ziemke Johan Rydberg @@ -157,7 +211,9 @@ John Feminella John Gardiner Myers John Warwick Jonas Pfenniger +Jonathan McCrohan Jonathan Mueller +Jonathan Pares Jonathan Rudenberg Jon Wedaman Joost Cassee @@ -172,13 +228,17 @@ Julien Barbier Julien Dubois Justin Force Justin Plock +Justin Simonelis Karan Lyons Karl Grzeszczak +Kato Kazuyoshi Kawsar Saiyeed Keli Hu Ken Cochrane +Ken ICHIKAWA Kevin Clark Kevin J. Lynagh +Kevin Menard Kevin Wallace Keyvan Fatehi kim0 @@ -187,14 +247,20 @@ Kimbro Staken Kiran Gangadharan Konstantin Pelykh Kyle Conroy +lalyos +Lance Chen +Lars R. Damerow Laurie Voss +Lewis Peckover Liang-Chi Hsieh Lokesh Mandvekar Louis Opter lukaspustina +lukemarsden Mahesh Tiyyagura Manuel Meurer Manuel Woelker +Marc Abramowitz Marc Kuo Marco Hennings Marcus Farkas @@ -206,23 +272,32 @@ Marko Mikulicic Markus Fix Martijn van Oosterhout Martin Redmond +Mason Malone +Mateusz Sulima Mathieu Le Marec - Pasquet Matt Apperson Matt Bachmann Matt Haggard Matthew Mueller +Matthias Klumpp +Matthias Kühnle mattymo Maxime Petazzoni Maxim Treskin +Max Shytikov meejah +Michael Brown Michael Crosby Michael Gorsuch +Michael Neale Michael Stapelberg Miguel Angel Fernández Mike Gaffney +Mike MacCana Mike Naberezny Mikhail Sobolev Mohit Soni +Morgante Pell Morten Siebuhr Nan Monnand Deng Nate Jones @@ -234,22 +309,26 @@ Nick Stenning Nick Stinemates Nicolas Dudebout Nicolas Kaiser +noducks Nolan Darilek odk- Oguz Bilgic Ole Reifschneider -O.S.Tezer +O.S. 
Tezer pandrew Pascal Borreli pattichen +Paul Annesley Paul Bowsher Paul Hammond +Paul Jimenez Paul Lietar Paul Morie Paul Nasrat Paul Peter Braden Peter Waller +Phillip Alexander Phil Spitler Piergiuliano Bossi Pierre-Alain RIVIERE @@ -257,6 +336,8 @@ Piotr Bogdan pysqz Quentin Brossard Rafal Jeczalik +Rajat Pandit +Ralph Bean Ramkumar Ramachandra Ramon van Alteren Renato Riccieri Santos Zannon @@ -266,54 +347,71 @@ Richo Healey Rick Bradley Robert Obryk Roberto G. Hashioka -Roberto Hashioka +robpc Rodrigo Vaz Roel Van Nyen Roger Peppe +Rohit Jnagal +Roland Moriz +Rovanion Luckey +Ryan Aslett Ryan Fowler Ryan O'Donnell Ryan Seto +Ryan Thomas Sam Alba Sam J Sharpe +Sam Rijs Samuel Andaya Scott Bessler +Scott Collier Sean Cronin Sean P. Kane +Sébastien Stormacq Shawn Landden Shawn Siefkas Shih-Yuan Lee -shin- Silas Sewell Simon Taranto +Sindhu S Sjoerd Langkemper -Solomon Hykes +Solomon Hykes Song Gao +Soulou Sridatta Thatipamala Sridhar Ratnakumar Steeve Morin Stefan Praszalowicz +Steven Burgess sudosurootdev -Sven Dowideit +Sven Dowideit Sylvain Bellemare tang0th Tatsuki Sugiura Tehmasp Chaudhri -Thatcher Peskens +Thatcher Peskens Thermionix Thijs Terlouw Thomas Bikeev Thomas Frössman Thomas Hansen Thomas LEVEIL +Thomas Schroeter Tianon Gravi -Tim Bosse +Tibor Vass +Tim Bosse +Timothy Hobbs +Tim Ruffles Tim Terhorst +tjmehta Tobias Bieniek Tobias Schmidt Tobias Schwab Todd Lunter +Tom Fotherby Tom Hulihan Tommaso Visconti +Tony Daws Travis Cline Tyler Brock Tzu-Jung Lee @@ -322,26 +420,35 @@ unclejack vgeta Victor Coisne Victor Lyuboslavsky +Victor Marmol Victor Vieux +Viktor Vojnovski Vincent Batts Vincent Bernat +Vincent Mayers Vincent Woo Vinod Kulkarni +Vishnu Kannan Vitor Monteiro Vivek Agarwal +Vladimir Bulyga Vladimir Kirillov -Vladimir Rutsky +Vladimir Rutsky +Walter Leibbrandt Walter Stanish WarheadsSE Wes Morgan Will Dietz William Delanoue +William Henry Will Rouesnel Will Weaver Xiuming Chen Yang Bai +Yasunori Mahata Yurii Rashkovskii Zain Memon Zaiste! Zilin Du zimbatm +zqh diff --git a/CHANGELOG.md b/CHANGELOG.md index 14329ab96c..07f11d90f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 0.12.0 (2014-06-05) + +#### Notable features since 0.11.0 +* New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file +* Inherit file permissions from the host on `ADD` +* New `pause` and `unpause` commands to allow pausing and unpausing of containers using cgroup freezer +* The `images` command has a `-f`/`--filter` option to filter the list of images +* Add `--force-rm` to clean up after a failed build +* Standardize JSON keys in Remote API to CamelCase +* Pull from a docker run now assumes `latest` tag if not specified +* Enhance security on Linux capabilities and device nodes + ## 0.11.1 (2014-05-07) #### Registry diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d77afbc443..cb5c806514 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -77,13 +77,8 @@ well as a clean documentation build. See ``docs/README.md`` for more information on building the docs and how docs get released. Write clean code. Universally formatted code promotes ease of writing, reading, -and maintenance. Always run `go fmt` before committing your changes. 
Most -editors have plugins that do this automatically, and there's also a git -pre-commit hook: - -``` -curl -o .git/hooks/pre-commit https://raw.githubusercontent.com/edsrzf/gofmt-git-hook/master/fmt-check && chmod +x .git/hooks/pre-commit -``` +and maintenance. Always run `gofmt -s -w file.go` on each changed file before +committing your changes. Most editors have plugins that do this automatically. Pull request descriptions should be as clear as possible and include a reference to all the issues that they address. @@ -108,10 +103,8 @@ same commit so that a revert would remove all traces of the feature or fix. Commits that fix or close an issue should include a reference like `Closes #XXX` or `Fixes #XXX`, which will automatically close the issue when merged. -Add your name to the AUTHORS file, but make sure the list is sorted and your -name and email address match your git configuration. The AUTHORS file is -regenerated occasionally from the git commit history, so a mismatch may result -in your changes being overwritten. +Please do not add yourself to the AUTHORS file, as it is regenerated +regularly from the Git history. ### Merge approval @@ -182,7 +175,7 @@ One way to automate this is to customise your git ``commit.template`` by adding a ``prepare-commit-msg`` hook to your docker checkout: ``` -curl -o .git/hooks/prepare-commit-msg https://raw.github.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg +curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg ``` * Note: the above script expects to find your GitHub user name in ``git config --get github.user`` @@ -192,7 +185,10 @@ curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/dotcloud/docker/mas There are several exceptions to the signing requirement. Currently these are: * Your patch fixes spelling or grammar errors. -* Your patch is a single line change to documentation. +* Your patch is a single line change to documentation contained in the + `docs` directory. +* Your patch fixes Markdown formatting or syntax errors in the + documentation contained in the `docs` directory.
If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io) diff --git a/Dockerfile b/Dockerfile index be2233ff87..283e0a3262 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,7 +24,7 @@ # docker-version 0.6.1 -FROM ubuntu:13.10 +FROM ubuntu:14.04 MAINTAINER Tianon Gravi (@tianon) # Packaged dependencies @@ -41,6 +41,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ libapparmor-dev \ libcap-dev \ libsqlite3-dev \ + lxc=1.0* \ mercurial \ pandoc \ reprepro \ @@ -49,10 +50,6 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ s3cmd=1.1.0* \ --no-install-recommends -# Get and compile LXC 0.8 (since it is the most stable) -RUN git clone --no-checkout https://github.com/lxc/lxc.git /usr/local/lxc && cd /usr/local/lxc && git checkout -q lxc-0.8.0 -RUN cd /usr/local/lxc && ./autogen.sh && ./configure --disable-docs && make && make install - # Get lvm2 source for compiling statically RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags @@ -84,7 +81,7 @@ RUN go get code.google.com/p/go.tools/cmd/cover RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 # Get the "busybox" image source so we can build locally instead of pulling -RUN git clone https://github.com/jpetazzo/docker-busybox.git /docker-busybox +RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox # Setup s3cmd config RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg @@ -92,6 +89,10 @@ RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_ # Set user.email so crosbymichael's in-container merge commits go smoothly RUN git config --global user.email 'docker-dummy@example.com' +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + VOLUME /var/lib/docker WORKDIR /go/src/github.com/dotcloud/docker ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/MAINTAINERS b/MAINTAINERS index 581953cf8d..1543c8f823 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2,6 +2,8 @@ Solomon Hykes (@shykes) Guillaume J. Charmes (@creack) Victor Vieux (@vieux) Michael Crosby (@crosbymichael) +.mailmap: Tianon Gravi (@tianon) .travis.yml: Tianon Gravi (@tianon) +AUTHORS: Tianon Gravi (@tianon) Dockerfile: Tianon Gravi (@tianon) Makefile: Tianon Gravi (@tianon) diff --git a/Makefile b/Makefile index a4c8658e08..a8e4dc5ca1 100644 --- a/Makefile +++ b/Makefile @@ -35,7 +35,7 @@ docs-release: docs-build $(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./release.sh test: build - $(DOCKER_RUN_DOCKER) hack/make.sh binary test-unit test-integration test-integration-cli + $(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli test-unit: build $(DOCKER_RUN_DOCKER) hack/make.sh test-unit diff --git a/README.md b/README.md index fae1bb916b..c965efafe8 100644 --- a/README.md +++ b/README.md @@ -190,3 +190,9 @@ It is your responsibility to ensure that your use and/or transfer does not violate applicable laws. For more information, please see http://www.bis.doc.gov + + +Licensing +========= +Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text. 
+ diff --git a/VERSION b/VERSION index af88ba8248..ac454c6a1f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.11.1 +0.12.0 diff --git a/api/README.md b/api/README.md new file mode 100644 index 0000000000..3ef33f8c29 --- /dev/null +++ b/api/README.md @@ -0,0 +1,5 @@ +This directory contains code pertaining to the Docker API: + + - Used by the docker client when communicating with the docker daemon + + - Used by third party tools wishing to interface with the docker daemon diff --git a/api/client/cli.go b/api/client/cli.go index 49fb3c978f..bb5d191e16 100644 --- a/api/client/cli.go +++ b/api/client/cli.go @@ -23,6 +23,9 @@ var funcMap = template.FuncMap{ } func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) { + if len(name) == 0 { + return nil, false + } methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:]) method := reflect.ValueOf(cli).MethodByName(methodName) if !method.IsValid() { @@ -73,7 +76,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsC } if in != nil { - if file, ok := in.(*os.File); ok { + if file, ok := out.(*os.File); ok { terminalFd = file.Fd() isTerminal = term.IsTerminal(terminalFd) } diff --git a/api/client/commands.go b/api/client/commands.go index 89f9b0a4c4..a6a2e35539 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -13,7 +13,7 @@ import ( "os" "os/exec" "path" - goruntime "runtime" + "runtime" "strconv" "strings" "syscall" @@ -26,11 +26,14 @@ import ( "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/opts" "github.com/dotcloud/docker/pkg/signal" "github.com/dotcloud/docker/pkg/term" + "github.com/dotcloud/docker/pkg/units" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" + "github.com/dotcloud/docker/utils/filters" ) func (cli *DockerCli) CmdHelp(args ...string) error { @@ -46,7 +49,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error { help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", api.DEFAULTUNIXSOCKET) for _, command := range [][]string{ {"attach", "Attach to a running container"}, - {"build", "Build a container from a Dockerfile"}, + {"build", "Build an image from a Dockerfile"}, {"commit", "Create a new image from a container's changes"}, {"cp", "Copy files/folders from the containers filesystem to the host path"}, {"diff", "Inspect changes on a container's filesystem"}, @@ -62,6 +65,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error { {"login", "Register or Login to the docker registry server"}, {"logs", "Fetch the logs of a container"}, {"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"}, + {"pause", "Pause all processes within a container"}, {"ps", "List containers"}, {"pull", "Pull an image or a repository from the docker registry server"}, {"push", "Push an image or a repository to the docker registry server"}, @@ -75,6 +79,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error { {"stop", "Stop a running container"}, {"tag", "Tag an image into a repository"}, {"top", "Lookup the running processes of a container"}, + {"unpause", "Unpause a paused container"}, {"version", "Show the docker version information"}, {"wait", "Block until a container stops, then print its exit code"}, } { @@ -104,11 +109,12 @@ func
(cli *DockerCli) CmdInsert(args ...string) error { } func (cli *DockerCli) CmdBuild(args ...string) error { - cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH") + cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new image from the source code at PATH") tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success") suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers") noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") + forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers, even after unsuccessful builds") if err := cmd.Parse(args); err != nil { return nil } @@ -160,6 +166,9 @@ func (cli *DockerCli) CmdBuild(args ...string) error { if _, err = os.Stat(filename); os.IsNotExist(err) { return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0)) } + if err = utils.ValidateContextDirectory(root); err != nil { + return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err) + } context, err = archive.Tar(root, archive.Uncompressed) } var body io.Reader @@ -167,9 +176,9 @@ func (cli *DockerCli) CmdBuild(args ...string) error { // FIXME: ProgressReader shouldn't be this annoying to use if context != nil { sf := utils.NewStreamFormatter(false) - body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Uploading context") + body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Sending build context to Docker daemon") } - // Upload the build context + // Send the build context v := &url.Values{} //Check if the given image name can be resolved @@ -193,6 +202,12 @@ func (cli *DockerCli) CmdBuild(args ...string) error { } if *rm { v.Set("rm", "1") + } else { + v.Set("rm", "0") + } + + if *forceRm { + v.Set("forcerm", "1") } cli.LoadConfigFile() @@ -359,7 +374,7 @@ func (cli *DockerCli) CmdVersion(args ...string) error { fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION) } fmt.Fprintf(cli.out, "Client API version: %s\n", api.APIVERSION) - fmt.Fprintf(cli.out, "Go version (client): %s\n", goruntime.Version()) + fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version()) if dockerversion.GITCOMMIT != "" { fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT) } @@ -384,16 +399,8 @@ func (cli *DockerCli) CmdVersion(args ...string) error { if apiVersion := remoteVersion.Get("ApiVersion"); apiVersion != "" { fmt.Fprintf(cli.out, "Server API version: %s\n", apiVersion) } - fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit")) fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion")) - release := utils.GetReleaseVersion() - if release != "" { - fmt.Fprintf(cli.out, "Last stable version: %s", release) - if (dockerversion.VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(dockerversion.VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) { - fmt.Fprintf(cli.out, ", please update docker") - } - fmt.Fprintf(cli.out, "\n") - } + fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit")) return nil } @@ -555,10 +562,14 @@ func (cli *DockerCli) forwardAllSignals(cid 
string) chan os.Signal { func (cli *DockerCli) CmdStart(args ...string) error { var ( + cErr chan error + tty bool + cmd = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container") attach = cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process") openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin") ) + if err := cmd.Parse(args); err != nil { return nil } @@ -567,29 +578,24 @@ func (cli *DockerCli) CmdStart(args ...string) error { return nil } - var ( - cErr chan error - tty bool - ) if *attach || *openStdin { if cmd.NArg() > 1 { return fmt.Errorf("You cannot start and attach multiple containers at once.") } - body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) + stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) if err != nil { return err } - container := &api.Container{} - err = json.Unmarshal(body, container) - if err != nil { + env := engine.Env{} + if err := env.Decode(stream); err != nil { return err } + config := env.GetSubEnv("Config") + tty = config.GetBool("Tty") - tty = container.Config.Tty - - if !container.Config.Tty { + if !tty { sigc := cli.forwardAllSignals(cmd.Arg(0)) defer signal.StopCatch(sigc) } @@ -598,15 +604,17 @@ func (cli *DockerCli) CmdStart(args ...string) error { v := url.Values{} v.Set("stream", "1") - if *openStdin && container.Config.OpenStdin { + + if *openStdin && config.GetBool("OpenStdin") { v.Set("stdin", "1") in = cli.in } + v.Set("stdout", "1") v.Set("stderr", "1") cErr = utils.Go(func() error { - return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil) + return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil) }) } @@ -643,6 +651,52 @@ func (cli *DockerCli) CmdStart(args ...string) error { return nil } +func (cli *DockerCli) CmdUnpause(args ...string) error { + cmd := cli.Subcmd("unpause", "CONTAINER", "Unpause all processes within a container") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + var encounteredError error + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/unpause", name), nil, false)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to unpause container named %s", name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) CmdPause(args ...string) error { + cmd := cli.Subcmd("pause", "CONTAINER", "Pause all processes within a container") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + var encounteredError error + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/pause", name), nil, false)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to pause container named %s", name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + func (cli *DockerCli) CmdInspect(args ...string) error { cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image") tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.")
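The new `pause`/`unpause` commands above are thin wrappers around two new Remote API endpoints registered later in `api/server/server.go`. A minimal sketch of driving the pause endpoint directly over the daemon's default Unix socket; the container name `mycontainer` is a hypothetical placeholder:

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"net/http/httputil"
)

func main() {
	// Dial the daemon's default Unix socket (api.DEFAULTUNIXSOCKET).
	conn, err := net.Dial("unix", "/var/run/docker.sock")
	if err != nil {
		panic(err)
	}
	client := httputil.NewClientConn(conn, nil)
	defer client.Close()

	// POST /containers/{name}/pause, the same call CmdPause makes.
	req, err := http.NewRequest("POST", "/v1.12/containers/mycontainer/pause", nil)
	if err != nil {
		panic(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println(resp.Status) // the handler answers 204 No Content on success
}
```

Swapping `pause` for `unpause` in the URL exercises the second endpoint; both reply with no body.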
@@ -769,34 +823,38 @@ func (cli *DockerCli) CmdPort(args ...string) error { } var ( - port = cmd.Arg(1) - proto = "tcp" - parts = strings.SplitN(port, "/", 2) - container api.Container + port = cmd.Arg(1) + proto = "tcp" + parts = strings.SplitN(port, "/", 2) ) if len(parts) == 2 && len(parts[1]) != 0 { port = parts[0] proto = parts[1] } - body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) + + stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) if err != nil { return err } - err = json.Unmarshal(body, &container) - if err != nil { + env := engine.Env{} + if err := env.Decode(stream); err != nil { + return err + } + ports := nat.PortMap{} + if err := env.GetSubEnv("NetworkSettings").GetJson("Ports", &ports); err != nil { return err } - if frontends, exists := container.NetworkSettings.Ports[nat.Port(port+"/"+proto)]; exists && frontends != nil { + if frontends, exists := ports[nat.Port(port+"/"+proto)]; exists && frontends != nil { for _, frontend := range frontends { fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort) } - } else { - return fmt.Errorf("Error: No public port '%s' published for %s", cmd.Arg(1), cmd.Arg(0)) + return nil } - return nil + + return fmt.Errorf("Error: No public port '%s' published for %s", cmd.Arg(1), cmd.Arg(0)) } // 'docker rmi IMAGE' removes all images with the name IMAGE @@ -884,14 +942,14 @@ func (cli *DockerCli) CmdHistory(args ...string) error { fmt.Fprintf(w, "%s\t", utils.TruncateID(outID)) } - fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) + fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) if *noTrunc { fmt.Fprintf(w, "%s\t", out.Get("CreatedBy")) } else { fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45)) } - fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("Size"))) + fmt.Fprintf(w, "%s\n", units.HumanSize(out.GetInt64("Size"))) } else { if *noTrunc { fmt.Fprintln(w, outID) @@ -1142,6 +1200,9 @@ func (cli *DockerCli) CmdImages(args ...string) error { flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format") flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format") + var flFilter opts.ListOpts + cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 'dangling=true')") + if err := cmd.Parse(args); err != nil { return nil } @@ -1150,11 +1211,32 @@ func (cli *DockerCli) CmdImages(args ...string) error { return nil } - filter := cmd.Arg(0) + // Consolidate all filter flags, and sanity check them early. + // They'll get processed in the daemon/server. + imageFilterArgs := filters.Args{} + for _, f := range flFilter.GetAll() { + var err error + imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) + if err != nil { + return err + } + } + matchName := cmd.Arg(0) // FIXME: --viz and --tree are deprecated. Remove them in a future version.
if *flViz || *flTree { - body, _, err := readBody(cli.call("GET", "/images/json?all=1", nil, false)) + v := url.Values{ + "all": []string{"1"}, + } + if len(imageFilterArgs) > 0 { + filterJson, err := filters.ToParam(imageFilterArgs) + if err != nil { + return err + } + v.Set("filters", filterJson) + } + + body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false)) if err != nil { return err } @@ -1184,13 +1266,13 @@ func (cli *DockerCli) CmdImages(args ...string) error { } } - if filter != "" { - if filter == image.Get("Id") || filter == utils.TruncateID(image.Get("Id")) { + if matchName != "" { + if matchName == image.Get("Id") || matchName == utils.TruncateID(image.Get("Id")) { startImage = image } for _, repotag := range image.GetList("RepoTags") { - if repotag == filter { + if repotag == matchName { startImage = image } } @@ -1208,7 +1290,7 @@ func (cli *DockerCli) CmdImages(args ...string) error { root := engine.NewTable("Created", 1) root.Add(startImage) cli.WalkTree(*noTrunc, root, byParent, "", printNode) - } else if filter == "" { + } else if matchName == "" { cli.WalkTree(*noTrunc, roots, byParent, "", printNode) } if *flViz { @@ -1216,8 +1298,17 @@ func (cli *DockerCli) CmdImages(args ...string) error { } } else { v := url.Values{} + if len(imageFilterArgs) > 0 { + filterJson, err := filters.ToParam(imageFilterArgs) + if err != nil { + return err + } + v.Set("filters", filterJson) + } + if cmd.NArg() == 1 { - v.Set("filter", filter) + // FIXME rename this parameter, to not be confused with the filters flag + v.Set("filter", matchName) } if *all { v.Set("all", "1") @@ -1249,7 +1340,7 @@ func (cli *DockerCli) CmdImages(args ...string) error { } if !*quiet { - fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), utils.HumanSize(out.GetInt64("VirtualSize"))) + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), units.HumanSize(out.GetInt64("VirtualSize"))) } else { fmt.Fprintln(w, outID) } @@ -1323,7 +1414,7 @@ func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix stri imageID = utils.TruncateID(image.Get("Id")) } - fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, utils.HumanSize(image.GetInt64("VirtualSize"))) + fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(image.GetInt64("VirtualSize"))) if image.GetList("RepoTags")[0] != ":" { fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", ")) } else { @@ -1408,12 +1499,12 @@ func (cli *DockerCli) CmdPs(args ...string) error { outCommand = utils.Trunc(outCommand, 20) } ports.ReadListFrom([]byte(out.Get("Ports"))) - fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ",")) + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ",")) if *size { if out.GetInt("SizeRootFs") > 0 { - fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.GetInt64("SizeRw")), utils.HumanSize(out.GetInt64("SizeRootFs"))) + fmt.Fprintf(w, "%s (virtual %s)\n", units.HumanSize(out.GetInt64("SizeRw")), 
units.HumanSize(out.GetInt64("SizeRootFs"))) } else { - fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("SizeRw"))) + fmt.Fprintf(w, "%s\n", units.HumanSize(out.GetInt64("SizeRw"))) } } else { fmt.Fprint(w, "\n") @@ -1581,72 +1672,84 @@ func (cli *DockerCli) CmdDiff(args ...string) error { } func (cli *DockerCli) CmdLogs(args ...string) error { - cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container") - follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") - times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps") + var ( + cmd = cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container") + follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") + times = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps") + ) + if err := cmd.Parse(args); err != nil { return nil } + if cmd.NArg() != 1 { cmd.Usage() return nil } name := cmd.Arg(0) + stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false) - body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) if err != nil { return err } - container := &api.Container{} - err = json.Unmarshal(body, container) - if err != nil { + env := engine.Env{} + if err := env.Decode(stream); err != nil { return err } v := url.Values{} v.Set("stdout", "1") v.Set("stderr", "1") + if *times { v.Set("timestamps", "1") } - if *follow && container.State.Running { + + if *follow { v.Set("follow", "1") } - if err := cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil { - return err - } - return nil + return cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), env.GetSubEnv("Config").GetBool("Tty"), nil, cli.out, cli.err, nil) }
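The rewritten `CmdLogs` boils down to a single GET whose behaviour is controlled by boolean query parameters (and `--follow` no longer requires the container to be running). A standalone sketch of the query it builds, with a hypothetical container name:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	follow, timestamps := true, false

	// The same values CmdLogs sets before calling streamHelper.
	v := url.Values{}
	v.Set("stdout", "1")
	v.Set("stderr", "1")
	if timestamps {
		v.Set("timestamps", "1")
	}
	if follow {
		v.Set("follow", "1")
	}
	fmt.Println("/containers/mycontainer/logs?" + v.Encode())
	// Prints: /containers/mycontainer/logs?follow=1&stderr=1&stdout=1
}
```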
func (cli *DockerCli) CmdAttach(args ...string) error { - cmd := cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container") - noStdin := cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin") - proxy := cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-tty mode)") + var ( + cmd = cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container") + noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin") + proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-tty mode)") + ) + if err := cmd.Parse(args); err != nil { return nil } + if cmd.NArg() != 1 { cmd.Usage() return nil } name := cmd.Arg(0) - body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) + + stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false) if err != nil { return err } - container := &api.Container{} - err = json.Unmarshal(body, container) - if err != nil { + env := engine.Env{} + if err := env.Decode(stream); err != nil { return err } - if !container.State.Running { + if !env.GetSubEnv("State").GetBool("Running") { return fmt.Errorf("You cannot attach to a stopped container, start it first") } - if container.Config.Tty && cli.isTerminal { + var ( + config = env.GetSubEnv("Config") + tty = config.GetBool("Tty") + ) + + if tty && cli.isTerminal { if err := cli.monitorTtySize(cmd.Arg(0)); err != nil { utils.Debugf("Error monitoring TTY size: %s", err) } @@ -1656,19 +1759,20 @@ func (cli *DockerCli) CmdAttach(args ...string) error { v := url.Values{} v.Set("stream", "1") - if !*noStdin && container.Config.OpenStdin { + if !*noStdin && config.GetBool("OpenStdin") { v.Set("stdin", "1") in = cli.in } + v.Set("stdout", "1") v.Set("stderr", "1") - if *proxy && !container.Config.Tty { + if *proxy && !tty { sigc := cli.forwardAllSignals(cmd.Arg(0)) defer signal.StopCatch(sigc) } - if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil { + if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil); err != nil { return err } @@ -1686,7 +1790,8 @@ func (cli *DockerCli) CmdAttach(args ...string) error { func (cli *DockerCli) CmdSearch(args ...string) error { cmd := cli.Subcmd("search", "TERM", "Search the docker index for images") noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - trusted := cmd.Bool([]string{"t", "#trusted", "-trusted"}, false, "Only show trusted builds") + trusted := cmd.Bool([]string{"#t", "#trusted", "#-trusted"}, false, "Only show trusted builds") + automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds") stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least xxx stars") if err := cmd.Parse(args); err != nil { return nil @@ -1709,9 +1814,9 @@ func (cli *DockerCli) CmdSearch(args ...string) error { return err } w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) - fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tTRUSTED\n") + fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") for _, out := range outs.Data { - if (*trusted && !out.GetBool("is_trusted")) || (*stars > out.GetInt("star_count")) { + if ((*automated || *trusted) && (!out.GetBool("is_trusted") && !out.GetBool("is_automated"))) || (*stars > out.GetInt("star_count")) { continue } desc := strings.Replace(out.Get("description"), "\n", " ", -1) @@ -1725,7 +1830,7 @@ } fmt.Fprint(w, "\t") - if out.GetBool("is_trusted") { + if out.GetBool("is_automated") || out.GetBool("is_trusted") { fmt.Fprint(w, "[OK]") } fmt.Fprint(w, "\n") @@ -1839,6 +1944,10 @@ func (cli *DockerCli) CmdRun(args ...string) error { v := url.Values{} repos, tag := utils.ParseRepositoryTag(config.Image) + // pull only the image tagged 'latest' if no tag was specified + if tag == "" { + tag = "latest" + } v.Set("fromImage", repos) v.Set("tag", tag) @@ -2058,7 +2167,7 @@ func (cli *DockerCli) CmdCp(args ...string) error { } if statusCode == 200 { - if err := archive.Untar(stream, copyData.Get("HostPath"), nil); err != nil { + if err := archive.Untar(stream, copyData.Get("HostPath"), &archive.TarOptions{NoLchown: true}); err != nil { return err } }
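The tag-defaulting block added to `CmdRun` above is what makes a pull triggered by `docker run` fetch only `:latest` instead of every tag. A rough, hypothetical re-implementation of the split-and-default logic, for illustration only (the real code uses `utils.ParseRepositoryTag`):

```go
package main

import (
	"fmt"
	"strings"
)

// parseRepositoryTag splits "name[:tag]"; a colon that is part of a
// registry host:port (i.e. followed by a "/") is not a tag separator.
func parseRepositoryTag(name string) (repo, tag string) {
	if i := strings.LastIndex(name, ":"); i >= 0 && !strings.Contains(name[i+1:], "/") {
		return name[:i], name[i+1:]
	}
	return name, ""
}

func main() {
	repo, tag := parseRepositoryTag("ubuntu")
	if tag == "" {
		tag = "latest" // the new default applied by CmdRun
	}
	fmt.Println(repo, tag) // ubuntu latest
}
```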
diff --git a/api/client/utils.go b/api/client/utils.go index 8f303dcd98..2620683708 100644 --- a/api/client/utils.go +++ b/api/client/utils.go @@ -200,7 +200,7 @@ func waitForExit(cli *DockerCli, containerId string) (int, error) { // getExitCode performs an inspect on the container. It returns // the running state and the exit code. func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { - body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false)) + stream, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil, false) if err != nil { // If we can't connect, then the daemon probably died. if err != ErrConnectionRefused { @@ -208,11 +208,14 @@ func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { } return false, -1, nil } - c := &api.Container{} - if err := json.Unmarshal(body, c); err != nil { + + var result engine.Env + if err := result.Decode(stream); err != nil { return false, -1, err } - return c.State.Running, c.State.ExitCode, nil + + state := result.GetSubEnv("State") + return state.GetBool("Running"), state.GetInt("ExitCode"), nil } func (cli *DockerCli) monitorTtySize(id string) error { diff --git a/api/common.go b/api/common.go index af4ced4f6e..a20c5d7d1c 100644 --- a/api/common.go +++ b/api/common.go @@ -2,15 +2,16 @@ package api import ( "fmt" + "mime" + "strings" + "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/pkg/version" "github.com/dotcloud/docker/utils" - "mime" - "strings" ) const ( - APIVERSION version.Version = "1.11" + APIVERSION version.Version = "1.12" DEFAULTHTTPHOST = "127.0.0.1" DEFAULTUNIXSOCKET = "/var/run/docker.sock" ) @@ -30,7 +31,7 @@ func DisplayablePorts(ports *engine.Table) string { ports.Sort() for _, port := range ports.Data { if port.Get("IP") == "" { - result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PublicPort"), port.Get("Type"))) + result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PrivatePort"), port.Get("Type"))) } else { result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type"))) } diff --git a/api/container.go b/api/container.go deleted file mode 100644 index 4cc73b2252..0000000000 --- a/api/container.go +++ /dev/null @@ -1,18 +0,0 @@ -package api - -import ( - "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/runconfig" -) - -type Container struct { - Config runconfig.Config - HostConfig runconfig.HostConfig - State struct { - Running bool - ExitCode int - } - NetworkSettings struct { - Ports nat.PortMap - } -} diff --git a/api/server/server.go b/api/server/server.go index 3c93a3478d..61407b2648 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -122,17 +122,17 @@ func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter var ( authConfig, err = ioutil.ReadAll(r.Body) job = eng.Job("auth") - status string + stdoutBuffer = bytes.NewBuffer(nil) ) if err != nil { return err } job.Setenv("authConfig", string(authConfig)) - job.Stdout.AddString(&status) + job.Stdout.Add(stdoutBuffer) if err = job.Run(); err != nil { return err } - if status != "" { + if status := engine.Tail(stdoutBuffer, 1); status != "" { var env engine.Env env.Set("Status", status) return writeJSON(w, http.StatusOK, env) @@ -165,6 +165,36 @@ func postContainersKill(eng *engine.Engine, version version.Version, w http.Resp return nil } +func postContainersPause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + job := eng.Job("pause", vars["name"]) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func postContainersUnpause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + job := eng.Job("unpause", vars["name"]) + if err := job.Run(); err != nil { + return 
err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + func getContainersExport(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") @@ -188,6 +218,8 @@ func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseW job = eng.Job("images") ) + job.Setenv("filters", r.Form.Get("filters")) + // FIXME this parameter could just be a match filter job.Setenv("filter", r.Form.Get("filter")) job.Setenv("all", r.Form.Get("all")) @@ -244,7 +276,7 @@ func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWrite return err } - var job = eng.Job("events", r.RemoteAddr) + var job = eng.Job("events") streamJSON(job, w, true) job.Setenv("since", r.Form.Get("since")) job.Setenv("until", r.Form.Get("until")) @@ -338,7 +370,7 @@ func getContainersLogs(eng *engine.Engine, version version.Version, w http.Respo } var ( - job = eng.Job("inspect", vars["name"], "container") + job = eng.Job("container_inspect", vars["name"]) c, err = job.Stdout.AddEnv() ) if err != nil { @@ -393,9 +425,10 @@ func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWrit return err } var ( - config engine.Env - env engine.Env - job = eng.Job("commit", r.Form.Get("container")) + config engine.Env + env engine.Env + job = eng.Job("commit", r.Form.Get("container")) + stdoutBuffer = bytes.NewBuffer(nil) ) if err := config.Decode(r.Body); err != nil { utils.Errorf("%s", err) @@ -407,12 +440,11 @@ func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWrit job.Setenv("comment", r.Form.Get("comment")) job.SetenvSubEnv("config", &config) - var id string - job.Stdout.AddString(&id) + job.Stdout.Add(stdoutBuffer) if err := job.Run(); err != nil { return err } - env.Set("Id", id) + env.Set("Id", engine.Tail(stdoutBuffer, 1)) return writeJSON(w, http.StatusCreated, env) } @@ -502,32 +534,6 @@ func getImagesSearch(eng *engine.Engine, version version.Version, w http.Respons return job.Run() } -// FIXME: 'insert' is deprecated as of 0.10, and should be removed in a future version. 
-func postImagesInsert(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - job := eng.Job("insert", vars["name"], r.Form.Get("url"), r.Form.Get("path")) - if version.GreaterThan("1.0") { - job.SetenvBool("json", true) - streamJSON(job, w, false) - } else { - job.Stdout.Add(w) - } - if err := job.Run(); err != nil { - if !job.Stdout.Used() { - return err - } - sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) - w.Write(sf.FormatError(err)) - } - - return nil -} - func postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") @@ -603,17 +609,17 @@ func postContainersCreate(eng *engine.Engine, version version.Version, w http.Re return nil } var ( - out engine.Env - job = eng.Job("create", r.Form.Get("name")) - outWarnings []string - outId string - warnings = bytes.NewBuffer(nil) + out engine.Env + job = eng.Job("create", r.Form.Get("name")) + outWarnings []string + stdoutBuffer = bytes.NewBuffer(nil) + warnings = bytes.NewBuffer(nil) ) if err := job.DecodeEnv(r.Body); err != nil { return err } // Read container ID from the first line of stdout - job.Stdout.AddString(&outId) + job.Stdout.Add(stdoutBuffer) // Read warnings from stderr job.Stderr.Add(warnings) if err := job.Run(); err != nil { @@ -624,7 +630,7 @@ func postContainersCreate(eng *engine.Engine, version version.Version, w http.Re for scanner.Scan() { outWarnings = append(outWarnings, scanner.Text()) } - out.Set("Id", outId) + out.Set("Id", engine.Tail(stdoutBuffer, 1)) out.SetList("Warnings", outWarnings) return writeJSON(w, http.StatusCreated, out) } @@ -720,20 +726,16 @@ func postContainersWait(eng *engine.Engine, version version.Version, w http.Resp return fmt.Errorf("Missing parameter") } var ( - env engine.Env - status string - job = eng.Job("wait", vars["name"]) + env engine.Env + stdoutBuffer = bytes.NewBuffer(nil) + job = eng.Job("wait", vars["name"]) ) - job.Stdout.AddString(&status) + job.Stdout.Add(stdoutBuffer) if err := job.Run(); err != nil { return err } - // Parse a 16-bit encoded integer to map typical unix exit status. 
- _, err := strconv.ParseInt(status, 10, 16) - if err != nil { - return err - } - env.Set("StatusCode", status) + + env.Set("StatusCode", engine.Tail(stdoutBuffer, 1)) return writeJSON(w, http.StatusOK, env) } @@ -759,7 +761,7 @@ func postContainersAttach(eng *engine.Engine, version version.Version, w http.Re } var ( - job = eng.Job("inspect", vars["name"], "container") + job = eng.Job("container_inspect", vars["name"]) c, err = job.Stdout.AddEnv() ) if err != nil { @@ -823,7 +825,7 @@ func wsContainersAttach(eng *engine.Engine, version version.Version, w http.Resp return fmt.Errorf("Missing parameter") } - if err := eng.Job("inspect", vars["name"], "container").Run(); err != nil { + if err := eng.Job("container_inspect", vars["name"]).Run(); err != nil { return err } @@ -851,9 +853,11 @@ func getContainersByName(eng *engine.Engine, version version.Version, w http.Res if vars == nil { return fmt.Errorf("Missing parameter") } - var job = eng.Job("inspect", vars["name"], "container") + var job = eng.Job("container_inspect", vars["name"]) + if version.LessThan("1.12") { + job.SetenvBool("dirty", true) + } streamJSON(job, w, false) - job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job return job.Run() } @@ -861,9 +865,11 @@ func getImagesByName(eng *engine.Engine, version version.Version, w http.Respons if vars == nil { return fmt.Errorf("Missing parameter") } - var job = eng.Job("inspect", vars["name"], "image") + var job = eng.Job("image_inspect", vars["name"]) + if version.LessThan("1.12") { + job.SetenvBool("dirty", true) + } streamJSON(job, w, false) - job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job return job.Run() } @@ -907,12 +913,20 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite } else { job.Stdout.Add(utils.NewWriteFlusher(w)) } + + if r.FormValue("forcerm") == "1" && version.GreaterThanOrEqualTo("1.12") { + job.Setenv("rm", "1") + } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { + job.Setenv("rm", "1") + } else { + job.Setenv("rm", r.FormValue("rm")) + } job.Stdin.Add(r.Body) job.Setenv("remote", r.FormValue("remote")) job.Setenv("t", r.FormValue("t")) job.Setenv("q", r.FormValue("q")) job.Setenv("nocache", r.FormValue("nocache")) - job.Setenv("rm", r.FormValue("rm")) + job.Setenv("forcerm", r.FormValue("forcerm")) job.SetenvJson("authConfig", authConfig) job.SetenvJson("configFile", configFile) @@ -1071,12 +1085,13 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st "/commit": postCommit, "/build": postBuild, "/images/create": postImagesCreate, - "/images/{name:.*}/insert": postImagesInsert, "/images/load": postImagesLoad, "/images/{name:.*}/push": postImagesPush, "/images/{name:.*}/tag": postImagesTag, "/containers/create": postContainersCreate, "/containers/{name:.*}/kill": postContainersKill, + "/containers/{name:.*}/pause": postContainersPause, + "/containers/{name:.*}/unpause": postContainersUnpause, "/containers/{name:.*}/restart": postContainersRestart, "/containers/{name:.*}/start": postContainersStart, "/containers/{name:.*}/stop": postContainersStop, @@ -1193,6 +1208,7 @@ func changeGroup(addr string, nameOrGid string) error { // ListenAndServe sets up the required http.Server and gets it listening for // each addr passed in and does protocol specific checking. 
func ListenAndServe(proto, addr string, job *engine.Job) error { + var l net.Listener r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version")) if err != nil { return err @@ -1208,7 +1224,20 @@ func ListenAndServe(proto, addr string, job *engine.Job) error { } } - l, err := listenbuffer.NewListenBuffer(proto, addr, activationLock) + var oldmask int + if proto == "unix" { + oldmask = syscall.Umask(0777) + } + + if job.GetenvBool("BufferRequests") { + l, err = listenbuffer.NewListenBuffer(proto, addr, activationLock) + } else { + l, err = net.Listen(proto, addr) + } + + if proto == "unix" { + syscall.Umask(oldmask) + } if err != nil { return err } @@ -1246,9 +1275,6 @@ func ListenAndServe(proto, addr string, job *engine.Job) error { log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") } case "unix": - if err := os.Chmod(addr, 0660); err != nil { - return err - } socketGroup := job.Getenv("SocketGroup") if socketGroup != "" { if err := changeGroup(addr, socketGroup); err != nil { @@ -1260,6 +1286,9 @@ func ListenAndServe(proto, addr string, job *engine.Job) error { } } } + if err := os.Chmod(addr, 0660); err != nil { + return err + } default: return fmt.Errorf("Invalid protocol format.") } @@ -1280,10 +1309,6 @@ func ServeApi(job *engine.Job) engine.Status { ) activationLock = make(chan struct{}) - if err := job.Eng.Register("acceptconnections", AcceptConnections); err != nil { - return job.Error(err) - } - for _, protoAddr := range protoAddrs { protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { @@ -1310,7 +1335,9 @@ func AcceptConnections(job *engine.Job) engine.Status { go systemd.SdNotify("READY=1") // close the lock so the listeners start accepting connections - close(activationLock) + if activationLock != nil { + close(activationLock) + } return engine.StatusOK } diff --git a/archive/README.md b/archive/README.md new file mode 100644 index 0000000000..4eb0c04181 --- /dev/null +++ b/archive/README.md @@ -0,0 +1,3 @@ +This code provides helper functions for dealing with archive files. + +**TODO**: Move this to either `pkg` or (if not possible) to `utils`. 
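Before the archive changes below, a quick orientation: the package's two entry points used throughout the client are `Tar` and `Untar` (`CmdBuild` and `CmdCp` above call exactly these). A minimal round-trip sketch with hypothetical paths:

```go
package main

import "github.com/dotcloud/docker/archive"

func main() {
	// Tar up a directory, uncompressed (as CmdBuild does for the build context).
	tarStream, err := archive.Tar("/tmp/src", archive.Uncompressed)
	if err != nil {
		panic(err)
	}
	// Unpack it somewhere else; a nil *TarOptions keeps the defaults.
	if err := archive.Untar(tarStream, "/tmp/dst", nil); err != nil {
		panic(err)
	}
}
```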
diff --git a/archive/archive.go b/archive/archive.go index 2fac18e99f..76c6e31289 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -1,14 +1,12 @@ package archive import ( + "bufio" "bytes" "compress/bzip2" "compress/gzip" "errors" "fmt" - "github.com/dotcloud/docker/pkg/system" - "github.com/dotcloud/docker/utils" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "os" @@ -17,6 +15,10 @@ import ( "path/filepath" "strings" "syscall" + + "github.com/dotcloud/docker/pkg/system" + "github.com/dotcloud/docker/utils" + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) type ( @@ -26,6 +28,7 @@ type ( TarOptions struct { Includes []string Compression Compression + NoLchown bool } ) @@ -41,26 +44,16 @@ const ( ) func DetectCompression(source []byte) Compression { - sourceLen := len(source) for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, } { - fail := false - if len(m) > sourceLen { + if len(source) < len(m) { utils.Debugf("Len too short") continue } - i := 0 - for _, b := range m { - if b != source[i] { - fail = true - break - } - i++ - } - if !fail { + if bytes.Compare(m, source[:len(m)]) == 0 { return compression } } @@ -74,31 +67,24 @@ func xzDecompress(archive io.Reader) (io.ReadCloser, error) { } func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - buf := make([]byte, 10) - totalN := 0 - for totalN < 10 { - n, err := archive.Read(buf[totalN:]) - if err != nil { - if err == io.EOF { - return nil, fmt.Errorf("Tarball too short") - } - return nil, err - } - totalN += n - utils.Debugf("[tar autodetect] n: %d", n) + buf := bufio.NewReader(archive) + bs, err := buf.Peek(10) + if err != nil { + return nil, err } - compression := DetectCompression(buf) - wrap := io.MultiReader(bytes.NewReader(buf), archive) + utils.Debugf("[tar autodetect] n: %v", bs) + + compression := DetectCompression(bs) switch compression { case Uncompressed: - return ioutil.NopCloser(wrap), nil + return ioutil.NopCloser(buf), nil case Gzip: - return gzip.NewReader(wrap) + return gzip.NewReader(buf) case Bzip2: - return ioutil.NopCloser(bzip2.NewReader(wrap)), nil + return ioutil.NopCloser(bzip2.NewReader(buf)), nil case Xz: - return xzDecompress(wrap) + return xzDecompress(buf) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } @@ -194,7 +180,7 @@ func addTarFile(path, name string, tw *tar.Writer) error { return nil } -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader) error { +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error { // hdr.Mode is in linux format, which we can use for syscalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. setuid bits) @@ -255,7 +241,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader) e return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) } - if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil { + if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown { return err }
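The extra `Lchown` parameter threaded through `createTarFile` is driven by the new `TarOptions.NoLchown` field; `docker cp` (in `commands.go` above) sets it so client-side extraction doesn't attempt to chown the created files. A minimal sketch with hypothetical paths:

```go
package main

import (
	"os"

	"github.com/dotcloud/docker/archive"
)

func main() {
	f, err := os.Open("/tmp/layer.tar") // hypothetical tarball
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Skip os.Lchown during extraction, e.g. when running unprivileged.
	opts := &archive.TarOptions{NoLchown: true}
	if err := archive.Untar(f, "/tmp/extracted", opts); err != nil {
		panic(err)
	}
}
```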
@@ -309,8 +295,11 @@ func escapeName(name string) string { return string(escaped) } -// Tar creates an archive from the directory at `path`, only including files whose relative -// paths are included in `filter`. If `filter` is nil, then all files are included. +// TarFilter creates an archive from the directory at `srcPath` with `options`, and returns it as a +// stream of bytes. +// +// Files are included according to `options.Includes`, defaulting to including all files. +// The stream is compressed according to `options.Compression`, defaulting to Uncompressed. func TarFilter(srcPath string, options *TarOptions) (io.ReadCloser, error) { pipeReader, pipeWriter := io.Pipe() @@ -418,14 +407,16 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error { // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { + if fi.IsDir() && hdr.Name == "." { + continue + } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } - - if err := createTarFile(path, dest, hdr, tr); err != nil { + if err := createTarFile(path, dest, hdr, tr, options == nil || !options.NoLchown); err != nil { return err } diff --git a/archive/archive_test.go b/archive/archive_test.go index 412660139c..72ffd99565 100644 --- a/archive/archive_test.go +++ b/archive/archive_test.go @@ -3,7 +3,6 @@ package archive import ( "bytes" "fmt" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "os" @@ -11,6 +10,8 @@ import ( "path" "testing" "time" + + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) func TestCmdStreamLargeStderr(t *testing.T) { @@ -132,8 +133,37 @@ func TestTarUntar(t *testing.T) { // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} - err := createTarFile("pax_global_header", "some_dir", &hdr, nil) + err := createTarFile("pax_global_header", "some_dir", &hdr, nil, true) if err != nil { t.Fatal(err) } }
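In the same vein as the surrounding tests, here is a hypothetical companion test (not part of this patch) exercising the magic-byte table that the rewritten `DetectCompression` matches against:

```go
package archive

import "testing"

// Hypothetical test: each header should map to its compression format,
// and unrecognised bytes should fall through to Uncompressed.
func TestDetectCompressionMagicBytes(t *testing.T) {
	cases := map[Compression][]byte{
		Bzip2:        {0x42, 0x5A, 0x68},
		Gzip:         {0x1F, 0x8B, 0x08},
		Xz:           {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
		Uncompressed: {0x00, 0x01, 0x02, 0x03, 0x04, 0x05},
	}
	for want, header := range cases {
		if got := DetectCompression(header); got != want {
			t.Errorf("DetectCompression(% x): got %d, want %d", header, got, want)
		}
	}
}
```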
+ for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + t.Fatal(err) + } + if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { + found = true + break + } + } + if !found { + t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") + } +} diff --git a/archive/changes.go b/archive/changes.go index 723e4a7425..1e588b8eb5 100644 --- a/archive/changes.go +++ b/archive/changes.go @@ -3,15 +3,16 @@ package archive import ( "bytes" "fmt" - "github.com/dotcloud/docker/pkg/system" - "github.com/dotcloud/docker/utils" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "os" "path/filepath" "strings" "syscall" "time" + + "github.com/dotcloud/docker/pkg/system" + "github.com/dotcloud/docker/utils" + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) type ChangeType int @@ -293,13 +294,23 @@ func collectFileInfo(sourceDir string) (*FileInfo, error) { // Compare two directories and generate an array of Change objects describing the changes func ChangesDirs(newDir, oldDir string) ([]Change, error) { - oldRoot, err := collectFileInfo(oldDir) - if err != nil { - return nil, err - } - newRoot, err := collectFileInfo(newDir) - if err != nil { - return nil, err + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, err + } } return newRoot.Changes(oldRoot), nil @@ -341,12 +352,13 @@ func ExportChanges(dir string, changes []Change) (Archive, error) { whiteOutDir := filepath.Dir(change.Path) whiteOutBase := filepath.Base(change.Path) whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase) + timestamp := time.Now() hdr := &tar.Header{ Name: whiteOut[1:], Size: 0, - ModTime: time.Now(), - AccessTime: time.Now(), - ChangeTime: time.Now(), + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, } if err := tw.WriteHeader(hdr); err != nil { utils.Debugf("Can't write whiteout header: %s\n", err) diff --git a/archive/diff.go b/archive/diff.go index 87e8ac7dc4..d169669126 100644 --- a/archive/diff.go +++ b/archive/diff.go @@ -2,14 +2,14 @@ package archive import ( "fmt" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "os" "path/filepath" "strings" "syscall" - "time" + + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. @@ -18,15 +18,6 @@ import ( func mkdev(major int64, minor int64) uint32 { return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) } -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - if time.IsZero() { - // Return UTIME_OMIT special value - ts.Sec = 0 - ts.Nsec = ((1 << 30) - 2) - return - } - return syscall.NsecToTimespec(time.UnixNano()) -} // ApplyLayer parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`.
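The ChangesDirs rewrite above scans the old and new directory trees concurrently: one goroutine per tree, with errors funneled through a channel buffered to capacity 2 so that neither goroutine can block on send even when the caller returns early on the first error. Below is a minimal, self-contained sketch of that same pattern; `collectNames` is a stand-in for `collectFileInfo`, not the package's actual helper.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// collectNames is a stand-in for collectFileInfo: it walks a tree and
// returns the relative paths it finds.
func collectNames(dir string) ([]string, error) {
	var names []string
	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(dir, path)
		if err != nil {
			return err
		}
		names = append(names, rel)
		return nil
	})
	return names, err
}

// compareDirs mirrors the ChangesDirs change: one goroutine per directory,
// errors collected through a buffered channel. The buffer of 2 matters: if
// the first result is an error and we return immediately, the second
// goroutine can still complete its send and exit instead of leaking.
func compareDirs(newDir, oldDir string) (newNames, oldNames []string, err error) {
	var err1, err2 error
	errs := make(chan error, 2)
	go func() {
		oldNames, err1 = collectNames(oldDir)
		errs <- err1
	}()
	go func() {
		newNames, err2 = collectNames(newDir)
		errs <- err2
	}()
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			return nil, nil, err
		}
	}
	return newNames, oldNames, nil
}

func main() {
	n, o, err := compareDirs(".", "..")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("new: %d entries, old: %d entries\n", len(n), len(o))
}
```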
@@ -89,7 +80,7 @@ func ApplyLayer(dest string, layer ArchiveReader) error { } defer os.RemoveAll(aufsTempdir) } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr); err != nil { + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil { return err } } @@ -136,7 +127,7 @@ func ApplyLayer(dest string, layer ArchiveReader) error { srcData = tmpFile } - if err := createTarFile(path, dest, srcHdr, srcData); err != nil { + if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil { return err } diff --git a/archive/testdata/broken.tar b/archive/testdata/broken.tar new file mode 100644 index 0000000000..8f10ea6b87 Binary files /dev/null and b/archive/testdata/broken.tar differ diff --git a/archive/time_linux.go b/archive/time_linux.go new file mode 100644 index 0000000000..3448569b1e --- /dev/null +++ b/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/archive/time_unsupported.go b/archive/time_unsupported.go new file mode 100644 index 0000000000..e85aac0540 --- /dev/null +++ b/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/builtins/builtins.go b/builtins/builtins.go index 40d421f154..3e0041c9d7 100644 --- a/builtins/builtins.go +++ b/builtins/builtins.go @@ -1,11 +1,16 @@ package builtins import ( - api "github.com/dotcloud/docker/api/server" + "runtime" + + "github.com/dotcloud/docker/api" + apiserver "github.com/dotcloud/docker/api/server" "github.com/dotcloud/docker/daemon/networkdriver/bridge" + "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/server" + "github.com/dotcloud/docker/utils" ) func Register(eng *engine.Engine) error { @@ -15,12 +20,18 @@ func Register(eng *engine.Engine) error { if err := remote(eng); err != nil { return err } + if err := eng.Register("version", dockerVersion); err != nil { + return err + } return registry.NewService().Install(eng) } // remote: a RESTful api for cross-docker communication func remote(eng *engine.Engine) error { - return eng.Register("serveapi", api.ServeApi) + if err := eng.Register("serveapi", apiserver.ServeApi); err != nil { + return err + } + return eng.Register("acceptconnections", apiserver.AcceptConnections) } // daemon: a default execution and storage backend for Docker on Linux, @@ -44,3 +55,21 @@ func daemon(eng *engine.Engine) error { } return eng.Register("init_networkdriver", bridge.InitDriver) } + +// builtins jobs independent of any subsystem +func dockerVersion(job *engine.Job) engine.Status { + v := &engine.Env{} + v.Set("Version", dockerversion.VERSION) + v.SetJson("ApiVersion", api.APIVERSION) + v.Set("GitCommit", dockerversion.GITCOMMIT) + v.Set("GoVersion", runtime.Version()) + v.Set("Os", runtime.GOOS) + v.Set("Arch", runtime.GOARCH) + if kernelVersion, err := utils.GetKernelVersion(); err == nil { + v.Set("KernelVersion", kernelVersion.String()) + } + if _, err := v.WriteTo(job.Stdout); err != nil { + return 
job.Error(err) + } + return engine.StatusOK +} diff --git a/contrib/check-config.sh b/contrib/check-config.sh index 498ede8af3..8dd618cb67 100755 --- a/contrib/check-config.sh +++ b/contrib/check-config.sh @@ -116,7 +116,7 @@ fi flags=( NAMESPACES {NET,PID,IPC,UTS}_NS DEVPTS_MULTIPLE_INSTANCES - CGROUPS CGROUP_DEVICE + CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_SCHED MACVLAN VETH BRIDGE NF_NAT_IPV4 IP_NF_TARGET_MASQUERADE NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK} diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker index e6a191d32b..e2ddd2accf 100755 --- a/contrib/completion/bash/docker +++ b/contrib/completion/bash/docker @@ -539,7 +539,7 @@ _docker_search() case "$cur" in -*) - COMPREPLY=( $( compgen -W "--no-trunc -t --trusted -s --stars" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--no-trunc --automated -s --stars" -- "$cur" ) ) ;; *) ;; diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish index e3bb72aebe..a7fd52e312 100644 --- a/contrib/completion/fish/docker.fish +++ b/contrib/completion/fish/docker.fish @@ -71,7 +71,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" # build -complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build a container from a Dockerfile' +complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the verbose output generated by the containers' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build' @@ -229,7 +229,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image in the docker index' complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only displays with at least xxx stars' -complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s t -l trusted -d 'Only show trusted builds' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds' # start complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a stopped container' diff --git a/contrib/desktop-integration/data/Dockerfile b/contrib/desktop-integration/data/Dockerfile index 76846af912..236912f904 100644 --- a/contrib/desktop-integration/data/Dockerfile +++ b/contrib/desktop-integration/data/Dockerfile @@ -6,7 +6,7 @@ # /data volume is owned by sysadmin. # USAGE: # # Download data Dockerfile -# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/data/Dockerfile +# wget http://raw.githubusercontent.com/dotcloud/docker/master/contrib/desktop-integration/data/Dockerfile # # # Build data image # docker build -t data . diff --git a/contrib/desktop-integration/iceweasel/Dockerfile b/contrib/desktop-integration/iceweasel/Dockerfile index f9f58c9ca5..80d6a55e4a 100644 --- a/contrib/desktop-integration/iceweasel/Dockerfile +++ b/contrib/desktop-integration/iceweasel/Dockerfile @@ -7,7 +7,7 @@ # sound devices. 
Tested on Debian 7.2 # USAGE: # # Download Iceweasel Dockerfile -# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/iceweasel/Dockerfile +# wget http://raw.githubusercontent.com/dotcloud/docker/master/contrib/desktop-integration/iceweasel/Dockerfile # # # Build iceweasel image # docker build -t iceweasel . diff --git a/contrib/init/sysvinit-debian/docker b/contrib/init/sysvinit-debian/docker index 67f0d2807f..9b50fad448 100755 --- a/contrib/init/sysvinit-debian/docker +++ b/contrib/init/sysvinit-debian/docker @@ -4,6 +4,8 @@ # Provides: docker # Required-Start: $syslog $remote_fs # Required-Stop: $syslog $remote_fs +# Should-Start: cgroupfs-mount cgroup-lite +# Should-Stop: cgroupfs-mount cgroup-lite # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Create lightweight, portable, self-sufficient containers. diff --git a/contrib/init/sysvinit-redhat/docker b/contrib/init/sysvinit-redhat/docker index 2b75c6903f..06699f6ab1 100755 --- a/contrib/init/sysvinit-redhat/docker +++ b/contrib/init/sysvinit-redhat/docker @@ -3,7 +3,7 @@ # /etc/rc.d/init.d/docker # # Daemon for docker.io -# +# # chkconfig: 2345 95 95 # description: Daemon for docker.io @@ -49,6 +49,13 @@ start() { $exec -d $other_args &>> $logfile & pid=$! touch $lockfile + # wait up to 10 seconds for the pidfile to exist. see + # https://github.com/dotcloud/docker/issues/5359 + tries=0 + while [ ! -f $pidfile -a $tries -lt 10 ]; do + sleep 1 + tries=$((tries + 1)) + done success echo else diff --git a/contrib/init/upstart/docker.conf b/contrib/init/upstart/docker.conf index e27d77e145..5a3f88887e 100644 --- a/contrib/init/upstart/docker.conf +++ b/contrib/init/upstart/docker.conf @@ -1,6 +1,6 @@ description "Docker daemon" -start on filesystem +start on (local-filesystems and net-device-up IFACE!=lo) stop on runlevel [!2345] limit nofile 524288 1048576 limit nproc 524288 1048576 diff --git a/contrib/man/md/Dockerfile.5.md b/contrib/man/md/Dockerfile.5.md new file mode 100644 index 0000000000..1d191e0f9c --- /dev/null +++ b/contrib/man/md/Dockerfile.5.md @@ -0,0 +1,206 @@ +% DOCKERFILE(5) Docker User Manuals +% Zac Dover +% May 2014 +# NAME + +Dockerfile - automate the steps of creating a Docker image + +# INTRODUCTION +The **Dockerfile** is a configuration file that automates the steps of creating +a Docker image. It is similar to a Makefile. Docker reads instructions from the +**Dockerfile** to automate the steps otherwise performed manually to create an +image. To build an image, create a file called **Dockerfile**. The +**Dockerfile** describes the steps taken to assemble the image. When the +**Dockerfile** has been created, call the **docker build** command, using the +path of directory that contains **Dockerfile** as the argument. + +# SYNOPSIS + +INSTRUCTION arguments + +For example: + +FROM image + +# DESCRIPTION + +A Dockerfile is a file that automates the steps of creating a Docker image. +A Dockerfile is similar to a Makefile. + +# USAGE + +**sudo docker build .** + -- runs the steps and commits them, building a final image + The path to the source repository defines where to find the context of the + build. The build is run by the docker daemon, not the CLI. The whole + context must be transferred to the daemon. The Docker CLI reports + "Sending build context to Docker daemon" when the context is sent to the daemon. + +**sudo docker build -t repository/tag .** + -- specifies a repository and tag at which to save the new image if the build + succeeds. 
The Docker daemon runs the steps one-by-one, committing the result + to a new image if necessary before finally outputting the ID of the new + image. The Docker daemon automatically cleans up the context it is given. + +Docker re-uses intermediate images whenever possible. This significantly +accelerates the *docker build* process. + +# FORMAT + +**FROM image** +or +**FROM image:tag** + -- The FROM instruction sets the base image for subsequent instructions. A + valid Dockerfile must have FROM as its first instruction. The image can be any + valid image. It is easy to start by pulling an image from the public + repositories. + -- FROM must be the first non-comment instruction in the Dockerfile. + -- FROM may appear multiple times within a single Dockerfile in order to create + multiple images. Make a note of the last image id output by the commit before + each new FROM command. + -- If no tag is given to the FROM instruction, latest is assumed. If the used + tag does not exist, an error is returned. + +**MAINTAINER** + --The MAINTAINER instruction sets the Author field for the generated images. + +**RUN** + --RUN has two forms: + **RUN <command>** + -- (the command is run in a shell - /bin/sh -c) + **RUN ["executable", "param1", "param2"]** + --The above is the exec form. + --The RUN instruction executes any commands in a new layer on top of the + current image and commits the results. The committed image is used for the next + step in the Dockerfile. + --Layering RUN instructions and generating commits conforms to the core + concepts of Docker where commits are cheap and containers can be created from + any point in the history of an image. This is similar to source control. The + exec form makes it possible to avoid shell string munging. The exec form makes + it possible to RUN commands using a base image that does not contain /bin/sh. + +**CMD** + --CMD has three forms: + **CMD ["executable", "param1", "param2"]** This is the preferred form, the + exec form. + **CMD ["param1", "param2"]** This form provides default parameters to + ENTRYPOINT. + **CMD command param1 param2** This command is run in a shell. + --There can be only one CMD in a Dockerfile. If more than one CMD is listed, only + the last CMD takes effect. + The main purpose of a CMD is to provide defaults for an executing container. + These defaults may include an executable, or they can omit the executable. If + they omit the executable, an ENTRYPOINT must be specified. + When used in the shell or exec formats, the CMD instruction sets the command to + be executed when running the image. + If you use the shell form of the CMD, the <command> executes in /bin/sh -c: + **FROM ubuntu** + **CMD echo "This is a test." | wc -** + If you run <command> without a shell, then you must express the command as a + JSON array and give the full path to the executable. This array form is the + preferred form of CMD. All additional parameters must be individually expressed + as strings in the array: + **FROM ubuntu** + **CMD ["/usr/bin/wc","--help"]** + To make the container run the same executable every time, use ENTRYPOINT in + combination with CMD. + If the user specifies arguments to docker run, the specified commands override + the default in CMD. + Do not confuse **RUN** with **CMD**. RUN runs a command and commits the result. CMD + executes nothing at build time, but specifies the intended command for the + image. + +**EXPOSE** + --**EXPOSE <port> [<port>...]** + The **EXPOSE** instruction informs Docker that the container listens on the + specified network ports at runtime.
Docker uses this information to + interconnect containers using links, and to set up port redirection on the host + system. + +**ENV** + --**ENV <key> <value>** + The ENV instruction sets the environment variable <key> to + the value <value>. This value is passed to all future RUN instructions. This is + functionally equivalent to prefixing the command with **<key>=<value>**. The + environment variables that are set with ENV persist when a container is run + from the resulting image. Use docker inspect to inspect these values, and + change them using docker run **--env <key>=<value>**. + + Note that setting **ENV DEBIAN_FRONTEND noninteractive** may cause + unintended consequences, because it will persist when the container is run + interactively, as with the following command: **docker run -t -i image bash** + +**ADD** + --**ADD <src> <dest>** The ADD instruction copies new files from <src> and adds them + to the filesystem of the container at path <dest>. <src> must be the path to a + file or directory relative to the source directory that is being built (the + context of the build) or a remote file URL. <dest> is the absolute path to + which the source is copied inside the target container. All new files and + directories are created with mode 0755, with uid and gid 0. + +**ENTRYPOINT** + --**ENTRYPOINT** has two forms: ENTRYPOINT ["executable", "param1", "param2"] + (This is like an exec, and is the preferred form.) ENTRYPOINT command param1 + param2 (This is run in a shell.) An ENTRYPOINT helps you configure a + container that can be run as an executable. When you specify an ENTRYPOINT, + the whole container runs as if it was only that executable. The ENTRYPOINT + instruction adds an entry command that is not overwritten when arguments are + passed to docker run. This is different from the behavior of CMD. This allows + arguments to be passed to the entrypoint, for instance docker run <image> -d + passes the -d argument to the ENTRYPOINT. Specify parameters either in the + ENTRYPOINT JSON array (as in the preferred exec form above), or by using a CMD + statement. Parameters in the ENTRYPOINT are not overwritten by the docker run + arguments. Parameters specified via CMD are overwritten by docker run + arguments. Specify a plain string for the ENTRYPOINT, and it will execute in + /bin/sh -c, like a CMD instruction: + FROM ubuntu + ENTRYPOINT wc -l - + This means that the Dockerfile's image always takes stdin as input (that's + what "-" means), and prints the number of lines (that's what "-l" means). To + make this optional but default, use a CMD: + FROM ubuntu + CMD ["-l", "-"] + ENTRYPOINT ["/usr/bin/wc"] + +**VOLUME** + --**VOLUME ["/data"]** + The VOLUME instruction creates a mount point with the specified name and marks + it as holding externally-mounted volumes from the native host or from other + containers. + +**USER** + -- **USER daemon** + The USER instruction sets the username or UID that is used when running the + image. + +**WORKDIR** + -- **WORKDIR /path/to/workdir** + The WORKDIR instruction sets the working directory for the **RUN**, **CMD**, and **ENTRYPOINT** Dockerfile commands that follow it. + It can be used multiple times in a single Dockerfile. Relative paths are defined relative to the path of the previous **WORKDIR** instruction. For example: + **WORKDIR /a WORKDIR b WORKDIR c RUN pwd** + In the above example, the output of the **pwd** command is **/a/b/c**.
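The WORKDIR rule above (an absolute path replaces the working directory, a relative path is resolved against the previous one) is just a sequence of path joins. A small illustrative sketch of that resolution, mirroring the /a, b, c example; `resolveWorkdirs` is a name invented here, not part of Docker:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// resolveWorkdirs mimics how successive WORKDIR instructions compose:
// an absolute path resets the working directory, a relative path is
// joined onto the previous one.
func resolveWorkdirs(steps ...string) string {
	wd := "/"
	for _, s := range steps {
		if filepath.IsAbs(s) {
			wd = s
		} else {
			wd = filepath.Join(wd, s)
		}
	}
	return wd
}

func main() {
	// WORKDIR /a, WORKDIR b, WORKDIR c -> pwd prints /a/b/c
	fmt.Println(resolveWorkdirs("/a", "b", "c"))
}
```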
+ +**ONBUILD** + -- **ONBUILD [INSTRUCTION]** + The ONBUILD instruction adds a trigger instruction to the image, which is + executed at a later time, when the image is used as the base for another + build. The trigger is executed in the context of the downstream build, as + if it had been inserted immediately after the FROM instruction in the + downstream Dockerfile. Any build instruction can be registered as a + trigger. This is useful if you are building an image to be + used as a base for building other images, for example an application build + environment or a daemon to be customized with a user-specific + configuration. For example, if your image is a reusable python + application builder, it requires application source code to be + added in a particular directory, and might require a build script + to be called after that. You can't just call ADD and RUN now, because + you don't yet have access to the application source code, and it + is different for each application build. Providing + application developers with a boilerplate Dockerfile to copy-paste + into their application is inefficient, error-prone, and + difficult to update because it mixes with application-specific code. + The solution is to use **ONBUILD** to register instructions in advance, to + run later, during the next build stage. + +# HISTORY +*May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.io Dockerfile documentation. diff --git a/contrib/man/md/docker-attach.1.md b/contrib/man/md/docker-attach.1.md index 2755fd27f5..5a3b7a2856 100644 --- a/contrib/man/md/docker-attach.1.md +++ b/contrib/man/md/docker-attach.1.md @@ -9,10 +9,11 @@ docker-attach - Attach to a running container # DESCRIPTION If you **docker run** a container in detached mode (**-d**), you can reattach to - the detached container with **docker attach** using the container's ID or name. +the detached container with **docker attach** using the container's ID or name. -You can detach from the container again (and leave it running) with `CTRL-c` (for -a quiet exit) or `CTRL-\` to get a stacktrace of the Docker client when it quits. +You can detach from the container again (and leave it running) with `CTRL-p +CTRL-q` (for a quiet exit), or `CTRL-c` which will send a SIGKILL to the +container, or `CTRL-\` to get a stacktrace of the Docker client when it quits. When you detach from a container the exit code will be returned to the client. diff --git a/contrib/man/md/docker-build.1.md b/contrib/man/md/docker-build.1.md index b3e9a2842e..3c031445aa 100644 --- a/contrib/man/md/docker-build.1.md +++ b/contrib/man/md/docker-build.1.md @@ -2,7 +2,7 @@ % William Henry % APRIL 2014 # NAME -docker-build - Build a container image from a Dockerfile source at PATH +docker-build - Build an image from a Dockerfile source at PATH # SYNOPSIS **docker build** [**--no-cache**[=*false*]] [**-q**|**--quiet**[=*false*]] @@ -17,7 +17,7 @@ be used by **ADD** commands found within the Dockerfile. Warning, this will send a lot of data to the Docker daemon depending on the contents of the current directory. The build is run by the Docker daemon, not by the CLI, so the whole context must be transferred to the daemon. -The Docker CLI reports "Uploading context" when the context is sent to +The Docker CLI reports "Sending build context to Docker daemon" when the context is sent to the daemon. When a single Dockerfile is given as the URL, then no context is set. @@ -34,8 +34,9 @@ as context. build process. The default is true.
**-t**, **--tag**=*tag* - Tag to be applied to the resulting image on successful completion of -the build. + The name to be applied to the resulting image on successful completion of +the build. `tag` in this context means the entire image name including the +optional TAG after the ':'. **--no-cache**=*true*|*false* When set to true, do not use a cache when building the image. The @@ -66,6 +67,40 @@ in the Dockerfile. Note: If you include a tar file (a good practice!), then Docker will automatically extract the contents of the tar file specified within the `ADD` instruction into the specified target. +## Building an image and naming that image + +A good practice is to give a name to the image you are building. There are +no hard rules here, but it is best to give the names consideration. + +The **-t**/**--tag** flag is used to name an image. Here are some examples: + +Though it is not a good practice, image names can be arbitrary: + + docker build -t myimage . + +A better approach is to provide a fully qualified and meaningful repository, +name, and tag (where the tag in this context means the qualifier after +the ":"). In this example we build a JBoss image for the Fedora repository +and give it the version 1.0: + + docker build -t fedora/jboss:1.0 + +The next example is for the "whenry" user repository and uses Fedora and +JBoss and gives it the version 2.1: + + docker build -t whenry/fedora-jboss:V2.1 + +If you do not provide a version tag then Docker will assign `latest`: + + docker build -t whenry/fedora-jboss + +When you list the images, the image above will have the tag `latest`. + +So naming an image is arbitrary, but consideration should be given to +a useful convention that makes sense for consumers and should also take +into account Docker community conventions. + + ## Building an image using a URL This will clone the specified Github repository from the URL and use it diff --git a/contrib/man/md/docker-run.1.md index 56364f9d5f..447d9e13c3 100644 --- a/contrib/man/md/docker-run.1.md +++ b/contrib/man/md/docker-run.1.md @@ -14,8 +14,8 @@ docker-run - Run a process in an isolated container [**-e**|**--env**=*environment*] [**--entrypoint**=*command*] [**--expose**=*port*] [**-P**|**--publish-all**[=*false*]] [**-p**|**--publish**=*port-mapping*] [**-h**|**--hostname**=*hostname*] -[**--rm**[=*false*]] [**--priviledged**[=*false*] -[**-i**|**--interactive**[=*false*] +[**--rm**[=*false*]] [**--privileged**[=*false*]] +[**-i**|**--interactive**[=*false*]] [**-t**|**--tty**[=*false*]] [**--lxc-conf**=*options*] [**-n**|**--networking**[=*true*]] [**-v**|**--volume**=*volume*] [**--volumes-from**=*container-id*] @@ -64,6 +64,9 @@ the other shell to view a list of the running containers. You can reattach to a detached container with **docker attach**. If you choose to run a container in the detached mode, then you cannot use the **-rm** option. + When attached in the tty mode, you can detach from a running container without +stopping the process by pressing the keys CTRL-P CTRL-Q. + **--dns**=*IP-address* Set custom DNS servers. This option can be used to override the DNS @@ -100,8 +103,8 @@ container can be started with the **--link**. **-m**, **-memory**=*memory-limit* Allows you to constrain the memory available to a container. If the host supports swap memory, then the -m memory setting can be larger than physical -RAM. The memory limit format: <number><unit>, where unit = b, k, m or g. +RAM.
If a limit of 0 is specified, the container's memory is not limited. The +memory limit format: <number><unit>, where unit = b, k, m or g. **-P**, **-publish-all**=*true*|*false* When set to true publish all exposed ports to the host interfaces. The @@ -164,7 +167,7 @@ and foreground Docker containers. Docker container. This is because by default a container is not allowed to access any devices. A “privileged” container is given access to all devices. -When the operator executes **docker run -privileged**, Docker will enable access +When the operator executes **docker run --privileged**, Docker will enable access to all devices on the host as well as set some configuration in AppArmor to allow the container nearly all the same access to the host as processes running outside of a container on the host. @@ -190,18 +193,28 @@ interactive shell. The default value is false. Set a username or UID for the container. -**-v**, **-volume**=*volume* - Bind mount a volume to the container. The **-v** option can be used one or +**-v**, **-volume**=*volume*[:ro|:rw] + Bind mount a volume to the container. + +The **-v** option can be used one or more times to add one or more mounts to a container. These mounts can then be -used in other containers using the **--volumes-from** option. See examples. +used in other containers using the **--volumes-from** option. +The volume may be optionally suffixed with :ro or :rw to mount the volumes in +read-only or read-write mode, respectively. By default, the volumes are mounted +read-write. See examples. -**--volumes-from**=*container-id* +**--volumes-from**=*container-id*[:ro|:rw] Will mount volumes from the specified container identified by container-id. Once a volume is mounted in one container it can be shared with other containers using the **--volumes-from** option when running those other containers. The volumes can be shared even if the original container with the -mount is not running. +mount is not running. + +The container ID may be optionally suffixed with :ro or +:rw to mount the volumes in read-only or read-write mode, respectively. By +default, the volumes are mounted in the same mode (read-write or read-only) as +the reference container. **-w**, **-workdir**=*directory* @@ -227,7 +240,7 @@ can override the working directory by using the **-w** option. ## Exposing log messages from the container to the host's log If you want messages that are logged in your container to show up in the host's -syslog/journal then you should bind mount the /var/log directory as follows. +syslog/journal then you should bind mount /dev/log as follows. # docker run -v /dev/log:/dev/log -i -t fedora /bin/bash @@ -307,7 +320,7 @@ fedora-data image: # docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true # docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash -Multiple -volumes-from parameters will bring together multiple data volumes from +Multiple --volumes-from parameters will bring together multiple data volumes from multiple containers.
And it's possible to mount the volumes that came from the DATA container in yet another container via the fedora-container1 intermediary container, allowing you to abstract the actual data source from users of that data: diff --git a/contrib/man/md/docker-search.1.md index fb2c921f8a..945dd34e59 100644 --- a/contrib/man/md/docker-search.1.md +++ b/contrib/man/md/docker-search.1.md @@ -5,7 +5,7 @@ docker-search - Search the docker index for images # SYNOPSIS -**docker search** **--no-trunc**[=*false*] **-t**|**--trusted**[=*false*] +**docker search** **--no-trunc**[=*false*] **--automated**[=*false*] **-s**|**--stars**[=*0*] TERM # DESCRIPTION Search an index for an image that matches the term TERM. The table of images returned displays the name, description (truncated by default), number of stars awarded, whether the image is official, and whether it -is trusted. +is automated. # OPTIONS **--no-trunc**=*true*|*false* @@ -23,8 +23,8 @@ is trusted. Only displays images with at least NUM (integer) stars, i.e. only those images ranked >=NUM. -**-t**, **--trusted**=*true*|*false* - When true only show trusted builds. The default is false. +**--automated**=*true*|*false* + When true only show automated builds. The default is false. # EXAMPLE @@ -34,19 +34,19 @@ Search the registry for the term 'fedora' and only display those images ranked 3 or higher: $ sudo docker search -s 3 fedora - NAME DESCRIPTION STARS OFFICIAL TRUSTED + NAME DESCRIPTION STARS OFFICIAL AUTOMATED mattdm/fedora A basic Fedora image corresponding roughly... 50 fedora (Semi) Official Fedora base image. 38 mattdm/fedora-small A small Fedora image on which to build. Co... 8 goldmann/wildfly A WildFly application server running on a ... 3 [OK] -## Search the registry for trusted images +## Search the registry for automated images -Search the registry for the term 'fedora' and only display trusted images +Search the registry for the term 'fedora' and only display automated images ranked 1 or higher: $ sudo docker search -s 1 --automated fedora - NAME DESCRIPTION STARS OFFICIAL TRUSTED + NAME DESCRIPTION STARS OFFICIAL AUTOMATED goldmann/wildfly A WildFly application server running on a ... 3 [OK] tutum/fedora-20 Fedora 20 image with SSH access. For the r... 1 [OK] diff --git a/contrib/man/md/docker-start.1.md index 9e639bbb5e..2815f1b07f 100644 --- a/contrib/man/md/docker-start.1.md +++ b/contrib/man/md/docker-start.1.md @@ -20,6 +20,10 @@ the process **-i**, **--interactive**=*true*|*false* When true attach to container's stdin +# NOTES +If run on a started container, start takes no action and succeeds +unconditionally. + # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work. diff --git a/contrib/man/md/docker-tag.1.md index 49f5a6c4d1..0c42769908 100644 --- a/contrib/man/md/docker-tag.1.md +++ b/contrib/man/md/docker-tag.1.md @@ -9,11 +9,12 @@ docker-tag - Tag an image in the repository IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG] # DESCRIPTION -This will tag an image in the repository. +This will give a new alias to an image in the repository. This refers to the +entire image name including the optional TAG after the ':'. # "OPTIONS" **-f**, **--force**=*true*|*false* - When set to true, force the tag name. The default is *false*. + When set to true, force the alias. The default is *false*.
**REGISTRYHOST** The hostname of the registry if required. This may also include the port separated by a ':' @@ -26,13 +27,16 @@ separated by a ':' The image name. **TAG** - The tag you are assigning to the image. + The tag you are assigning to the image. Though this is arbitrary, it is +recommended to use it for a version, to distinguish images with the same name. +Note that here TAG is a part of the overall name or "tag". # EXAMPLES -## Tagging an image +## Giving an image a new alias -Here is an example of tagging an image with the tag version1.0 : +Here is an example of aliasing an image (e.g. 0e5574283393) as "httpd" and +tagging it into the "fedora" repository with "version1.0": docker tag 0e5574283393 fedora/httpd:version1.0 diff --git a/contrib/man/md/docker.1.md index d1ddf192b5..f990e5162f 100644 --- a/contrib/man/md/docker.1.md +++ b/contrib/man/md/docker.1.md @@ -26,10 +26,10 @@ To see the man page for a command run **man docker <command>**. **-D**=*true*|*false* Enable debug mode. Default is false. -**-H**, **--host**=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind or +**-H**, **--host**=[unix:///var/run/docker.sock]: tcp://[host:port] to bind or unix://[/path/to/socket] to use. - Enable both the socket support and TCP on localhost. When host=[0.0.0.0], -port=[4243] or path =[/var/run/docker.sock] is omitted, default values are used. + The socket(s) to bind to in daemon mode, specified using one or more of + tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. **--api-enable-cors**=*true*|*false* Enable CORS headers in the remote API. Default is false. @@ -73,7 +73,7 @@ port=[4243] or path =[/var/run/docker.sock] is omitted, default values are used. **-v**=*true*|*false* Print version information and quit. Default is false. -**--selinux-enabled=*true*|*false* +**--selinux-enabled**=*true*|*false* Enable selinux support. Default is false. # COMMANDS @@ -81,7 +81,7 @@ Attach to a running container **docker-build(1)** - Build a container from a Dockerfile + Build an image from a Dockerfile **docker-commit(1)** Create a new image from a container's changes diff --git a/contrib/man/old-man/docker-build.1 b/contrib/man/old-man/docker-build.1 index 6546b7be2a..2d189eb0e3 100644 --- a/contrib/man/old-man/docker-build.1 +++ b/contrib/man/old-man/docker-build.1 @@ -3,7 +3,7 @@ .\" .TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker" .SH NAME -docker-build \- Build a container image from a Dockerfile source at PATH +docker-build \- Build an image from a Dockerfile source at PATH .SH SYNOPSIS .B docker build [\fB--no-cache\fR[=\fIfalse\fR] diff --git a/contrib/man/old-man/docker-run.1 b/contrib/man/old-man/docker-run.1 index fd449374e3..0e06e8d682 100644 --- a/contrib/man/old-man/docker-run.1 +++ b/contrib/man/old-man/docker-run.1 @@ -39,7 +39,7 @@ CPU shares in relative weight. You can increase the priority of a container with .TP .B -m, --memory=\fImemory-limit\fR: -Allows you to constrain the memory available to a container. If the host supports swap memory, then the -m memory setting can be larger than physical RAM. The memory limit format: <number><unit>, where unit = b, k, m or g. +Allows you to constrain the memory available to a container. If the host supports swap memory, then the -m memory setting can be larger than physical RAM. If a limit of 0 is specified, the container's memory is not limited. The memory limit format: <number><unit>, where unit = b, k, m or g.
.TP .B --cidfile=\fIfile\fR: @@ -245,7 +245,7 @@ docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash .RE .sp .TP -Multiple -volumes-from parameters will bring together multiple data volumes from multiple containers. And it's possible to mount the volumes that came from the DATA container in yet another container via the fedora-container1 intermediary container, allowing you to abstract the actual data source from users of that data: +Multiple --volumes-from parameters will bring together multiple data volumes from multiple containers. And it's possible to mount the volumes that came from the DATA container in yet another container via the fedora-container1 intermediary container, allowing you to abstract the actual data source from users of that data: .sp .RS docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash .RE diff --git a/contrib/man/old-man/docker.1 b/contrib/man/old-man/docker.1 index 4a36e5baf5..95f60891cb 100644 --- a/contrib/man/old-man/docker.1 +++ b/contrib/man/old-man/docker.1 @@ -19,7 +19,7 @@ To see the man page for a command run \fBman docker <command>\fR. Enable debug mode .TP .B\-H=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind or unix://[/path/to/socket] to use. -When host=[0.0.0.0], port=[4243] or path +When host=[0.0.0.0], port=[2375] or path =[/var/run/docker.sock] is omitted, default values are used. .TP .B \-\-api-enable-cors=false @@ -69,7 +69,7 @@ Print version information and quit Attach to a running container .TP .B build -Build a container from a Dockerfile +Build an image from a Dockerfile .TP .B commit Create a new image from a container's changes diff --git a/contrib/mkimage-busybox.sh b/contrib/mkimage-busybox.sh index c1bb88c350..cbaa567834 100755 --- a/contrib/mkimage-busybox.sh +++ b/contrib/mkimage-busybox.sh @@ -2,6 +2,10 @@ # Generate a very minimal filesystem based on busybox-static, # and load it into the local docker under the name "busybox". +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/busybox-static' +echo >&2 + BUSYBOX=$(which busybox) [ "$BUSYBOX" ] || { echo "Sorry, I could not locate busybox."
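The run man pages above describe the memory limit as <number><unit>, with unit = b, k, m or g, and a limit of 0 meaning unlimited. A hypothetical parser for that format, purely for illustration (this is not Docker's actual flag-parsing code, and `parseMemLimit` is a name invented here):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseMemLimit converts a human-readable limit such as "512m" into bytes,
// following the <number><unit> format from the man page (unit = b, k, m, g).
// A bare number is treated as bytes; 0 means "not limited".
func parseMemLimit(s string) (int64, error) {
	s = strings.ToLower(strings.TrimSpace(s))
	if s == "" {
		return 0, fmt.Errorf("empty memory limit")
	}
	units := map[byte]int64{'b': 1, 'k': 1 << 10, 'm': 1 << 20, 'g': 1 << 30}
	mult := int64(1)
	if m, ok := units[s[len(s)-1]]; ok {
		mult = m
		s = s[:len(s)-1]
	}
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid memory limit: %v", err)
	}
	return n * mult, nil
}

func main() {
	for _, in := range []string{"512m", "1g", "0"} {
		n, err := parseMemLimit(in)
		fmt.Printf("%-5s -> %d bytes (err=%v)\n", in, n, err)
	}
}
```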
diff --git a/contrib/mkimage-debootstrap.sh b/contrib/mkimage-debootstrap.sh index 613066e16b..808f393549 100755 --- a/contrib/mkimage-debootstrap.sh +++ b/contrib/mkimage-debootstrap.sh @@ -1,6 +1,10 @@ #!/usr/bin/env bash set -e +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/debootstrap' +echo >&2 + variant='minbase' include='iproute,iputils-ping' arch='amd64' # intentionally undocumented for now diff --git a/contrib/mkimage-rinse.sh b/contrib/mkimage-rinse.sh index dfe9999d92..0692ae1794 100755 --- a/contrib/mkimage-rinse.sh +++ b/contrib/mkimage-rinse.sh @@ -8,6 +8,10 @@ set -e +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/rinse' +echo >&2 + repo="$1" distro="$2" mirror="$3" diff --git a/contrib/mkimage.sh b/contrib/mkimage.sh new file mode 100755 index 0000000000..db4815c204 --- /dev/null +++ b/contrib/mkimage.sh @@ -0,0 +1,105 @@ +#!/usr/bin/env bash +set -e + +mkimg="$(basename "$0")" + +usage() { + echo >&2 "usage: $mkimg [-d dir] [-t tag] script [script-args]" + echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie" + echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal trusty" + echo >&2 " $mkimg -t someuser/busybox busybox-static" + echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5" + exit 1 +} + +scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage" + +optTemp=$(getopt --options '+d:t:h' --longoptions 'dir:,tag:,help' --name "$mkimg" -- "$@") +eval set -- "$optTemp" +unset optTemp + +dir= +tag= +while true; do + case "$1" in + -d|--dir) dir="$2" ; shift 2 ;; + -t|--tag) tag="$2" ; shift 2 ;; + -h|--help) usage ;; + --) shift ; break ;; + esac +done + +script="$1" +[ "$script" ] || usage +shift + +if [ ! -x "$scriptDir/$script" ]; then + echo >&2 "error: $script does not exist or is not executable" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +# don't mistake common scripts like .febootstrap-minimize as image-creators +if [[ "$script" == .* ]]; then + echo >&2 "error: $script is a script helper, not a script" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +delDir= +if [ -z "$dir" ]; then + dir="$(mktemp -d ${TMPDIR:-/tmp}/docker-mkimage.XXXXXXXXXX)" + delDir=1 +fi + +rootfsDir="$dir/rootfs" +( set -x; mkdir -p "$rootfsDir" ) + +# pass all remaining arguments to $script +"$scriptDir/$script" "$rootfsDir" "$@" + +# Docker mounts tmpfs at /dev and procfs at /proc so we can remove them +rm -rf "$rootfsDir/dev" "$rootfsDir/proc" +mkdir -p "$rootfsDir/dev" "$rootfsDir/proc" + +# make sure /etc/resolv.conf has something useful in it +mkdir -p "$rootfsDir/etc" +cat > "$rootfsDir/etc/resolv.conf" <<'EOF' +nameserver 8.8.8.8 +nameserver 8.8.4.4 +EOF + +tarFile="$dir/rootfs.tar.xz" +touch "$tarFile" + +( + set -x + tar --numeric-owner -caf "$tarFile" -C "$rootfsDir" --transform='s,^./,,' . 
+) + +echo >&2 "+ cat > '$dir/Dockerfile'" +cat > "$dir/Dockerfile" <<'EOF' +FROM scratch +ADD rootfs.tar.xz / +EOF + +# if our generated image has a decent shell, let's set a default command +for shell in /bin/bash /usr/bin/fish /usr/bin/zsh /bin/sh; do + if [ -x "$rootfsDir/$shell" ]; then + ( set -x; echo 'CMD ["'"$shell"'"]' >> "$dir/Dockerfile" ) + break + fi +done + +( set -x; rm -rf "$rootfsDir" ) + +if [ "$tag" ]; then + ( set -x; docker build -t "$tag" "$dir" ) +elif [ "$delDir" ]; then + # if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_ + ( set -x; docker build "$dir" ) +fi + +if [ "$delDir" ]; then + ( set -x; rm -rf "$dir" ) +fi diff --git a/contrib/mkimage/.febootstrap-minimize b/contrib/mkimage/.febootstrap-minimize new file mode 100755 index 0000000000..7dab4eb8b5 --- /dev/null +++ b/contrib/mkimage/.febootstrap-minimize @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +( + cd "$rootfsDir" + + # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" + # locales + rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} + # docs + rm -rf usr/share/{man,doc,info,gnome/help} + # cracklib + #rm -rf usr/share/cracklib + # i18n + rm -rf usr/share/i18n + # yum cache + rm -rf var/cache/yum + mkdir -p --mode=0755 var/cache/yum + # sln + rm -rf sbin/sln + # ldconfig + #rm -rf sbin/ldconfig + rm -rf etc/ld.so.cache var/cache/ldconfig + mkdir -p --mode=0755 var/cache/ldconfig +) diff --git a/contrib/mkimage/busybox-static b/contrib/mkimage/busybox-static new file mode 100755 index 0000000000..e15322b49d --- /dev/null +++ b/contrib/mkimage/busybox-static @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +busybox="$(which busybox 2>/dev/null || true)" +if [ -z "$busybox" ]; then + echo >&2 'error: busybox: not found' + echo >&2 ' install it with your distribution "busybox-static" package' + exit 1 +fi +if ! 
ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then + echo >&2 "error: '$busybox' appears to be a dynamic executable" + echo >&2 ' you should install your distribution "busybox-static" package instead' + exit 1 +fi + +mkdir -p "$rootfsDir/bin" +rm -f "$rootfsDir/bin/busybox" # just in case +cp "$busybox" "$rootfsDir/bin/busybox" + +( + cd "$rootfsDir" + + IFS=$'\n' + modules=( $(bin/busybox --list-modules) ) + unset IFS + + for module in "${modules[@]}"; do + mkdir -p "$(dirname "$module")" + ln -sf /bin/busybox "$module" + done +) diff --git a/contrib/mkimage/debootstrap b/contrib/mkimage/debootstrap new file mode 100755 index 0000000000..4747a84d31 --- /dev/null +++ b/contrib/mkimage/debootstrap @@ -0,0 +1,125 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap + +before=() +while [ $# -gt 0 ] && [[ "$1" == -* ]]; do + before+=( "$1" ) + shift +done + +suite="$1" +shift + +( + set -x + debootstrap "${before[@]}" "$suite" "$rootfsDir" "$@" +) + +# now for some Docker-specific tweaks + +# prevent init scripts from running during install/update +echo >&2 "+ cat > '$rootfsDir/usr/sbin/policy-rc.d'" +cat > "$rootfsDir/usr/sbin/policy-rc.d" <<'EOF' +#!/bin/sh +exit 101 +EOF +chmod +x "$rootfsDir/usr/sbin/policy-rc.d" + +# prevent upstart scripts from running during install/update +( + set -x + chroot "$rootfsDir" dpkg-divert --local --rename --add /sbin/initctl + ln -sf /bin/true "$rootfsDir/sbin/initctl" +) + +# shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB) +( set -x; chroot "$rootfsDir" apt-get clean ) + +# Ubuntu 10.04 sucks... :) +if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then + # force dpkg not to call sync() after package extraction (speeding up installs) + echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'" + echo 'force-unsafe-io' > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" +fi + +if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then + # _keep_ us lean by effectively running "apt-get clean" after every install + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF + DPkg::Post-Invoke { ${aptGetClean} }; + APT::Update::Post-Invoke { ${aptGetClean} }; + + Dir::Cache::pkgcache ""; + Dir::Cache::srcpkgcache ""; + EOF + + # remove apt-cache translations for fast "apt-get update" + echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'" + echo 'Acquire::Languages "none";' > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" +fi + +if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then + # tweak sources.list, where appropriate + lsbDist= + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/os-release" ]; then + lsbDist="$(. "$rootfsDir/etc/os-release" && echo "$ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/lsb-release" ]; then + lsbDist="$(. "$rootfsDir/etc/lsb-release" && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then + lsbDist='Debian' + fi + case "$lsbDist" in + debian|Debian) + # updates and security! 
+ if [ "$suite" != 'sid' -a "$suite" != 'unstable' ]; then + ( + set -x + sed -i "p; s/ $suite main$/ ${suite}-updates main/" "$rootfsDir/etc/apt/sources.list" + echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + ubuntu|Ubuntu) + # add the universe, updates, and security repositories + ( + set -x + sed -i " + s/ $suite main$/ $suite main universe/; p; + s/ $suite main/ ${suite}-updates main/; p; + s/ $suite-updates main/ ${suite}-security main/ + " "$rootfsDir/etc/apt/sources.list" + ) + ;; + tanglu|Tanglu) + # add the updates repository + if [ "$suite" != 'devel' ]; then + ( + set -x + sed -i "p; s/ $suite main$/ ${suite}-updates main/" "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + steamos|SteamOS) + # add contrib and non-free + ( + set -x + sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list" + ) + ;; + esac +fi + +# make sure we're fully up-to-date, too +( + set -x + chroot "$rootfsDir" apt-get update + chroot "$rootfsDir" apt-get dist-upgrade -y +) diff --git a/contrib/mkimage/rinse b/contrib/mkimage/rinse new file mode 100755 index 0000000000..75eb4f0d9d --- /dev/null +++ b/contrib/mkimage/rinse @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# specifying --arch below is safe because "$@" can override it and the "latest" one wins :) + +( + set -x + rinse --directory "$rootfsDir" --arch amd64 "$@" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi + +# make sure we're fully up-to-date, too +( + set -x + chroot "$rootfsDir" yum update -y +) diff --git a/contrib/vagrant-docker/README.md b/contrib/vagrant-docker/README.md index d422492fe9..4ef9c28775 100644 --- a/contrib/vagrant-docker/README.md +++ b/contrib/vagrant-docker/README.md @@ -31,20 +31,20 @@ stop on runlevel [!2345] respawn script - /usr/bin/docker -d -H=tcp://0.0.0.0:4243 + /usr/bin/docker -d -H=tcp://0.0.0.0:2375 end script ``` Once that's done, you need to set up a SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal: ``` -ssh -L 4243:localhost:4243 -p 2222 vagrant@localhost +ssh -L 2375:localhost:2375 -p 2222 vagrant@localhost ``` -(The first 4243 is what your host can connect to, the second 4243 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.) +(The first 2375 is what your host can connect to, the second 2375 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.) 
Note that because the port has been changed, to run docker commands from within the command line you must run them like this: ``` -sudo docker -H 0.0.0.0:4243 < commands for docker > +sudo docker -H 0.0.0.0:2375 < commands for docker > ``` diff --git a/daemon/README.md b/daemon/README.md new file mode 100644 index 0000000000..64bfcb55ee --- /dev/null +++ b/daemon/README.md @@ -0,0 +1,10 @@ +This directory contains code pertaining to running containers and storing images + +Code pertaining to running containers: + + - execdriver + - networkdriver + +Code pertaining to storing images: + + - graphdriver diff --git a/daemon/container.go b/daemon/container.go index 7b6b65494e..b4a33e3e18 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -9,6 +9,7 @@ import ( "log" "os" "path" + "path/filepath" "strings" "sync" "syscall" @@ -22,8 +23,10 @@ import ( "github.com/dotcloud/docker/links" "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/pkg/label" + "github.com/dotcloud/docker/pkg/libcontainer/devices" "github.com/dotcloud/docker/pkg/networkfs/etchosts" "github.com/dotcloud/docker/pkg/networkfs/resolvconf" + "github.com/dotcloud/docker/pkg/symlink" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" ) @@ -81,42 +84,6 @@ type Container struct { activeLinks map[string]*links.Link } -// Inject the io.Reader at the given path. Note: do not close the reader -func (container *Container) Inject(file io.Reader, pth string) error { - if err := container.Mount(); err != nil { - return fmt.Errorf("inject: error mounting container %s: %s", container.ID, err) - } - defer container.Unmount() - - // Return error if path exists - destPath := path.Join(container.basefs, pth) - if _, err := os.Stat(destPath); err == nil { - // Since err is nil, the path could be stat'd and it exists - return fmt.Errorf("%s exists", pth) - } else if !os.IsNotExist(err) { - // Expect err might be that the file doesn't exist, so - // if it's some other error, return that. 
- - return err - } - - // Make sure the directory exists - if err := os.MkdirAll(path.Join(container.basefs, path.Dir(pth)), 0755); err != nil { - return err - } - - dest, err := os.Create(destPath) - if err != nil { - return err - } - defer dest.Close() - - if _, err := io.Copy(dest, file); err != nil { - return err - } - return nil -} - func (container *Container) FromDisk() error { data, err := ioutil.ReadFile(container.jsonPath()) if err != nil { @@ -170,6 +137,16 @@ func (container *Container) WriteHostConfig() (err error) { return ioutil.WriteFile(container.hostConfigPath(), data, 0666) } +func (container *Container) getResourcePath(path string) string { + cleanPath := filepath.Join("/", path) + return filepath.Join(container.basefs, cleanPath) +} + +func (container *Container) getRootResourcePath(path string) string { + cleanPath := filepath.Join("/", path) + return filepath.Join(container.root, cleanPath) +} + func populateCommand(c *Container, env []string) error { var ( en *execdriver.Network @@ -215,20 +192,23 @@ func populateCommand(c *Container, env []string) error { Memory: c.Config.Memory, MemorySwap: c.Config.MemorySwap, CpuShares: c.Config.CpuShares, + Cpuset: c.Config.Cpuset, } c.command = &execdriver.Command{ - ID: c.ID, - Privileged: c.hostConfig.Privileged, - Rootfs: c.RootfsPath(), - InitPath: "/.dockerinit", - Entrypoint: c.Path, - Arguments: c.Args, - WorkingDir: c.Config.WorkingDir, - Network: en, - Tty: c.Config.Tty, - User: c.Config.User, - Config: context, - Resources: resources, + ID: c.ID, + Privileged: c.hostConfig.Privileged, + Rootfs: c.RootfsPath(), + InitPath: "/.dockerinit", + Entrypoint: c.Path, + Arguments: c.Args, + WorkingDir: c.Config.WorkingDir, + Network: en, + Tty: c.Config.Tty, + User: c.Config.User, + Config: context, + Resources: resources, + AllowedDevices: devices.DefaultAllowedDevices, + AutoCreatedDevices: devices.DefaultAutoCreatedDevices, } c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true} c.command.Env = env @@ -344,7 +324,7 @@ func (container *Container) StderrLogPipe() io.ReadCloser { } func (container *Container) buildHostnameFile() error { - container.HostnamePath = path.Join(container.root, "hostname") + container.HostnamePath = container.getRootResourcePath("hostname") if container.Config.Domainname != "" { return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644) } @@ -356,7 +336,7 @@ func (container *Container) buildHostnameAndHostsFiles(IP string) error { return err } - container.HostsPath = path.Join(container.root, "hosts") + container.HostsPath = container.getRootResourcePath("hosts") extraContent := make(map[string]string) @@ -455,6 +435,20 @@ func (container *Container) monitor(callback execdriver.StartCallback) error { utils.Errorf("Error running container: %s", err) } + // Cleanup + container.cleanup() + + // Re-create a brand new stdin pipe once the container exited + if container.Config.OpenStdin { + container.stdin, container.stdinPipe = io.Pipe() + } + + if container.daemon != nil && container.daemon.srv != nil { + container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image)) + } + + close(container.waitLock) + if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() { container.State.SetStopped(exitCode) @@ -470,20 +464,6 @@ func (container *Container) monitor(callback execdriver.StartCallback) error { } } - // Cleanup - container.cleanup() - 
- // Re-create a brand new stdin pipe once the container exited - if container.Config.OpenStdin { - container.stdin, container.stdinPipe = io.Pipe() - } - - if container.daemon != nil && container.daemon.srv != nil { - container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image)) - } - - close(container.waitLock) - return err } @@ -522,12 +502,37 @@ func (container *Container) KillSig(sig int) error { container.Lock() defer container.Unlock() + // We could unpause the container for them rather than returning this error + if container.State.IsPaused() { + return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID) + } + if !container.State.IsRunning() { return nil } return container.daemon.Kill(container, sig) } +func (container *Container) Pause() error { + if container.State.IsPaused() { + return fmt.Errorf("Container %s is already paused", container.ID) + } + if !container.State.IsRunning() { + return fmt.Errorf("Container %s is not running", container.ID) + } + return container.daemon.Pause(container) +} + +func (container *Container) Unpause() error { + if !container.State.IsPaused() { + return fmt.Errorf("Container %s is not paused", container.ID) + } + if !container.State.IsRunning() { + return fmt.Errorf("Container %s is not running", container.ID) + } + return container.daemon.Unpause(container) +} + func (container *Container) Kill() error { if !container.State.IsRunning() { return nil @@ -571,6 +576,7 @@ func (container *Container) Stop(seconds int) error { log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) // 3. If it doesn't, then send SIGKILL if err := container.Kill(); err != nil { + container.Wait() return err } } @@ -640,7 +646,7 @@ func (container *Container) Export() (archive.Archive, error) { } func (container *Container) WaitTimeout(timeout time.Duration) error { - done := make(chan bool) + done := make(chan bool, 1) go func() { container.Wait() done <- true @@ -659,6 +665,8 @@ func (container *Container) Mount() error { } func (container *Container) Changes() ([]archive.Change, error) { + container.Lock() + defer container.Unlock() return container.daemon.Changes(container) } @@ -674,7 +682,7 @@ func (container *Container) Unmount() error { } func (container *Container) logPath(name string) string { - return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.ID, name)) + return container.getRootResourcePath(fmt.Sprintf("%s-%s.log", container.ID, name)) } func (container *Container) ReadLog(name string) (io.Reader, error) { @@ -682,11 +690,11 @@ func (container *Container) ReadLog(name string) (io.Reader, error) { } func (container *Container) hostConfigPath() string { - return path.Join(container.root, "hostconfig.json") + return container.getRootResourcePath("hostconfig.json") } func (container *Container) jsonPath() string { - return path.Join(container.root, "config.json") + return container.getRootResourcePath("config.json") } // This method must be exported to be used from the lxc template @@ -745,8 +753,16 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) { if err := container.Mount(); err != nil { return nil, err } + var filter []string - basePath := path.Join(container.basefs, resource) + + resPath := container.getResourcePath(resource) + basePath, err := symlink.FollowSymlinkInScope(resPath, container.basefs) + if err != nil { + container.Unmount() + return nil, err + } + stat, err 
:= os.Stat(basePath) if err != nil { container.Unmount() @@ -766,6 +782,7 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) { Includes: filter, }) if err != nil { + container.Unmount() return nil, err } return utils.NewReadCloserWrapper(archive, func() error { @@ -844,7 +861,7 @@ func (container *Container) setupContainerDns() error { } else if len(daemon.config.DnsSearch) > 0 { dnsSearch = daemon.config.DnsSearch } - container.ResolvConfPath = path.Join(container.root, "resolv.conf") + container.ResolvConfPath = container.getRootResourcePath("resolv.conf") return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch) } else { container.ResolvConfPath = "/etc/resolv.conf" @@ -865,9 +882,17 @@ func (container *Container) initializeNetworking() error { container.Config.Hostname = parts[0] container.Config.Domainname = parts[1] } - container.HostsPath = "/etc/hosts" - return container.buildHostnameFile() + content, err := ioutil.ReadFile("/etc/hosts") + if os.IsNotExist(err) { + return container.buildHostnameAndHostsFiles("") + } + if err != nil { + return err + } + + container.HostsPath = container.getRootResourcePath("hosts") + return ioutil.WriteFile(container.HostsPath, content, 0644) } else if container.hostConfig.NetworkMode.IsContainer() { // we need to get the hosts files from the container to join nc, err := container.getNetworkedContainer() @@ -982,12 +1007,12 @@ func (container *Container) setupWorkingDirectory() error { if container.Config.WorkingDir != "" { container.Config.WorkingDir = path.Clean(container.Config.WorkingDir) - pthInfo, err := os.Stat(path.Join(container.basefs, container.Config.WorkingDir)) + pthInfo, err := os.Stat(container.getResourcePath(container.Config.WorkingDir)) if err != nil { if !os.IsNotExist(err) { return err } - if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil { + if err := os.MkdirAll(container.getResourcePath(container.Config.WorkingDir), 0755); err != nil { return err } } diff --git a/daemon/daemon.go b/daemon/daemon.go index 00b6d9eee2..b990b0df60 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -1,7 +1,6 @@ package daemon import ( - "container/list" "fmt" "io" "io/ioutil" @@ -28,7 +27,7 @@ import ( "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/pkg/graphdb" "github.com/dotcloud/docker/pkg/label" - "github.com/dotcloud/docker/pkg/mount" + "github.com/dotcloud/docker/pkg/namesgenerator" "github.com/dotcloud/docker/pkg/networkfs/resolvconf" "github.com/dotcloud/docker/pkg/selinux" "github.com/dotcloud/docker/pkg/sysinfo" @@ -47,10 +46,43 @@ var ( validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`) ) +type contStore struct { + s map[string]*Container + sync.Mutex +} + +func (c *contStore) Add(id string, cont *Container) { + c.Lock() + c.s[id] = cont + c.Unlock() +} + +func (c *contStore) Get(id string) *Container { + c.Lock() + res := c.s[id] + c.Unlock() + return res +} + +func (c *contStore) Delete(id string) { + c.Lock() + delete(c.s, id) + c.Unlock() +} + +func (c *contStore) List() []*Container { + containers := new(History) + for _, cont := range c.s { + containers.Add(cont) + } + containers.Sort() + return *containers +} + type Daemon struct { repository string sysInitPath string - containers *list.List + containers *contStore graph *graph.Graph repositories *graph.TagStore idIndex *utils.TruncIndex @@ -64,38 +96,14 @@ type Daemon struct { execDriver execdriver.Driver } -// Mountpoints should be private to 
the container -func remountPrivate(mountPoint string) error { - mounted, err := mount.Mounted(mountPoint) - if err != nil { - return err - } - - if !mounted { - if err := mount.Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { - return err - } - } - return mount.ForceMount("", mountPoint, "none", "private") +// Install installs daemon capabilities to eng. +func (daemon *Daemon) Install(eng *engine.Engine) error { + return eng.Register("container_inspect", daemon.ContainerInspect) } // List returns an array of all containers registered in the daemon. func (daemon *Daemon) List() []*Container { - containers := new(History) - for e := daemon.containers.Front(); e != nil; e = e.Next() { - containers.Add(e.Value.(*Container)) - } - return *containers -} - -func (daemon *Daemon) getContainerElement(id string) *list.Element { - for e := daemon.containers.Front(); e != nil; e = e.Next() { - container := e.Value.(*Container) - if container.ID == id { - return e - } - } - return nil + return daemon.containers.List() } // Get looks for a container by the specified ID or name, and returns it. @@ -110,11 +118,7 @@ func (daemon *Daemon) Get(name string) *Container { return nil } - e := daemon.getContainerElement(id) - if e == nil { - return nil - } - return e.Value.(*Container) + return daemon.containers.Get(id) } // Exists returns a true if a container of the specified ID or name exists, @@ -141,7 +145,13 @@ func (daemon *Daemon) load(id string) (*Container, error) { } // Register makes a container object usable by the daemon as +// This is a wrapper for register func (daemon *Daemon) Register(container *Container) error { + return daemon.register(container, true, nil) +} + +// register makes a container object usable by the daemon as +func (daemon *Daemon) register(container *Container, updateSuffixarray bool, containersToStart *[]*Container) error { if container.daemon != nil || daemon.Exists(container.ID) { return fmt.Errorf("Container is already loaded") } @@ -164,8 +174,15 @@ func (daemon *Daemon) Register(container *Container) error { container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin } // done - daemon.containers.PushBack(container) - daemon.idIndex.Add(container.ID) + daemon.containers.Add(container.ID, container) + + // don't update the Suffixarray if we're starting up + // we'll waste time if we update it for every container + if updateSuffixarray { + daemon.idIndex.Add(container.ID) + } else { + daemon.idIndex.AddWithoutSuffixarrayUpdate(container.ID) + } // FIXME: if the container is supposed to be running but is not, auto restart it? 
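The contStore introduced in daemon.go replaces the container/list doubly linked list with a map guarded by an embedded sync.Mutex, turning the O(n) getContainerElement scan into an O(1) lookup. A minimal sketch of the same pattern, with string values standing in for *Container:

```go
package main

import (
	"fmt"
	"sync"
)

// store is a minimal sketch of the contStore pattern: a map guarded
// by an embedded mutex so Add/Get/Delete are safe against concurrent
// API requests. Values are strings here purely for illustration.
type store struct {
	sync.Mutex
	s map[string]string
}

func (c *store) Add(id, v string) {
	c.Lock()
	c.s[id] = v
	c.Unlock()
}

func (c *store) Get(id string) string {
	c.Lock()
	defer c.Unlock()
	return c.s[id]
}

func (c *store) Delete(id string) {
	c.Lock()
	delete(c.s, id)
	c.Unlock()
}

// List snapshots the values while holding the lock; iterating a Go
// map concurrently with writers is unsafe, so callers get a copy.
func (c *store) List() []string {
	c.Lock()
	defer c.Unlock()
	out := make([]string, 0, len(c.s))
	for _, v := range c.s {
		out = append(out, v)
	}
	return out
}

func main() {
	st := &store{s: make(map[string]string)}
	st.Add("abc123", "container-a")
	fmt.Println(st.Get("abc123"), st.List())
}
```

One caveat worth noting: contStore.List as written in this patch ranges over the map without taking the lock, so it relies on callers not racing with writers; the sketch above takes the lock for the snapshot instead.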
// if so, then we need to restart monitor and init a new lock @@ -203,13 +220,13 @@ func (daemon *Daemon) Register(container *Container) error { if !info.IsRunning() { utils.Debugf("Container %s was supposed to be running but is not.", container.ID) if daemon.config.AutoRestart { - utils.Debugf("Restarting") + utils.Debugf("Marking as restarting") if err := container.Unmount(); err != nil { utils.Debugf("restart unmount error %s", err) } - if err := container.Start(); err != nil { - return err + if containersToStart != nil { + *containersToStart = append(*containersToStart, container) } } else { utils.Debugf("Marking as stopped") @@ -231,20 +248,15 @@ func (daemon *Daemon) Register(container *Container) error { func (daemon *Daemon) ensureName(container *Container) error { if container.Name == "" { - name, err := generateRandomName(daemon) + name, err := daemon.generateNewName(container.ID) if err != nil { - name = utils.TruncateID(container.ID) + return err } container.Name = name if err := container.ToDisk(); err != nil { utils.Debugf("Error saving container name %s", err) } - if !daemon.containerGraph.Exists(name) { - if _, err := daemon.containerGraph.Set(name, container.ID); err != nil { - utils.Debugf("Setting default id - %s", err) - } - } } return nil } @@ -264,7 +276,7 @@ func (daemon *Daemon) Destroy(container *Container) error { return fmt.Errorf("The given container is ") } - element := daemon.getContainerElement(container.ID) + element := daemon.containers.Get(container.ID) if element == nil { return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID) } @@ -275,7 +287,11 @@ func (daemon *Daemon) Destroy(container *Container) error { // Deregister the container before removing its directory, to avoid race conditions daemon.idIndex.Delete(container.ID) - daemon.containers.Remove(element) + daemon.containers.Delete(container.ID) + + if _, err := daemon.containerGraph.Purge(container.ID); err != nil { + utils.Debugf("Unable to remove container from link graph: %s", err) + } if err := daemon.driver.Remove(container.ID); err != nil { return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err) @@ -286,10 +302,6 @@ func (daemon *Daemon) Destroy(container *Container) error { return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err) } - if _, err := daemon.containerGraph.Purge(container.ID); err != nil { - utils.Debugf("Unable to remove container from link graph: %s", err) - } - if err := os.RemoveAll(container.root); err != nil { return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) } @@ -299,20 +311,25 @@ func (daemon *Daemon) Destroy(container *Container) error { } func (daemon *Daemon) restore() error { - if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { + var ( + debug = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "") + containers = make(map[string]*Container) + currentDriver = daemon.driver.String() + containersToStart = []*Container{} + ) + + if !debug { fmt.Printf("Loading containers: ") } dir, err := ioutil.ReadDir(daemon.repository) if err != nil { return err } - containers := make(map[string]*Container) - currentDriver := daemon.driver.String() for _, v := range dir { id := v.Name() container, err := daemon.load(id) - if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { + if !debug { fmt.Print(".") } if err != nil { @@ -329,20 +346,16 @@ func (daemon *Daemon) restore() error { } } - register := func(container 
*Container) { - if err := daemon.Register(container); err != nil { - utils.Debugf("Failed to register container %s: %s", container.ID, err) - } - } - if entities := daemon.containerGraph.List("/", -1); entities != nil { for _, p := range entities.Paths() { - if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { + if !debug { fmt.Print(".") } e := entities[p] if container, ok := containers[e.ID()]; ok { - register(container) + if err := daemon.register(container, false, &containersToStart); err != nil { + utils.Debugf("Failed to register container %s: %s", container.ID, err) + } delete(containers, e.ID()) } } @@ -351,18 +364,25 @@ func (daemon *Daemon) restore() error { // Any containers that are left over do not exist in the graph for _, container := range containers { // Try to set the default name for a container if it exists prior to links - container.Name, err = generateRandomName(daemon) + container.Name, err = daemon.generateNewName(container.ID) if err != nil { - container.Name = utils.TruncateID(container.ID) - } - - if _, err := daemon.containerGraph.Set(container.Name, container.ID); err != nil { utils.Debugf("Setting default id - %s", err) } - register(container) + if err := daemon.register(container, false, &containersToStart); err != nil { + utils.Debugf("Failed to register container %s: %s", container.ID, err) + } } - if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { + daemon.idIndex.UpdateSuffixarray() + + for _, container := range containersToStart { + utils.Debugf("Starting container %d", container.ID) + if err := container.Start(); err != nil { + utils.Debugf("Failed to start container %s: %s", container.ID, err) + } + } + + if !debug { fmt.Printf(": done.\n") } @@ -450,42 +470,75 @@ func (daemon *Daemon) generateIdAndName(name string) (string, string, error) { ) if name == "" { - name, err = generateRandomName(daemon) - if err != nil { - name = utils.TruncateID(id) - } - } else { - if !validContainerNamePattern.MatchString(name) { - return "", "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + if name, err = daemon.generateNewName(id); err != nil { + return "", "", err } + return id, name, nil } + + if name, err = daemon.reserveName(id, name); err != nil { + return "", "", err + } + + return id, name, nil +} + +func (daemon *Daemon) reserveName(id, name string) (string, error) { + if !validContainerNamePattern.MatchString(name) { + return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + } + if name[0] != '/' { name = "/" + name } - // Set the enitity in the graph using the default name specified + if _, err := daemon.containerGraph.Set(name, id); err != nil { if !graphdb.IsNonUniqueNameError(err) { - return "", "", err + return "", err } conflictingContainer, err := daemon.GetByName(name) if err != nil { if strings.Contains(err.Error(), "Could not find entity") { - return "", "", err + return "", err } // Remove name and continue starting the container if err := daemon.containerGraph.Delete(name); err != nil { - return "", "", err + return "", err } } else { nameAsKnownByUser := strings.TrimPrefix(name, "/") - return "", "", fmt.Errorf( + return "", fmt.Errorf( "Conflict, The name %s is already assigned to %s. 
You have to delete (or rename) that container to be able to assign %s to a container again.", nameAsKnownByUser, utils.TruncateID(conflictingContainer.ID), nameAsKnownByUser) } } - return id, name, nil + return name, nil +} + +func (daemon *Daemon) generateNewName(id string) (string, error) { + var name string + for i := 0; i < 6; i++ { + name = namesgenerator.GetRandomName(i) + if name[0] != '/' { + name = "/" + name + } + + if _, err := daemon.containerGraph.Set(name, id); err != nil { + if !graphdb.IsNonUniqueNameError(err) { + return "", err + } + continue + } + return name, nil + } + + name = "/" + utils.TruncateID(id) + if _, err := daemon.containerGraph.Set(name, id); err != nil { + return "", err + } + return name, nil } func (daemon *Daemon) generateHostname(id string, config *runconfig.Config) { @@ -592,15 +645,18 @@ func (daemon *Daemon) Commit(container *Container, repository, tag, comment, aut containerID, containerImage string containerConfig *runconfig.Config ) + if container != nil { containerID = container.ID containerImage = container.Image containerConfig = container.Config } + img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config) if err != nil { return nil, err } + // Register the image if needed if repository != "" { if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil { @@ -629,11 +685,11 @@ func (daemon *Daemon) GetByName(name string) (*Container, error) { if entity == nil { return nil, fmt.Errorf("Could not find entity for %s", name) } - e := daemon.getContainerElement(entity.ID()) + e := daemon.containers.Get(entity.ID()) if e == nil { return nil, fmt.Errorf("Could not find container for entity id %s", entity.ID()) } - return e.Value.(*Container), nil + return e, nil } func (daemon *Daemon) Children(name string) (map[string]*Container, error) { @@ -667,6 +723,35 @@ func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error return nil } +func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { + if hostConfig != nil && hostConfig.Links != nil { + for _, l := range hostConfig.Links { + parts, err := utils.PartParser("name:alias", l) + if err != nil { + return err + } + child, err := daemon.GetByName(parts["name"]) + if err != nil { + return err + } + if child == nil { + return fmt.Errorf("Could not get container for %s", parts["name"]) + } + if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil { + return err + } + } + + // After we load all the links into the daemon + // set them to nil on the hostconfig + hostConfig.Links = nil + if err := container.WriteHostConfig(); err != nil { + return err + } + } + return nil +} + // FIXME: harmonize with NewGraph() func NewDaemon(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) { daemon, err := NewDaemonFromDirectory(config, eng) @@ -680,20 +765,22 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D if !config.EnableSelinuxSupport { selinux.SetDisabled() } + + // Create the root directory if it doesn't exists + if err := os.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + // Set the default driver graphdriver.DefaultDriver = config.GraphDriver // Load storage driver - driver, err := graphdriver.New(config.Root) + driver, err := graphdriver.New(config.Root, config.GraphOptions) if err != nil { return nil, err } utils.Debugf("Using graph driver %s", driver) - if err := 
remountPrivate(config.Root); err != nil { - return nil, err - } - daemonRepo := path.Join(config.Root, "containers") if err := os.MkdirAll(daemonRepo, 0700); err != nil && !os.IsExist(err) { @@ -713,7 +800,7 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D // We don't want to use a complex driver like aufs or devmapper // for volumes, just a plain filesystem - volumesDriver, err := graphdriver.GetDriver("vfs", config.Root) + volumesDriver, err := graphdriver.GetDriver("vfs", config.Root, config.GraphOptions) if err != nil { return nil, err } @@ -777,7 +864,7 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D daemon := &Daemon{ repository: daemonRepo, - containers: list.New(), + containers: &contStore{s: make(map[string]*Container)}, graph: g, repositories: repositories, idIndex: utils.NewTruncIndex([]string{}), @@ -914,6 +1001,22 @@ func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback e return daemon.execDriver.Run(c.command, pipes, startCallback) } +func (daemon *Daemon) Pause(c *Container) error { + if err := daemon.execDriver.Pause(c.command); err != nil { + return err + } + c.State.SetPaused() + return nil +} + +func (daemon *Daemon) Unpause(c *Container) error { + if err := daemon.execDriver.Unpause(c.command); err != nil { + return err + } + c.State.SetUnpaused() + return nil +} + func (daemon *Daemon) Kill(c *Container, sig int) error { return daemon.execDriver.Kill(c.command, sig) } diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go index 4837a398ea..a5c5c814d7 100644 --- a/daemon/execdriver/driver.go +++ b/daemon/execdriver/driver.go @@ -5,6 +5,8 @@ import ( "io" "os" "os/exec" + + "github.com/dotcloud/docker/pkg/libcontainer/devices" ) // Context is a generic key value pair that allows @@ -81,6 +83,8 @@ type TtyTerminal interface { type Driver interface { Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code Kill(c *Command, sig int) error + Pause(c *Command) error + Unpause(c *Command) error Name() string // Driver name Info(id string) Info // "temporary" hack (until we move state from core to plugins) GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container. 
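daemon.Pause and daemon.Unpause delegate to the new Pause/Unpause methods on the execdriver Driver interface and then flip the container's state. Under the native driver this ultimately toggles the cgroup freezer. The sketch below shows the core of that mechanism on a cgroup-v1 host; the freezer mount point and the docker/<id> layout are assumptions for illustration, and the real drivers go through lxc-freeze/lxc-unfreeze or the libcontainer cgroups packages rather than writing the file directly:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
)

// setFreezerState writes the desired state into the container's
// freezer cgroup, which is what pausing ultimately amounts to on a
// cgroup-v1 host: "FROZEN" stops every task in the group, "THAWED"
// resumes them. Path layout here is an illustrative assumption.
func setFreezerState(containerID, state string) error {
	path := filepath.Join("/sys/fs/cgroup/freezer/docker", containerID, "freezer.state")
	if err := ioutil.WriteFile(path, []byte(state), 0644); err != nil {
		return fmt.Errorf("setting freezer state %s for %s: %v", state, containerID, err)
	}
	return nil
}

func main() {
	id := "abc123" // hypothetical container ID
	if err := setFreezerState(id, "FROZEN"); err != nil {
		fmt.Println(err)
	}
	if err := setFreezerState(id, "THAWED"); err != nil {
		fmt.Println(err)
	}
}
```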
@@ -103,9 +107,10 @@ type NetworkInterface struct { } type Resources struct { - Memory int64 `json:"memory"` - MemorySwap int64 `json:"memory_swap"` - CpuShares int64 `json:"cpu_shares"` + Memory int64 `json:"memory"` + MemorySwap int64 `json:"memory_swap"` + CpuShares int64 `json:"cpu_shares"` + Cpuset string `json:"cpuset"` } type Mount struct { @@ -119,20 +124,22 @@ type Mount struct { type Command struct { exec.Cmd `json:"-"` - ID string `json:"id"` - Privileged bool `json:"privileged"` - User string `json:"user"` - Rootfs string `json:"rootfs"` // root fs of the container - InitPath string `json:"initpath"` // dockerinit - Entrypoint string `json:"entrypoint"` - Arguments []string `json:"arguments"` - WorkingDir string `json:"working_dir"` - ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver - Tty bool `json:"tty"` - Network *Network `json:"network"` - Config map[string][]string `json:"config"` // generic values that specific drivers can consume - Resources *Resources `json:"resources"` - Mounts []Mount `json:"mounts"` + ID string `json:"id"` + Privileged bool `json:"privileged"` + User string `json:"user"` + Rootfs string `json:"rootfs"` // root fs of the container + InitPath string `json:"initpath"` // dockerinit + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + WorkingDir string `json:"working_dir"` + ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver + Tty bool `json:"tty"` + Network *Network `json:"network"` + Config map[string][]string `json:"config"` // generic values that specific drivers can consume + Resources *Resources `json:"resources"` + Mounts []Mount `json:"mounts"` + AllowedDevices []*devices.Device `json:"allowed_devices"` + AutoCreatedDevices []*devices.Device `json:"autocreated_devices"` Terminal Terminal `json:"-"` // standard or tty terminal Console string `json:"-"` // dev/console path diff --git a/daemon/execdriver/execdrivers/execdrivers.go b/daemon/execdriver/execdrivers/execdrivers.go index 18db1f8026..2e18454a09 100644 --- a/daemon/execdriver/execdrivers/execdrivers.go +++ b/daemon/execdriver/execdrivers/execdrivers.go @@ -12,7 +12,7 @@ import ( func NewDriver(name, root, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { switch name { case "lxc": - // we want to five the lxc driver the full docker root because it needs + // we want to give the lxc driver the full docker root because it needs // to access and write config and template files in /var/lib/docker/containers/* // to be backwards compatible return lxc.NewDriver(root, sysInfo.AppArmor) diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index d787d8d873..8f785e8a8f 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -9,14 +9,16 @@ import ( "os/exec" "path" "path/filepath" + "runtime" "strconv" "strings" "syscall" "time" "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/label" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/mount/nodes" "github.com/dotcloud/docker/pkg/system" "github.com/dotcloud/docker/utils" ) @@ -25,6 +27,7 @@ const DriverName = "lxc" func init() { execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { + runtime.LockOSThread() if err := setupEnv(args); err != nil { return err 
} @@ -159,6 +162,10 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba c.Path = aname c.Args = append([]string{name}, arg...) + if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil { + return -1, err + } + if err := c.Start(); err != nil { return -1, err } @@ -167,6 +174,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba waitErr error waitLock = make(chan struct{}) ) + go func() { if err := c.Wait(); err != nil { if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0 @@ -181,9 +189,11 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba if err != nil { if c.Process != nil { c.Process.Kill() + c.Wait() } return -1, err } + c.ContainerPid = pid if startCallback != nil { @@ -208,6 +218,30 @@ func (d *driver) Kill(c *execdriver.Command, sig int) error { return KillLxc(c.ID, sig) } +func (d *driver) Pause(c *execdriver.Command) error { + _, err := exec.LookPath("lxc-freeze") + if err == nil { + output, errExec := exec.Command("lxc-freeze", "-n", c.ID).CombinedOutput() + if errExec != nil { + return fmt.Errorf("Err: %s Output: %s", errExec, output) + } + } + + return err +} + +func (d *driver) Unpause(c *execdriver.Command) error { + _, err := exec.LookPath("lxc-unfreeze") + if err == nil { + output, errExec := exec.Command("lxc-unfreeze", "-n", c.ID).CombinedOutput() + if errExec != nil { + return fmt.Errorf("Err: %s Output: %s", errExec, output) + } + } + + return err +} + func (d *driver) Terminate(c *execdriver.Command) error { return KillLxc(c.ID, 9) } @@ -268,18 +302,14 @@ func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) (in } output, err = d.getInfo(c.ID) - if err != nil { - output, err = d.getInfo(c.ID) + if err == nil { + info, err := parseLxcInfo(string(output)) if err != nil { return -1, err } - } - info, err := parseLxcInfo(string(output)) - if err != nil { - return -1, err - } - if info.Running { - return info.Pid, nil + if info.Running { + return info.Pid, nil + } } time.Sleep(50 * time.Millisecond) } diff --git a/daemon/execdriver/lxc/init.go b/daemon/execdriver/lxc/init.go index e21e717645..5e77190e5a 100644 --- a/daemon/execdriver/lxc/init.go +++ b/daemon/execdriver/lxc/init.go @@ -88,7 +88,7 @@ func setupNetworking(args *execdriver.InitArgs) error { return fmt.Errorf("Unable to set up networking, %s is not a valid gateway IP", args.Gateway) } - if err := netlink.AddDefaultGw(gw); err != nil { + if err := netlink.AddDefaultGw(gw.String(), "eth0"); err != nil { return fmt.Errorf("Unable to set up networking: %v", err) } } diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go index 7fdc5ce92b..fcebe134e7 100644 --- a/daemon/execdriver/lxc/lxc_template.go +++ b/daemon/execdriver/lxc/lxc_template.go @@ -15,7 +15,9 @@ lxc.network.type = veth lxc.network.link = {{.Network.Interface.Bridge}} lxc.network.name = eth0 lxc.network.mtu = {{.Network.Mtu}} -{{else if not .Network.HostNetworking}} +{{else if .Network.HostNetworking}} +lxc.network.type = none +{{else}} # network is disabled (-n=false) lxc.network.type = empty lxc.network.flags = up @@ -45,37 +47,10 @@ lxc.cgroup.devices.allow = a {{else}} # no implicit access to devices lxc.cgroup.devices.deny = a - -# but allow mknod for any device -lxc.cgroup.devices.allow = c *:* m -lxc.cgroup.devices.allow = b *:* m - -# /dev/null and zero -lxc.cgroup.devices.allow = c 1:3 rwm -lxc.cgroup.devices.allow = c 
1:5 rwm - -# consoles -lxc.cgroup.devices.allow = c 5:1 rwm -lxc.cgroup.devices.allow = c 5:0 rwm -lxc.cgroup.devices.allow = c 4:0 rwm -lxc.cgroup.devices.allow = c 4:1 rwm - -# /dev/urandom,/dev/random -lxc.cgroup.devices.allow = c 1:9 rwm -lxc.cgroup.devices.allow = c 1:8 rwm - -# /dev/pts/ - pts namespaces are "coming soon" -lxc.cgroup.devices.allow = c 136:* rwm -lxc.cgroup.devices.allow = c 5:2 rwm - -# tuntap -lxc.cgroup.devices.allow = c 10:200 rwm - -# fuse -#lxc.cgroup.devices.allow = c 10:229 rwm - -# rtc -#lxc.cgroup.devices.allow = c 254:0 rwm +#Allow the devices passed to us in the AllowedDevices list. +{{range $allowedDevice := .AllowedDevices}} +lxc.cgroup.devices.allow = {{$allowedDevice.GetCgroupAllowString}} +{{end}} {{end}} # standard mount point @@ -126,6 +101,9 @@ lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}} {{if .Resources.CpuShares}} lxc.cgroup.cpu.shares = {{.Resources.CpuShares}} {{end}} +{{if .Resources.Cpuset}} +lxc.cgroup.cpuset.cpus = {{.Resources.Cpuset}} +{{end}} {{end}} {{if .Config.lxc}} diff --git a/daemon/execdriver/lxc/lxc_template_unit_test.go b/daemon/execdriver/lxc/lxc_template_unit_test.go index 96d11b204b..12760adb7a 100644 --- a/daemon/execdriver/lxc/lxc_template_unit_test.go +++ b/daemon/execdriver/lxc/lxc_template_unit_test.go @@ -3,7 +3,6 @@ package lxc import ( "bufio" "fmt" - "github.com/dotcloud/docker/daemon/execdriver" "io/ioutil" "math/rand" "os" @@ -11,6 +10,9 @@ import ( "strings" "testing" "time" + + "github.com/dotcloud/docker/daemon/execdriver" + "github.com/dotcloud/docker/pkg/libcontainer/devices" ) func TestLXCConfig(t *testing.T) { @@ -47,6 +49,7 @@ func TestLXCConfig(t *testing.T) { Mtu: 1500, Interface: nil, }, + AllowedDevices: make([]*devices.Device, 0), } p, err := driver.generateLXCConfig(command) if err != nil { diff --git a/daemon/execdriver/native/configuration/parse.go b/daemon/execdriver/native/configuration/parse.go index 22fe4b0e66..f18a60f797 100644 --- a/daemon/execdriver/native/configuration/parse.go +++ b/daemon/execdriver/native/configuration/parse.go @@ -8,7 +8,7 @@ import ( "strings" "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/utils" + "github.com/dotcloud/docker/pkg/units" ) type Action func(*libcontainer.Container, interface{}, string) error @@ -75,7 +75,7 @@ func memory(container *libcontainer.Container, context interface{}, value string return fmt.Errorf("cannot set cgroups when they are disabled") } - v, err := utils.RAMInBytes(value) + v, err := units.RAMInBytes(value) if err != nil { return err } @@ -88,7 +88,7 @@ func memoryReservation(container *libcontainer.Container, context interface{}, v return fmt.Errorf("cannot set cgroups when they are disabled") } - v, err := utils.RAMInBytes(value) + v, err := units.RAMInBytes(value) if err != nil { return err } @@ -109,12 +109,19 @@ func memorySwap(container *libcontainer.Container, context interface{}, value st } func addCap(container *libcontainer.Container, context interface{}, value string) error { - container.CapabilitiesMask[value] = true + container.Capabilities = append(container.Capabilities, value) return nil } func dropCap(container *libcontainer.Container, context interface{}, value string) error { - container.CapabilitiesMask[value] = false + // If the capability is specified multiple times, remove all instances. + for i, capability := range container.Capabilities { + if capability == value { + container.Capabilities = append(container.Capabilities[:i], container.Capabilities[i+1:]...) 
+ } + } + + // The capability wasn't found so we will drop it anyways. return nil } diff --git a/daemon/execdriver/native/configuration/parse_test.go b/daemon/execdriver/native/configuration/parse_test.go index 1b0316b688..5524adb857 100644 --- a/daemon/execdriver/native/configuration/parse_test.go +++ b/daemon/execdriver/native/configuration/parse_test.go @@ -4,8 +4,19 @@ import ( "testing" "github.com/dotcloud/docker/daemon/execdriver/native/template" + "github.com/dotcloud/docker/pkg/libcontainer" ) +// Checks whether the expected capability is specified in the capabilities. +func hasCapability(expected string, capabilities []string) bool { + for _, capability := range capabilities { + if capability == expected { + return true + } + } + return false +} + func TestSetReadonlyRootFs(t *testing.T) { var ( container = template.New() @@ -39,10 +50,10 @@ func TestConfigurationsDoNotConflict(t *testing.T) { t.Fatal(err) } - if !container1.CapabilitiesMask["NET_ADMIN"] { + if !hasCapability("NET_ADMIN", container1.Capabilities) { t.Fatal("container one should have NET_ADMIN enabled") } - if container2.CapabilitiesMask["NET_ADMIN"] { + if hasCapability("NET_ADMIN", container2.Capabilities) { t.Fatal("container two should not have NET_ADMIN enabled") } } @@ -138,10 +149,10 @@ func TestAddCap(t *testing.T) { t.Fatal(err) } - if !container.CapabilitiesMask["MKNOD"] { + if !hasCapability("MKNOD", container.Capabilities) { t.Fatal("container should have MKNOD enabled") } - if !container.CapabilitiesMask["SYS_ADMIN"] { + if !hasCapability("SYS_ADMIN", container.Capabilities) { t.Fatal("container should have SYS_ADMIN enabled") } } @@ -154,14 +165,12 @@ func TestDropCap(t *testing.T) { } ) // enabled all caps like in privileged mode - for key := range container.CapabilitiesMask { - container.CapabilitiesMask[key] = true - } + container.Capabilities = libcontainer.GetAllCapabilities() if err := ParseConfiguration(container, nil, opts); err != nil { t.Fatal(err) } - if container.CapabilitiesMask["MKNOD"] { + if hasCapability("MKNOD", container.Capabilities) { t.Fatal("container should not have MKNOD enabled") } } diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go index a7b3d9a107..9de500dbe5 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -3,6 +3,7 @@ package native import ( "fmt" "os" + "os/exec" "path/filepath" "github.com/dotcloud/docker/daemon/execdriver" @@ -10,6 +11,7 @@ import ( "github.com/dotcloud/docker/daemon/execdriver/native/template" "github.com/dotcloud/docker/pkg/apparmor" "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/devices" ) // createContainer populates and configures the container type with the @@ -23,6 +25,8 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container container.WorkingDir = c.WorkingDir container.Env = c.Env container.Cgroups.Name = c.ID + container.Cgroups.AllowedDevices = c.AllowedDevices + container.DeviceNodes = c.AutoCreatedDevices // check to see if we are running in ramdisk to disable pivot root container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" container.Context["restrictions"] = "true" @@ -34,8 +38,6 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container if err := d.setPrivileged(container); err != nil { return nil, err } - } else { - container.Mounts = append(container.Mounts, libcontainer.Mount{Type: "devtmpfs"}) } if err := d.setupCgroups(container, c); err != nil { return 
nil, err @@ -46,7 +48,13 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container if err := d.setupLabels(container, c); err != nil { return nil, err } - if err := configuration.ParseConfiguration(container, d.activeContainers, c.Config["native"]); err != nil { + cmds := make(map[string]*exec.Cmd) + d.Lock() + for k, v := range d.activeContainers { + cmds[k] = v.cmd + } + d.Unlock() + if err := configuration.ParseConfiguration(container, cmds, c.Config["native"]); err != nil { return nil, err } return container, nil @@ -82,10 +90,14 @@ func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver. } if c.Network.ContainerID != "" { - cmd := d.activeContainers[c.Network.ContainerID] - if cmd == nil || cmd.Process == nil { + d.Lock() + active := d.activeContainers[c.Network.ContainerID] + d.Unlock() + if active == nil || active.cmd.Process == nil { return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID) } + cmd := active.cmd + nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net") container.Networks = append(container.Networks, &libcontainer.Network{ Type: "netns", @@ -97,11 +109,15 @@ func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver. return nil } -func (d *driver) setPrivileged(container *libcontainer.Container) error { - for key := range container.CapabilitiesMask { - container.CapabilitiesMask[key] = true +func (d *driver) setPrivileged(container *libcontainer.Container) (err error) { + container.Capabilities = libcontainer.GetAllCapabilities() + container.Cgroups.AllowAllDevices = true + + hostDeviceNodes, err := devices.GetHostDeviceNodes() + if err != nil { + return err } - container.Cgroups.DeviceAccess = true + container.DeviceNodes = hostDeviceNodes delete(container.Context, "restrictions") @@ -117,6 +133,7 @@ func (d *driver) setupCgroups(container *libcontainer.Container, c *execdriver.C container.Cgroups.Memory = c.Resources.Memory container.Cgroups.MemoryReservation = c.Resources.Memory container.Cgroups.MemorySwap = c.Resources.MemorySwap + container.Cgroups.CpusetCpus = c.Resources.Cpuset } return nil } diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go index 2e57729d4b..d3805b493c 100644 --- a/daemon/execdriver/native/driver.go +++ b/daemon/execdriver/native/driver.go @@ -7,22 +7,22 @@ import ( "os" "os/exec" "path/filepath" - "strconv" "strings" + "sync" "syscall" "github.com/dotcloud/docker/daemon/execdriver" "github.com/dotcloud/docker/pkg/apparmor" - "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/nsinit" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups/fs" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups/systemd" + "github.com/dotcloud/docker/pkg/libcontainer/namespaces" "github.com/dotcloud/docker/pkg/system" ) const ( - DriverName = "native" - Version = "0.2" - BackupApparmorProfilePath = "apparmor/docker.back" // relative to docker root + DriverName = "native" + Version = "0.2" ) func init() { @@ -42,35 +42,43 @@ func init() { if err != nil { return err } - syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(args.Pipe)) + syncPipe, err := namespaces.NewSyncPipeFromFd(0, uintptr(args.Pipe)) if err != nil { return err } - if err := nsinit.Init(container, rootfs, args.Console, syncPipe, args.Args); err != nil { + if err := namespaces.Init(container, rootfs, args.Console, syncPipe, args.Args); err != nil { return 
err } return nil }) } +type activeContainer struct { + container *libcontainer.Container + cmd *exec.Cmd +} + type driver struct { root string initPath string - activeContainers map[string]*exec.Cmd + activeContainers map[string]*activeContainer + sync.Mutex } func NewDriver(root, initPath string) (*driver, error) { if err := os.MkdirAll(root, 0700); err != nil { return nil, err } + // native driver root is at docker_root/execdriver/native. Put apparmor at docker_root - if err := apparmor.InstallDefaultProfile(filepath.Join(root, "../..", BackupApparmorProfilePath)); err != nil { + if err := apparmor.InstallDefaultProfile(); err != nil { return nil, err } + return &driver{ root: root, initPath: initPath, - activeContainers: make(map[string]*exec.Cmd), + activeContainers: make(map[string]*activeContainer), }, nil } @@ -80,7 +88,12 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba if err != nil { return -1, err } - d.activeContainers[c.ID] = &c.Cmd + d.Lock() + d.activeContainers[c.ID] = &activeContainer{ + container: container, + cmd: &c.Cmd, + } + d.Unlock() var ( dataPath = filepath.Join(d.root, c.ID) @@ -97,8 +110,8 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba term := getTerminal(c, pipes) - return nsinit.Exec(container, term, c.Rootfs, dataPath, args, func(container *libcontainer.Container, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd { - // we need to join the rootfs because nsinit will setup the rootfs and chroot + return namespaces.Exec(container, term, c.Rootfs, dataPath, args, func(container *libcontainer.Container, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd { + // we need to join the rootfs because namespaces will setup the rootfs and chroot initPath := filepath.Join(c.Rootfs, c.InitPath) c.Path = d.initPath @@ -113,7 +126,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba // set this to nil so that when we set the clone flags anything else is reset c.SysProcAttr = nil - system.SetCloneFlags(&c.Cmd, uintptr(nsinit.GetNamespaceFlags(container.Namespaces))) + system.SetCloneFlags(&c.Cmd, uintptr(namespaces.GetNamespaceFlags(container.Namespaces))) c.ExtraFiles = []*os.File{child} c.Env = container.Env @@ -132,6 +145,30 @@ func (d *driver) Kill(p *execdriver.Command, sig int) error { return syscall.Kill(p.Process.Pid, syscall.Signal(sig)) } +func (d *driver) Pause(c *execdriver.Command) error { + active := d.activeContainers[c.ID] + if active == nil { + return fmt.Errorf("active container for %s does not exist", c.ID) + } + active.container.Cgroups.Freezer = "FROZEN" + if systemd.UseSystemd() { + return systemd.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer) + } + return fs.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer) +} + +func (d *driver) Unpause(c *execdriver.Command) error { + active := d.activeContainers[c.ID] + if active == nil { + return fmt.Errorf("active container for %s does not exist", c.ID) + } + active.container.Cgroups.Freezer = "THAWED" + if systemd.UseSystemd() { + return systemd.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer) + } + return fs.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer) +} + func (d *driver) Terminate(p *execdriver.Command) error { // lets check the start time for the process started, err := d.readStartTime(p) @@ -150,6 +187,7 @@ func (d *driver) Terminate(p *execdriver.Command) error { } if 
started == currentStartTime { err = syscall.Kill(p.Process.Pid, 9) + syscall.Wait4(p.Process.Pid, nil, 0, nil) } d.removeContainerRoot(p.ID) return err @@ -175,41 +213,20 @@ func (d *driver) Name() string { return fmt.Sprintf("%s-%s", DriverName, Version) } -// TODO: this can be improved with our driver -// there has to be a better way to do this func (d *driver) GetPidsForContainer(id string) ([]int, error) { - pids := []int{} + d.Lock() + active := d.activeContainers[id] + d.Unlock() - subsystem := "devices" - cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) - if err != nil { - return pids, err - } - cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) - if err != nil { - return pids, err + if active == nil { + return nil, fmt.Errorf("active container for %s does not exist", id) } + c := active.container.Cgroups - filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") - if _, err := os.Stat(filename); os.IsNotExist(err) { - filename = filepath.Join(cgroupRoot, cgroupDir, "docker", id, "tasks") + if systemd.UseSystemd() { + return systemd.GetPids(c) } - - output, err := ioutil.ReadFile(filename) - if err != nil { - return pids, err - } - for _, p := range strings.Split(string(output), "\n") { - if len(p) == 0 { - continue - } - pid, err := strconv.Atoi(p) - if err != nil { - return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) - } - pids = append(pids, pid) - } - return pids, nil + return fs.GetPids(c) } func (d *driver) writeContainerFile(container *libcontainer.Container, id string) error { @@ -225,6 +242,10 @@ func (d *driver) createContainerRoot(id string) error { } func (d *driver) removeContainerRoot(id string) error { + d.Lock() + delete(d.activeContainers, id) + d.Unlock() + return os.RemoveAll(filepath.Join(d.root, id)) } @@ -238,8 +259,8 @@ func getEnv(key string, env []string) string { return "" } -func getTerminal(c *execdriver.Command, pipes *execdriver.Pipes) nsinit.Terminal { - var term nsinit.Terminal +func getTerminal(c *execdriver.Command, pipes *execdriver.Pipes) namespaces.Terminal { + var term namespaces.Terminal if c.Tty { term = &dockerTtyTerm{ pipes: pipes, diff --git a/daemon/execdriver/native/template/default_template.go b/daemon/execdriver/native/template/default_template.go index 249c5d5fe8..e7d3143df9 100644 --- a/daemon/execdriver/native/template/default_template.go +++ b/daemon/execdriver/native/template/default_template.go @@ -2,30 +2,25 @@ package template import ( "github.com/dotcloud/docker/pkg/apparmor" - "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) // New returns the docker default configuration for libcontainer func New() *libcontainer.Container { container := &libcontainer.Container{ - CapabilitiesMask: map[string]bool{ - "SETPCAP": false, - "SYS_MODULE": false, - "SYS_RAWIO": false, - "SYS_PACCT": false, - "SYS_ADMIN": false, - "SYS_NICE": false, - "SYS_RESOURCE": false, - "SYS_TIME": false, - "SYS_TTY_CONFIG": false, - "AUDIT_WRITE": false, - "AUDIT_CONTROL": false, - "MAC_OVERRIDE": false, - "MAC_ADMIN": false, - "NET_ADMIN": false, - "MKNOD": true, - "SYSLOG": false, + Capabilities: []string{ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", }, Namespaces: map[string]bool{ "NEWNS": true, @@ -35,8 +30,8 @@ func New() *libcontainer.Container { "NEWNET": true, }, Cgroups: &cgroups.Cgroup{ - Parent: "docker", - DeviceAccess: 
false, + Parent: "docker", + AllowAllDevices: false, }, Context: libcontainer.Context{}, } diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go index 12b7a77fb3..97e9b9748a 100644 --- a/daemon/graphdriver/aufs/aufs.go +++ b/daemon/graphdriver/aufs/aufs.go @@ -7,7 +7,7 @@ aufs driver directory structure │   ├── 1 │   ├── 2 │   └── 3 -├── diffs // Content of the layer +├── diff // Content of the layer │   ├── 1 // Contains layers that need to be mounted for the id │   ├── 2 │   └── 3 @@ -23,20 +23,26 @@ package aufs import ( "bufio" "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/daemon/graphdriver" - "github.com/dotcloud/docker/pkg/label" - mountpk "github.com/dotcloud/docker/pkg/mount" - "github.com/dotcloud/docker/utils" "os" "os/exec" "path" "strings" "sync" + "syscall" + + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/daemon/graphdriver" + "github.com/dotcloud/docker/pkg/label" + mountpk "github.com/dotcloud/docker/pkg/mount" + "github.com/dotcloud/docker/utils" ) var ( ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + incompatibleFsMagic = []graphdriver.FsMagic{ + graphdriver.FsMagicBtrfs, + graphdriver.FsMagicAufs, + } ) func init() { @@ -51,11 +57,25 @@ type Driver struct { // New returns a new AUFS driver. // An error is returned if AUFS is not supported. -func Init(root string) (graphdriver.Driver, error) { +func Init(root string, options []string) (graphdriver.Driver, error) { // Try to load the aufs kernel module if err := supportsAufs(); err != nil { - return nil, err + return nil, graphdriver.ErrNotSupported } + + rootdir := path.Dir(root) + + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return nil, fmt.Errorf("Couldn't stat the root directory: %s", err) + } + + for _, magic := range incompatibleFsMagic { + if graphdriver.FsMagic(buf.Type) == magic { + return nil, graphdriver.ErrIncompatibleFS + } + } + paths := []string{ "mnt", "diff", @@ -77,6 +97,10 @@ func Init(root string) (graphdriver.Driver, error) { return nil, err } + if err := graphdriver.MakePrivate(root); err != nil { + return nil, err + } + for _, p := range paths { if err := os.MkdirAll(path.Join(root, p), 0755); err != nil { return nil, err @@ -351,12 +375,14 @@ func (a *Driver) Cleanup() error { if err != nil { return err } + for _, id := range ids { if err := a.unmount(id); err != nil { utils.Errorf("Unmounting %s: %s", utils.TruncateID(id), err) } } - return nil + + return mountpk.Unmount(a.root) } func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go index 1ffa264aa1..b3bad410a5 100644 --- a/daemon/graphdriver/aufs/aufs_test.go +++ b/daemon/graphdriver/aufs/aufs_test.go @@ -17,9 +17,9 @@ var ( ) func testInit(dir string, t *testing.T) graphdriver.Driver { - d, err := Init(dir) + d, err := Init(dir, nil) if err != nil { - if err == ErrAufsNotSupported { + if err == graphdriver.ErrNotSupported { t.Skip(err) } else { t.Fatal(err) diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go index 4d195537eb..f561244c51 100644 --- a/daemon/graphdriver/btrfs/btrfs.go +++ b/daemon/graphdriver/btrfs/btrfs.go @@ -11,18 +11,20 @@ import "C" import ( "fmt" - "github.com/dotcloud/docker/daemon/graphdriver" "os" "path" "syscall" "unsafe" + + "github.com/dotcloud/docker/daemon/graphdriver" + "github.com/dotcloud/docker/pkg/mount" ) func init() { 
graphdriver.Register("btrfs", Init) } -func Init(home string) (graphdriver.Driver, error) { +func Init(home string, options []string) (graphdriver.Driver, error) { rootdir := path.Dir(home) var buf syscall.Statfs_t @@ -30,8 +32,16 @@ func Init(home string) (graphdriver.Driver, error) { return nil, err } - if buf.Type != 0x9123683E { - return nil, fmt.Errorf("%s is not a btrfs filesystem", rootdir) + if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicBtrfs { + return nil, graphdriver.ErrPrerequisites + } + + if err := os.MkdirAll(home, 0700); err != nil { + return nil, err + } + + if err := graphdriver.MakePrivate(home); err != nil { + return nil, err } return &Driver{ @@ -52,7 +62,7 @@ func (d *Driver) Status() [][2]string { } func (d *Driver) Cleanup() error { - return nil + return mount.Unmount(d.home) } func free(p *C.char) { diff --git a/daemon/graphdriver/btrfs/btrfs_test.go b/daemon/graphdriver/btrfs/btrfs_test.go new file mode 100644 index 0000000000..3069a98557 --- /dev/null +++ b/daemon/graphdriver/btrfs/btrfs_test.go @@ -0,0 +1,28 @@ +package btrfs + +import ( + "github.com/dotcloud/docker/daemon/graphdriver/graphtest" + "testing" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown +func TestBtrfsSetup(t *testing.T) { + graphtest.GetDriver(t, "btrfs") +} + +func TestBtrfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "btrfs") +} + +func TestBtrfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "btrfs") +} + +func TestBtrfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "btrfs") +} + +func TestBtrfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/daemon/graphdriver/devmapper/README.md b/daemon/graphdriver/devmapper/README.md new file mode 100644 index 0000000000..c8ab1d1ee1 --- /dev/null +++ b/daemon/graphdriver/devmapper/README.md @@ -0,0 +1,143 @@ +## devicemapper - a storage backend based on Device Mapper + +### Theory of operation + +The device mapper graphdriver uses the device mapper thin provisioning +module (dm-thinp) to implement CoW snapshots. For each devicemapper +graph location (typically `/var/lib/docker/devicemapper`, $graph below) +a thin pool is created based on two block devices, one for data and +one for metadata. By default these block devices are created +automatically by using loopback mounts of automatically created sparse +files. + +The default loopback files used are `$graph/devicemapper/data` and +`$graph/devicemapper/metadata`. Additional metadata required to map +from docker entities to the corresponding devicemapper volumes is +stored in the `$graph/devicemapper/json` file (encoded as JSON). + +In order to support multiple devicemapper graphs on a system, the thin +pool will be named something like: `docker-0:33-19478248-pool`, where +the `0:33` part is the major/minor device number and `19478248` is the +inode number of the $graph directory. + +On the thin pool docker automatically creates a base thin device, +called something like `docker-0:33-19478248-base`, of a fixed +size. This is automatically formatted on creation and contains just an +empty filesystem. This device is the base of all docker images and +containers. All base images are snapshots of this device and those +images are then in turn used as snapshots for other images and +eventually containers.
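The pool-naming scheme described above can be reproduced from a stat of the graph directory. The sketch below derives a docker-MAJOR:MINOR-INODE-pool name; the major/minor extraction uses the common Linux dev_t encoding and is an illustration of the naming rule, not the code docker itself runs:

```go
package main

import (
	"fmt"
	"syscall"
)

// poolNameFor sketches how a devicemapper pool name of the form
// docker-MAJOR:MINOR-INODE-pool can be derived for a graph directory,
// matching the naming scheme the README describes.
func poolNameFor(graphDir string) (string, error) {
	var st syscall.Stat_t
	if err := syscall.Stat(graphDir, &st); err != nil {
		return "", err
	}
	// Standard Linux dev_t decoding (assumption for illustration).
	major := (st.Dev >> 8) & 0xfff
	minor := (st.Dev & 0xff) | ((st.Dev >> 12) & 0xfff00)
	return fmt.Sprintf("docker-%d:%d-%d-pool", major, minor, st.Ino), nil
}

func main() {
	name, err := poolNameFor("/var/lib/docker/devicemapper")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(name) // e.g. docker-0:33-19478248-pool
}
```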
+ +### options + +The devicemapper backend supports some options that you can specify +when starting the docker daemon using the --storage-opt flags. +This uses the `dm` prefix and would be used something like `docker -d --storage-opt dm.foo=bar`. + +Here is the list of supported options: + + * `dm.basesize` + + Specifies the size to use when creating the base device, which + limits the size of images and containers. The default value is + 10G. Note that thin devices are inherently "sparse", so a 10G device + which is mostly empty doesn't use 10 GB of space on the + pool. However, the larger the device is, the more space the + filesystem itself uses even when empty. + + Example use: + + ``docker -d --storage-opt dm.basesize=20G`` + + * `dm.loopdatasize` + + Specifies the size to use when creating the loopback file for the + "data" device which is used for the thin pool. The default size is + 100G. Note that the file is sparse, so it will not initially take + up this much space. + + Example use: + + ``docker -d --storage-opt dm.loopdatasize=200G`` + + * `dm.loopmetadatasize` + + Specifies the size to use when creating the loopback file for the + "metadata" device which is used for the thin pool. The default size is + 2G. Note that the file is sparse, so it will not initially take + up this much space. + + Example use: + + ``docker -d --storage-opt dm.loopmetadatasize=4G`` + + * `dm.fs` + + Specifies the filesystem type to use for the base device. The supported + options are "ext4" and "xfs". The default is "ext4". + + Example use: + + ``docker -d --storage-opt dm.fs=xfs`` + + * `dm.mkfsarg` + + Specifies extra mkfs arguments to be used when creating the base device. + + Example use: + + ``docker -d --storage-opt "dm.mkfsarg=-O ^has_journal"`` + + * `dm.mountopt` + + Specifies extra mount options used when mounting the thin devices. + + Example use: + + ``docker -d --storage-opt dm.mountopt=nodiscard`` + + * `dm.datadev` + + Specifies a custom blockdevice to use for data for the thin pool. + + If using a block device for device mapper storage, ideally both + datadev and metadatadev should be specified to completely avoid + using the loopback device. + + Example use: + + ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1`` + + * `dm.metadatadev` + + Specifies a custom blockdevice to use for metadata for the thin + pool. + + For best performance the metadata should be on a different spindle + than the data, or even better on an SSD. + + If setting up a new metadata pool, it is required to be valid. This + can be achieved by zeroing the first 4k to indicate empty + metadata, like this: + + ``dd if=/dev/zero of=$metadata_dev bs=4096 count=1`` + + Example use: + + ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1`` + + * `dm.blkdiscard` + + Enables or disables the use of blkdiscard when removing + devicemapper devices. This is enabled by default (only) if using + loopback devices and is required to re-sparsify the loopback file + on image/container removal. + + Disabling this on loopback can lead to *much* faster container + removal times, but will prevent the space used in the /var/lib/docker + directory from being returned to the system for other use when + containers are removed.
diff --git a/daemon/graphdriver/devmapper/attach_loopback.go b/daemon/graphdriver/devmapper/attach_loopback.go
index 23339076e8..d2ab8c3a4b 100644
--- a/daemon/graphdriver/devmapper/attach_loopback.go
+++ b/daemon/graphdriver/devmapper/attach_loopback.go
@@ -4,6 +4,9 @@ package devmapper import ( "fmt"
+	"os"
+	"syscall"
+
 "github.com/dotcloud/docker/utils" )
@@ -14,7 +17,7 @@ func stringToLoopName(src string) [LoNameSize]uint8 { }
 func getNextFreeLoopbackIndex() (int, error) {
-	f, err := osOpenFile("/dev/loop-control", osORdOnly, 0644)
+	f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644)
 if err != nil { return 0, err }
@@ -27,27 +30,27 @@ func getNextFreeLoopbackIndex() (int, error) { return index, err }
-func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile, err error) {
+func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) {
 // Start looking for a free /dev/loop
 for { target := fmt.Sprintf("/dev/loop%d", index) index++
-	fi, err := osStat(target)
+	fi, err := os.Stat(target)
 if err != nil {
-		if osIsNotExist(err) {
+		if os.IsNotExist(err) {
 utils.Errorf("There are no more loopback devices available.") }
 return nil, ErrAttachLoopbackDevice }
-	if fi.Mode()&osModeDevice != osModeDevice {
+	if fi.Mode()&os.ModeDevice != os.ModeDevice {
 utils.Errorf("Loopback device %s is not a block device.", target) continue }
 // OpenFile adds O_CLOEXEC
-	loopFile, err = osOpenFile(target, osORdWr, 0644)
+	loopFile, err = os.OpenFile(target, os.O_RDWR, 0644)
 if err != nil { utils.Errorf("Error opening loopback device: %s", err) return nil, ErrAttachLoopbackDevice
@@ -58,7 +61,7 @@ func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile,
 loopFile.Close()
 // If the error is EBUSY, then try the next loopback
-	if err != sysEBusy {
+	if err != syscall.EBUSY {
 utils.Errorf("Cannot set up loopback device %s: %s", target, err) return nil, ErrAttachLoopbackDevice }
@@ -80,8 +83,8 @@ func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile, }
 // attachLoopDevice attaches the given sparse file to the next
-// available loopback device. It returns an opened *osFile.
-func attachLoopDevice(sparseName string) (loop *osFile, err error) {
+// available loopback device. It returns an opened *os.File.
+func attachLoopDevice(sparseName string) (loop *os.File, err error) {
 // Try to retrieve the next available loopback device via syscall.
 // If it fails, we discard error and start looking for a
@@ -92,7 +95,7 @@ func attachLoopDevice(sparseName string) (loop *osFile, err error) { }
 // OpenFile adds O_CLOEXEC
-	sparseFile, err := osOpenFile(sparseName, osORdWr, 0644)
+	sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
 if err != nil { utils.Errorf("Error opening sparse file %s: %s", sparseName, err) return nil, ErrAttachLoopbackDevice
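Taken together, getNextFreeLoopbackIndex and openNextAvailableLoopback implement a linear scan with an EBUSY retry: ask /dev/loop-control for a free index when possible, then walk /dev/loopN and bind the sparse file to the first device that accepts it. A rough, self-contained sketch of that pattern (assuming Linux and the LOOP_SET_FD ioctl value from <linux/loop.h>; error handling is simplified and the loop status setup the full driver performs is omitted, so this is illustrative rather than the driver's exact code):

```go
// Sketch only: bind a backing file to the first free /dev/loopN device.
package main

import (
	"fmt"
	"os"
	"syscall"
)

const loopSetFd = 0x4C00 // LOOP_SET_FD ioctl request, from <linux/loop.h>

// attachFirstFree walks /dev/loop0, /dev/loop1, ... and binds the backing
// file to the first device that accepts LOOP_SET_FD, skipping devices that
// report EBUSY because they already back another file.
func attachFirstFree(backing *os.File) (*os.File, error) {
	for index := 0; ; index++ {
		target := fmt.Sprintf("/dev/loop%d", index)

		fi, err := os.Stat(target)
		if err != nil {
			// Ran out of loop device nodes without finding a free one.
			return nil, fmt.Errorf("no free loopback device found")
		}
		if fi.Mode()&os.ModeDevice != os.ModeDevice {
			continue // not a device node; try the next index
		}

		loop, err := os.OpenFile(target, os.O_RDWR, 0644)
		if err != nil {
			return nil, err
		}

		_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, loop.Fd(), loopSetFd, backing.Fd())
		if errno == 0 {
			return loop, nil // bound: the device now serves the backing file
		}
		loop.Close()
		if errno != syscall.EBUSY {
			return nil, errno // a real failure, give up
		}
		// EBUSY: device already in use, try the next one.
	}
}

func main() {
	backing, err := os.OpenFile(os.Args[1], os.O_RDWR, 0644)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	loop, err := attachFirstFree(backing)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("attached to", loop.Name())
}
```

The retry is necessary because another process can grab a loop device between the open and the LOOP_SET_FD call, so an EBUSY result simply means "try the next index", exactly as the comment in openNextAvailableLoopback states.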
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go
index a96331d812..48323f6610 100644
--- a/daemon/graphdriver/devmapper/deviceset.go
+++ b/daemon/graphdriver/devmapper/deviceset.go
@@ -8,6 +8,8 @@ import ( "fmt" "io" "io/ioutil"
+	"os"
+	"os/exec"
 "path" "path/filepath" "strconv"
@@ -16,7 +18,9 @@ import ( "syscall" "time"
+	"github.com/dotcloud/docker/daemon/graphdriver"
 "github.com/dotcloud/docker/pkg/label"
+	"github.com/dotcloud/docker/pkg/units"
 "github.com/dotcloud/docker/utils" )
@@ -62,8 +66,18 @@ type DeviceSet struct { devicePrefix string TransactionId uint64 NewTransactionId uint64
-	nextFreeDevice int
-	sawBusy bool
+	nextDeviceId int
+
+	// Options
+	dataLoopbackSize int64
+	metaDataLoopbackSize int64
+	baseFsSize uint64
+	filesystem string
+	mountOptions string
+	mkfsArgs []string
+	dataDevice string
+	metadataDevice string
+	doBlkDiscard bool
 }
 type DiskUsage struct {
@@ -109,7 +123,19 @@ func (devices *DeviceSet) loopbackDir() string { return path.Join(devices.root, "devicemapper") }
-func (devices *DeviceSet) jsonFile() string {
+func (devices *DeviceSet) metadataDir() string {
+	return path.Join(devices.root, "metadata")
+}
+
+func (devices *DeviceSet) metadataFile(info *DevInfo) string {
+	file := info.Hash
+	if file == "" {
+		file = "base"
+	}
+	return path.Join(devices.metadataDir(), file)
+}
+
+func (devices *DeviceSet) oldMetadataFile() string {
 return path.Join(devices.loopbackDir(), "json") }
@@ -125,7 +151,7 @@ func (devices *DeviceSet) hasImage(name string) bool { dirname := devices.loopbackDir() filename := path.Join(dirname, name)
-	_, err := osStat(filename)
+	_, err := os.Stat(filename)
 return err == nil }
@@ -137,16 +163,16 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { dirname := devices.loopbackDir() filename := path.Join(dirname, name)
-	if err := osMkdirAll(dirname, 0700); err != nil && !osIsExist(err) {
+	if err := os.MkdirAll(dirname, 0700); err != nil && !os.IsExist(err) {
 return "", err }
-	if _, err := osStat(filename); err != nil {
-		if !osIsNotExist(err) {
+	if _, err := os.Stat(filename); err != nil {
+		if !os.IsNotExist(err) {
 return "", err }
 utils.Debugf("Creating loopback file %s for device-manager use", filename)
-	file, err := osOpenFile(filename, osORdWr|osOCreate, 0600)
+	file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
 if err != nil { return "", err }
@@ -159,26 +185,24 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { return filename, nil }
-func (devices *DeviceSet) allocateDeviceId() int {
-	// TODO: Add smarter reuse of deleted devices
-	id := devices.nextFreeDevice
-	devices.nextFreeDevice = devices.nextFreeDevice + 1
-	return id
-}
-
 func (devices *DeviceSet) allocateTransactionId() uint64 { devices.NewTransactionId = devices.NewTransactionId + 1 return devices.NewTransactionId }
-func (devices *DeviceSet) saveMetadata() error {
-	devices.devicesLock.Lock()
-	jsonData, err := json.Marshal(devices.MetaData)
-	devices.devicesLock.Unlock()
+func (devices *DeviceSet) removeMetadata(info *DevInfo)
error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +func (devices *DeviceSet) saveMetadata(info *DevInfo) error { + jsonData, err := json.Marshal(info) if err != nil { return fmt.Errorf("Error encoding metadata to json: %s", err) } - tmpFile, err := ioutil.TempFile(filepath.Dir(devices.jsonFile()), ".json") + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") if err != nil { return fmt.Errorf("Error creating metadata file: %s", err) } @@ -196,7 +220,7 @@ func (devices *DeviceSet) saveMetadata() error { if err := tmpFile.Close(); err != nil { return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err) } - if err := osRename(tmpFile.Name(), devices.jsonFile()); err != nil { + if err := os.Rename(tmpFile.Name(), devices.metadataFile(info)); err != nil { return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err) } @@ -214,7 +238,12 @@ func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) { defer devices.devicesLock.Unlock() info := devices.Devices[hash] if info == nil { - return nil, fmt.Errorf("Unknown device %s", hash) + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("Unknown device %s", hash) + } + + devices.Devices[hash] = info } return info, nil } @@ -234,7 +263,7 @@ func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*Dev devices.Devices[hash] = info devices.devicesLock.Unlock() - if err := devices.saveMetadata(); err != nil { + if err := devices.saveMetadata(info); err != nil { // Try to remove unused device devices.devicesLock.Lock() delete(devices.Devices, hash) @@ -258,63 +287,94 @@ func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error { func (devices *DeviceSet) createFilesystem(info *DevInfo) error { devname := info.DevName() - err := execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0,lazy_journal_init=0", devname) - if err != nil { - err = execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0", devname) + args := []string{} + for _, arg := range devices.mkfsArgs { + args = append(args, arg) + } + + args = append(args, devname) + + var err error + switch devices.filesystem { + case "xfs": + err = exec.Command("mkfs.xfs", args...).Run() + case "ext4": + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + default: + err = fmt.Errorf("Unsupported filesystem type %s", devices.filesystem) } if err != nil { - utils.Debugf("\n--->Err: %s\n", err) return err } + return nil } -func (devices *DeviceSet) loadMetaData() error { - utils.Debugf("loadMetadata()") - defer utils.Debugf("loadMetadata END") +func (devices *DeviceSet) initMetaData() error { _, _, _, params, err := getStatus(devices.getPoolName()) if err != nil { - utils.Debugf("\n--->Err: %s\n", err) return err } if _, err := fmt.Sscanf(params, "%d", &devices.TransactionId); err != nil { - utils.Debugf("\n--->Err: %s\n", err) return err } devices.NewTransactionId = devices.TransactionId - jsonData, err := ioutil.ReadFile(devices.jsonFile()) - if err != nil && !osIsNotExist(err) { - utils.Debugf("\n--->Err: %s\n", err) + // Migrate old metadatafile + + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { return 
err } - devices.MetaData.Devices = make(map[string]*DevInfo) if jsonData != nil { - if err := json.Unmarshal(jsonData, &devices.MetaData); err != nil { - utils.Debugf("\n--->Err: %s\n", err) + m := MetaData{Devices: make(map[string]*DevInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { return err } - } - for hash, d := range devices.Devices { - d.Hash = hash - d.devices = devices + for hash, info := range m.Devices { + info.Hash = hash - if d.DeviceId >= devices.nextFreeDevice { - devices.nextFreeDevice = d.DeviceId + 1 + // If the transaction id is larger than the actual one we lost the device due to some crash + if info.TransactionId <= devices.TransactionId { + devices.saveMetadata(info) + } + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err } - // If the transaction id is larger than the actual one we lost the device due to some crash - if d.TransactionId > devices.TransactionId { - utils.Debugf("Removing lost device %s with id %d", hash, d.TransactionId) - delete(devices.Devices, hash) - } } + return nil } +func (devices *DeviceSet) loadMetadata(hash string) *DevInfo { + info := &DevInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + return nil + } + + // If the transaction id is larger than the actual one we lost the device due to some crash + if info.TransactionId > devices.TransactionId { + return nil + } + + return info +} + func (devices *DeviceSet) setupBaseImage() error { oldInfo, _ := devices.lookupDevice("") if oldInfo != nil && oldInfo.Initialized { @@ -324,45 +384,42 @@ func (devices *DeviceSet) setupBaseImage() error { if oldInfo != nil && !oldInfo.Initialized { utils.Debugf("Removing uninitialized base image") if err := devices.deleteDevice(oldInfo); err != nil { - utils.Debugf("\n--->Err: %s\n", err) return err } } utils.Debugf("Initializing base device-manager snapshot") - id := devices.allocateDeviceId() + id := devices.nextDeviceId // Create initial device - if err := createDevice(devices.getPoolDevName(), id); err != nil { - utils.Debugf("\n--->Err: %s\n", err) + if err := createDevice(devices.getPoolDevName(), &id); err != nil { return err } - utils.Debugf("Registering base device (id %v) with FS size %v", id, DefaultBaseFsSize) - info, err := devices.registerDevice(id, "", DefaultBaseFsSize) + // Ids are 24bit, so wrap around + devices.nextDeviceId = (id + 1) & 0xffffff + + utils.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize) + info, err := devices.registerDevice(id, "", devices.baseFsSize) if err != nil { _ = deleteDevice(devices.getPoolDevName(), id) - utils.Debugf("\n--->Err: %s\n", err) return err } utils.Debugf("Creating filesystem on base device-manager snapshot") if err = devices.activateDeviceIfNeeded(info); err != nil { - utils.Debugf("\n--->Err: %s\n", err) return err } if err := devices.createFilesystem(info); err != nil { - utils.Debugf("\n--->Err: %s\n", err) return err } info.Initialized = true - if err = devices.saveMetadata(); err != nil { + if err = devices.saveMetadata(info); err != nil { info.Initialized = false - utils.Debugf("\n--->Err: %s\n", err) return err } @@ -372,11 +429,11 @@ func (devices *DeviceSet) setupBaseImage() error { func setCloseOnExec(name string) { if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { for _, i := range fileInfos { - link, _ := 
osReadlink(filepath.Join("/proc/self/fd", i.Name())) + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) if link == name { fd, err := strconv.Atoi(i.Name()) if err == nil { - sysCloseOnExec(fd) + syscall.CloseOnExec(fd) } } } @@ -388,10 +445,6 @@ func (devices *DeviceSet) log(level int, file string, line int, dmError int, mes return // Ignore _LOG_DEBUG } - if strings.Contains(message, "busy") { - devices.sawBusy = true - } - utils.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) } @@ -408,7 +461,7 @@ func (devices *DeviceSet) ResizePool(size int64) error { datafilename := path.Join(dirname, "data") metadatafilename := path.Join(dirname, "metadata") - datafile, err := osOpenFile(datafilename, osORdWr, 0) + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) if datafile == nil { return err } @@ -429,7 +482,7 @@ func (devices *DeviceSet) ResizePool(size int64) error { } defer dataloopback.Close() - metadatafile, err := osOpenFile(metadatafilename, osORdWr, 0) + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) if metadatafile == nil { return err } @@ -472,39 +525,23 @@ func (devices *DeviceSet) ResizePool(size int64) error { func (devices *DeviceSet) initDevmapper(doInit bool) error { logInit(devices) - // Make sure the sparse images exist in /devicemapper/data and - // /devicemapper/metadata - - hasData := devices.hasImage("data") - hasMetadata := devices.hasImage("metadata") - - if !doInit && !hasData { - return errors.New("Loopback data file not found") - } - - if !doInit && !hasMetadata { - return errors.New("Loopback metadata file not found") - } - - createdLoopback := !hasData || !hasMetadata - data, err := devices.ensureImage("data", DefaultDataLoopbackSize) + _, err := getDriverVersion() if err != nil { - utils.Debugf("Error device ensureImage (data): %s\n", err) - return err + // Can't even get driver version, assume not supported + return graphdriver.ErrNotSupported } - metadata, err := devices.ensureImage("metadata", DefaultMetaDataLoopbackSize) - if err != nil { - utils.Debugf("Error device ensureImage (metadata): %s\n", err) + + if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { return err } // Set the device prefix from the device id and inode of the docker root dir - st, err := osStat(devices.root) + st, err := os.Stat(devices.root) if err != nil { return fmt.Errorf("Error looking up dir %s: %s", devices.root, err) } - sysSt := toSysStatT(st.Sys()) + sysSt := st.Sys().(*syscall.Stat_t) // "reg-" stands for "regular file". // In the future we might use "dev-" for "device file", etc. // docker-maj,min[-inode] stands for: @@ -527,35 +564,91 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { // so we add this badhack to make sure it closes itself setCloseOnExec("/dev/mapper/control") + // Make sure the sparse images exist in /devicemapper/data and + // /devicemapper/metadata + + createdLoopback := false + // If the pool doesn't exist, create it if info.Exists == 0 { utils.Debugf("Pool doesn't exist. 
Creating it.") - dataFile, err := attachLoopDevice(data) - if err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err + var ( + dataFile *os.File + metadataFile *os.File + ) + + if devices.dataDevice == "" { + // Make sure the sparse images exist in /devicemapper/data + + hasData := devices.hasImage("data") + + if !doInit && !hasData { + return errors.New("Loopback data file not found") + } + + if !hasData { + createdLoopback = true + } + + data, err := devices.ensureImage("data", devices.dataLoopbackSize) + if err != nil { + utils.Debugf("Error device ensureImage (data): %s\n", err) + return err + } + + dataFile, err = attachLoopDevice(data) + if err != nil { + return err + } + } else { + dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } } defer dataFile.Close() - metadataFile, err := attachLoopDevice(metadata) - if err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err + if devices.metadataDevice == "" { + // Make sure the sparse images exist in /devicemapper/metadata + + hasMetadata := devices.hasImage("metadata") + + if !doInit && !hasMetadata { + return errors.New("Loopback metadata file not found") + } + + if !hasMetadata { + createdLoopback = true + } + + metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) + if err != nil { + utils.Debugf("Error device ensureImage (metadata): %s\n", err) + return err + } + + metadataFile, err = attachLoopDevice(metadata) + if err != nil { + return err + } + } else { + metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } } defer metadataFile.Close() if err := createPool(devices.getPoolName(), dataFile, metadataFile); err != nil { - utils.Debugf("\n--->Err: %s\n", err) return err } } // If we didn't just create the data or metadata image, we need to - // load the metadata from the existing file. 
+ // load the transaction id and migrate old metadata if !createdLoopback { - if err = devices.loadMetaData(); err != nil { - utils.Debugf("\n--->Err: %s\n", err) + if err = devices.initMetaData(); err != nil { return err } } @@ -587,13 +680,16 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { return fmt.Errorf("device %s already exists", hash) } - deviceId := devices.allocateDeviceId() + deviceId := devices.nextDeviceId - if err := devices.createSnapDevice(devices.getPoolDevName(), deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { + if err := createSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { utils.Debugf("Error creating snap device: %s\n", err) return err } + // Ids are 24bit, so wrap around + devices.nextDeviceId = (deviceId + 1) & 0xffffff + if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil { deleteDevice(devices.getPoolDevName(), deviceId) utils.Debugf("Error registering device: %s\n", err) @@ -603,12 +699,14 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { } func (devices *DeviceSet) deleteDevice(info *DevInfo) error { - // This is a workaround for the kernel not discarding block so - // on the thin pool when we remove a thinp device, so we do it - // manually - if err := devices.activateDeviceIfNeeded(info); err == nil { - if err := BlockDeviceDiscard(info.DevName()); err != nil { - utils.Debugf("Error discarding block on device: %s (ignoring)\n", err) + if devices.doBlkDiscard { + // This is a workaround for the kernel not discarding block so + // on the thin pool when we remove a thinp device, so we do it + // manually + if err := devices.activateDeviceIfNeeded(info); err == nil { + if err := BlockDeviceDiscard(info.DevName()); err != nil { + utils.Debugf("Error discarding block on device: %s (ignoring)\n", err) + } } } @@ -620,14 +718,6 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { } } - if info.Initialized { - info.Initialized = false - if err := devices.saveMetadata(); err != nil { - utils.Debugf("Error saving meta data: %s\n", err) - return err - } - } - if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil { utils.Debugf("Error deleting device: %s\n", err) return err @@ -638,11 +728,11 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { delete(devices.Devices, info.Hash) devices.devicesLock.Unlock() - if err := devices.saveMetadata(); err != nil { + if err := devices.removeMetadata(info); err != nil { devices.devicesLock.Lock() devices.Devices[info.Hash] = info devices.devicesLock.Unlock() - utils.Debugf("Error saving meta data: %s\n", err) + utils.Debugf("Error removing meta data: %s\n", err) return err } @@ -670,7 +760,6 @@ func (devices *DeviceSet) deactivatePool() error { devname := devices.getPoolDevName() devinfo, err := getInfo(devname) if err != nil { - utils.Debugf("\n--->Err: %s\n", err) return err } if devinfo.Exists != 0 { @@ -692,12 +781,10 @@ func (devices *DeviceSet) deactivateDevice(info *DevInfo) error { devinfo, err := getInfo(info.Name()) if err != nil { - utils.Debugf("\n--->Err: %s\n", err) return err } if devinfo.Exists != 0 { if err := devices.removeDeviceAndWait(info.Name()); err != nil { - utils.Debugf("\n--->Err: %s\n", err) return err } } @@ -711,12 +798,11 @@ func (devices *DeviceSet) removeDeviceAndWait(devname string) error { var err error for i := 0; i < 1000; i++ { - devices.sawBusy = false err = removeDevice(devname) if err == nil { break } - if 
!devices.sawBusy { + if err != ErrBusy { return err } @@ -813,7 +899,7 @@ func (devices *DeviceSet) Shutdown() error { // We use MNT_DETACH here in case it is still busy in some running // container. This means it'll go away from the global scope directly, // and the device will be released when that container dies. - if err := sysUnmount(info.mountPath, syscall.MNT_DETACH); err != nil { + if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil { utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err) } @@ -871,13 +957,26 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) } - var flags uintptr = sysMsMgcVal + var flags uintptr = syscall.MS_MGC_VAL - mountOptions := label.FormatMountLabel("discard", mountLabel) - err = sysMount(info.DevName(), path, "ext4", flags, mountOptions) - if err != nil && err == sysEInval { - mountOptions = label.FormatMountLabel("", mountLabel) - err = sysMount(info.DevName(), path, "ext4", flags, mountOptions) + fstype, err := ProbeFsType(info.DevName()) + if err != nil { + return err + } + + options := "" + + if fstype == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + + options = joinMountOptions(options, devices.mountOptions) + options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) + + err = syscall.Mount(info.DevName(), path, fstype, flags, joinMountOptions("discard", options)) + if err != nil && err == syscall.EINVAL { + err = syscall.Mount(info.DevName(), path, fstype, flags, options) } if err != nil { return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err) @@ -886,7 +985,7 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { info.mountCount = 1 info.mountPath = path - return devices.setInitialized(info) + return nil } func (devices *DeviceSet) UnmountDevice(hash string) error { @@ -914,8 +1013,7 @@ func (devices *DeviceSet) UnmountDevice(hash string) error { } utils.Debugf("[devmapper] Unmount(%s)", info.mountPath) - if err := sysUnmount(info.mountPath, 0); err != nil { - utils.Debugf("\n--->Err: %s\n", err) + if err := syscall.Unmount(info.mountPath, 0); err != nil { return err } utils.Debugf("[devmapper] Unmount done") @@ -937,14 +1035,6 @@ func (devices *DeviceSet) HasDevice(hash string) bool { return info != nil } -func (devices *DeviceSet) HasInitializedDevice(hash string) bool { - devices.Lock() - defer devices.Unlock() - - info, _ := devices.lookupDevice(hash) - return info != nil && info.Initialized -} - func (devices *DeviceSet) HasActivatedDevice(hash string) bool { info, _ := devices.lookupDevice(hash) if info == nil { @@ -961,17 +1051,6 @@ func (devices *DeviceSet) HasActivatedDevice(hash string) bool { return devinfo != nil && devinfo.Exists != 0 } -func (devices *DeviceSet) setInitialized(info *DevInfo) error { - info.Initialized = true - if err := devices.saveMetadata(); err != nil { - info.Initialized = false - utils.Debugf("\n--->Err: %s\n", err) - return err - } - - return nil -} - func (devices *DeviceSet) List() []string { devices.Lock() defer devices.Unlock() @@ -1069,12 +1148,72 @@ func (devices *DeviceSet) Status() *Status { return status } -func NewDeviceSet(root string, doInit bool) (*DeviceSet, error) { +func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error) { SetDevDir("/dev") devices := &DeviceSet{ - root: 
root, - MetaData: MetaData{Devices: make(map[string]*DevInfo)}, + root: root, + MetaData: MetaData{Devices: make(map[string]*DevInfo)}, + dataLoopbackSize: DefaultDataLoopbackSize, + metaDataLoopbackSize: DefaultMetaDataLoopbackSize, + baseFsSize: DefaultBaseFsSize, + filesystem: "ext4", + doBlkDiscard: true, + } + + foundBlkDiscard := false + for _, option := range options { + key, val, err := utils.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.FromHumanSize(val) + if err != nil { + return nil, err + } + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.FromHumanSize(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.FromHumanSize(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != "ext4" && val != "xfs" { + return nil, fmt.Errorf("Unsupported filesystem %s\n", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.datadev": + devices.dataDevice = val + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("Unknown option %s\n", key) + } + } + + // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive + if !foundBlkDiscard && devices.dataDevice != "" { + devices.doBlkDiscard = false } if err := devices.initDevmapper(doInit); err != nil { diff --git a/daemon/graphdriver/devmapper/devmapper.go b/daemon/graphdriver/devmapper/devmapper.go index 7317118dcf..a6602c276e 100644 --- a/daemon/graphdriver/devmapper/devmapper.go +++ b/daemon/graphdriver/devmapper/devmapper.go @@ -5,9 +5,11 @@ package devmapper import ( "errors" "fmt" - "github.com/dotcloud/docker/utils" + "os" "runtime" "syscall" + + "github.com/dotcloud/docker/utils" ) type DevmapperLogger interface { @@ -50,6 +52,7 @@ var ( ErrTaskAddTarget = errors.New("dm_task_add_target failed") ErrTaskSetSector = errors.New("dm_task_set_sector failed") ErrTaskGetInfo = errors.New("dm_task_get_info failed") + ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") ErrNilCookie = errors.New("cookie ptr can't be nil") ErrAttachLoopbackDevice = errors.New("loopback mounting failed") @@ -62,6 +65,10 @@ var ( ErrInvalidAddNode = errors.New("Invalide AddNoce type") ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") + ErrBusy = errors.New("Device is Busy") + + dmSawBusy bool + dmSawExist bool ) type ( @@ -172,6 +179,14 @@ func (t *Task) GetInfo() (*Info, error) { return info, nil } +func (t *Task) GetDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64, length uint64, targetType string, params string) { @@ -180,7 +195,7 @@ func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64, start, length, targetType, params } -func getLoopbackBackingFile(file *osFile) 
(uint64, uint64, error) { +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { loopInfo, err := ioctlLoopGetStatus64(file.Fd()) if err != nil { utils.Errorf("Error get loopback backing file: %s\n", err) @@ -189,7 +204,7 @@ func getLoopbackBackingFile(file *osFile) (uint64, uint64, error) { return loopInfo.loDevice, loopInfo.loInode, nil } -func LoopbackSetCapacity(file *osFile) error { +func LoopbackSetCapacity(file *os.File) error { if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { utils.Errorf("Error loopbackSetCapacity: %s", err) return ErrLoopbackSetCapacity @@ -197,20 +212,20 @@ func LoopbackSetCapacity(file *osFile) error { return nil } -func FindLoopDeviceFor(file *osFile) *osFile { +func FindLoopDeviceFor(file *os.File) *os.File { stat, err := file.Stat() if err != nil { return nil } - targetInode := stat.Sys().(*sysStatT).Ino - targetDevice := stat.Sys().(*sysStatT).Dev + targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev for i := 0; true; i++ { path := fmt.Sprintf("/dev/loop%d", i) - file, err := osOpenFile(path, osORdWr, 0) + file, err := os.OpenFile(path, os.O_RDWR, 0) if err != nil { - if osIsNotExist(err) { + if os.IsNotExist(err) { return nil } @@ -280,7 +295,7 @@ func RemoveDevice(name string) error { return nil } -func GetBlockDeviceSize(file *osFile) (uint64, error) { +func GetBlockDeviceSize(file *os.File) (uint64, error) { size, err := ioctlBlkGetSize64(file.Fd()) if err != nil { utils.Errorf("Error getblockdevicesize: %s", err) @@ -290,7 +305,7 @@ func GetBlockDeviceSize(file *osFile) (uint64, error) { } func BlockDeviceDiscard(path string) error { - file, err := osOpenFile(path, osORdWr, 0) + file, err := os.OpenFile(path, os.O_RDWR, 0) if err != nil { return err } @@ -313,7 +328,7 @@ func BlockDeviceDiscard(path string) error { } // This is the programmatic example of "dmsetup create" -func createPool(poolName string, dataFile, metadataFile *osFile) error { +func createPool(poolName string, dataFile, metadataFile *os.File) error { task, err := createTask(DeviceCreate, poolName) if task == nil { return err @@ -321,21 +336,21 @@ func createPool(poolName string, dataFile, metadataFile *osFile) error { size, err := GetBlockDeviceSize(dataFile) if err != nil { - return fmt.Errorf("Can't get data size") + return fmt.Errorf("Can't get data size %s", err) } params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing" if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("Can't add target") + return fmt.Errorf("Can't add target %s", err) } var cookie uint = 0 if err := task.SetCookie(&cookie, 0); err != nil { - return fmt.Errorf("Can't set cookie") + return fmt.Errorf("Can't set cookie %s", err) } if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceCreate (createPool)") + return fmt.Errorf("Error running DeviceCreate (createPool) %s", err) } UdevWait(cookie) @@ -343,7 +358,7 @@ func createPool(poolName string, dataFile, metadataFile *osFile) error { return nil } -func reloadPool(poolName string, dataFile, metadataFile *osFile) error { +func reloadPool(poolName string, dataFile, metadataFile *os.File) error { task, err := createTask(DeviceReload, poolName) if task == nil { return err @@ -351,16 +366,16 @@ func reloadPool(poolName string, dataFile, metadataFile *osFile) error { size, err := GetBlockDeviceSize(dataFile) if err != nil { - return fmt.Errorf("Can't get data size") + return fmt.Errorf("Can't get data 
size %s", err) } params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768" if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("Can't add target") + return fmt.Errorf("Can't add target %s", err) } if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceCreate") + return fmt.Errorf("Error running DeviceCreate %s", err) } return nil @@ -388,6 +403,17 @@ func getInfo(name string) (*Info, error) { return task.GetInfo() } +func getDriverVersion() (string, error) { + task := TaskCreate(DeviceVersion) + if task == nil { + return "", fmt.Errorf("Can't create DeviceVersion task") + } + if err := task.Run(); err != nil { + return "", err + } + return task.GetDriverVersion() +} + func getStatus(name string) (uint64, uint64, string, string, error) { task, err := createTask(DeviceStatus, name) if task == nil { @@ -420,15 +446,15 @@ func setTransactionId(poolName string, oldId uint64, newId uint64) error { } if err := task.SetSector(0); err != nil { - return fmt.Errorf("Can't set sector") + return fmt.Errorf("Can't set sector %s", err) } if err := task.SetMessage(fmt.Sprintf("set_transaction_id %d %d", oldId, newId)); err != nil { - return fmt.Errorf("Can't set message") + return fmt.Errorf("Can't set message %s", err) } if err := task.Run(); err != nil { - return fmt.Errorf("Error running setTransactionId") + return fmt.Errorf("Error running setTransactionId %s", err) } return nil } @@ -439,7 +465,7 @@ func suspendDevice(name string) error { return err } if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceSuspend: %s", err) + return fmt.Errorf("Error running DeviceSuspend %s", err) } return nil } @@ -452,11 +478,11 @@ func resumeDevice(name string) error { var cookie uint = 0 if err := task.SetCookie(&cookie, 0); err != nil { - return fmt.Errorf("Can't set cookie") + return fmt.Errorf("Can't set cookie %s", err) } if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceResume") + return fmt.Errorf("Error running DeviceResume %s", err) } UdevWait(cookie) @@ -464,23 +490,33 @@ func resumeDevice(name string) error { return nil } -func createDevice(poolName string, deviceId int) error { - utils.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, deviceId) - task, err := createTask(DeviceTargetMsg, poolName) - if task == nil { - return err - } +func createDevice(poolName string, deviceId *int) error { + utils.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) - if err := task.SetSector(0); err != nil { - return fmt.Errorf("Can't set sector") - } + for { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } - if err := task.SetMessage(fmt.Sprintf("create_thin %d", deviceId)); err != nil { - return fmt.Errorf("Can't set message") - } + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } - if err := task.Run(); err != nil { - return fmt.Errorf("Error running createDevice") + if err := task.SetMessage(fmt.Sprintf("create_thin %d", *deviceId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + dmSawExist = false + if err := task.Run(); err != nil { + if dmSawExist { + // Already exists, try next id + *deviceId++ + continue + } + return fmt.Errorf("Error running createDevice %s", err) + } + break } return nil } @@ -492,15 +528,15 @@ func deleteDevice(poolName string, deviceId int) error { } if err := task.SetSector(0); err != nil { - return 
fmt.Errorf("Can't set sector") + return fmt.Errorf("Can't set sector %s", err) } if err := task.SetMessage(fmt.Sprintf("delete %d", deviceId)); err != nil { - return fmt.Errorf("Can't set message") + return fmt.Errorf("Can't set message %s", err) } if err := task.Run(); err != nil { - return fmt.Errorf("Error running deleteDevice") + return fmt.Errorf("Error running deleteDevice %s", err) } return nil } @@ -512,8 +548,12 @@ func removeDevice(name string) error { if task == nil { return err } + dmSawBusy = false if err = task.Run(); err != nil { - return fmt.Errorf("Error running removeDevice") + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("Error running removeDevice %s", err) } return nil } @@ -526,19 +566,19 @@ func activateDevice(poolName string, name string, deviceId int, size uint64) err params := fmt.Sprintf("%s %d", poolName, deviceId) if err := task.AddTarget(0, size/512, "thin", params); err != nil { - return fmt.Errorf("Can't add target") + return fmt.Errorf("Can't add target %s", err) } if err := task.SetAddNode(AddNodeOnCreate); err != nil { - return fmt.Errorf("Can't add node") + return fmt.Errorf("Can't add node %s", err) } var cookie uint = 0 if err := task.SetCookie(&cookie, 0); err != nil { - return fmt.Errorf("Can't set cookie") + return fmt.Errorf("Can't set cookie %s", err) } if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceCreate (activateDevice)") + return fmt.Errorf("Error running DeviceCreate (activateDevice) %s", err) } UdevWait(cookie) @@ -546,7 +586,7 @@ func activateDevice(poolName string, name string, deviceId int, size uint64) err return nil } -func (devices *DeviceSet) createSnapDevice(poolName string, deviceId int, baseName string, baseDeviceId int) error { +func createSnapDevice(poolName string, deviceId *int, baseName string, baseDeviceId int) error { devinfo, _ := getInfo(baseName) doSuspend := devinfo != nil && devinfo.Exists != 0 @@ -556,33 +596,44 @@ func (devices *DeviceSet) createSnapDevice(poolName string, deviceId int, baseNa } } - task, err := createTask(DeviceTargetMsg, poolName) - if task == nil { - if doSuspend { - resumeDevice(baseName) + for { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + if doSuspend { + resumeDevice(baseName) + } + return err } - return err - } - if err := task.SetSector(0); err != nil { - if doSuspend { - resumeDevice(baseName) + if err := task.SetSector(0); err != nil { + if doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Can't set sector %s", err) } - return fmt.Errorf("Can't set sector") - } - if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", deviceId, baseDeviceId)); err != nil { - if doSuspend { - resumeDevice(baseName) + if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", *deviceId, baseDeviceId)); err != nil { + if doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Can't set message %s", err) } - return fmt.Errorf("Can't set message") - } - if err := task.Run(); err != nil { - if doSuspend { - resumeDevice(baseName) + dmSawExist = false + if err := task.Run(); err != nil { + if dmSawExist { + // Already exists, try next id + *deviceId++ + continue + } + + if doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Error running DeviceCreate (createSnapDevice) %s", err) } - return fmt.Errorf("Error running DeviceCreate (createSnapDevice)") + + break } if doSuspend { diff --git a/daemon/graphdriver/devmapper/devmapper_log.go b/daemon/graphdriver/devmapper/devmapper_log.go index 18dde7cca5..cdeaed2525 100644 
--- a/daemon/graphdriver/devmapper/devmapper_log.go +++ b/daemon/graphdriver/devmapper/devmapper_log.go @@ -4,12 +4,27 @@ package devmapper import "C" +import ( + "strings" +) + // Due to the way cgo works this has to be in a separate file, as devmapper.go has // definitions in the cgo block, which is incompatible with using "//export" //export DevmapperLogCallback func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_class C.int, message *C.char) { + msg := C.GoString(message) + if level < 7 { + if strings.Contains(msg, "busy") { + dmSawBusy = true + } + + if strings.Contains(msg, "File exists") { + dmSawExist = true + } + } + if dmLogger != nil { - dmLogger.log(int(level), C.GoString(file), int(line), int(dm_errno_or_class), C.GoString(message)) + dmLogger.log(int(level), C.GoString(file), int(line), int(dm_errno_or_class), msg) } } diff --git a/daemon/graphdriver/devmapper/devmapper_test.go b/daemon/graphdriver/devmapper/devmapper_test.go index 3ffa163ceb..7c97d6bb04 100644 --- a/daemon/graphdriver/devmapper/devmapper_test.go +++ b/daemon/graphdriver/devmapper/devmapper_test.go @@ -3,285 +3,35 @@ package devmapper import ( + "github.com/dotcloud/docker/daemon/graphdriver/graphtest" "testing" ) -func TestTaskCreate(t *testing.T) { - t.Skip("FIXME: not a unit test") - // Test success - taskCreate(t, DeviceInfo) - - // Test Failure - DmTaskCreate = dmTaskCreateFail - defer func() { DmTaskCreate = dmTaskCreateFct }() - if task := TaskCreate(-1); task != nil { - t.Fatalf("An error should have occured while creating an invalid task.") - } +func init() { + // Reduce the size the the base fs and loopback for the tests + DefaultDataLoopbackSize = 300 * 1024 * 1024 + DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 + DefaultBaseFsSize = 300 * 1024 * 1024 } -func TestTaskRun(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - // Perform the RUN - if err := task.Run(); err != nil { - t.Fatal(err) - } - // Make sure we don't have error with GetInfo - if _, err := task.GetInfo(); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskRun = dmTaskRunFail - defer func() { DmTaskRun = dmTaskRunFct }() - - task = taskCreate(t, DeviceInfo) - // Perform the RUN - if err := task.Run(); err != ErrTaskRun { - t.Fatalf("An error should have occured while running task.") - } - // Make sure GetInfo also fails - if _, err := task.GetInfo(); err != ErrTaskGetInfo { - t.Fatalf("GetInfo should fail if task.Run() failed.") - } +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown +func TestDevmapperSetup(t *testing.T) { + graphtest.GetDriver(t, "devicemapper") } -func TestTaskSetName(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetName("test"); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskSetName = dmTaskSetNameFail - defer func() { DmTaskSetName = dmTaskSetNameFct }() - - if err := task.SetName("test"); err != ErrTaskSetName { - t.Fatalf("An error should have occured while runnign SetName.") - } +func TestDevmapperCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "devicemapper") } -func TestTaskSetMessage(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetMessage("test"); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskSetMessage = 
dmTaskSetMessageFail - defer func() { DmTaskSetMessage = dmTaskSetMessageFct }() - - if err := task.SetMessage("test"); err != ErrTaskSetMessage { - t.Fatalf("An error should have occured while runnign SetMessage.") - } +func TestDevmapperCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "devicemapper") } -func TestTaskSetSector(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetSector(128); err != nil { - t.Fatal(err) - } - - DmTaskSetSector = dmTaskSetSectorFail - defer func() { DmTaskSetSector = dmTaskSetSectorFct }() - - // Test failure - if err := task.SetSector(0); err != ErrTaskSetSector { - t.Fatalf("An error should have occured while running SetSector.") - } +func TestDevmapperCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "devicemapper") } -func TestTaskSetCookie(t *testing.T) { - t.Skip("FIXME: not a unit test") - var ( - cookie uint = 0 - task = taskCreate(t, DeviceInfo) - ) - - // Test success - if err := task.SetCookie(&cookie, 0); err != nil { - t.Fatal(err) - } - - // Test failure - if err := task.SetCookie(nil, 0); err != ErrNilCookie { - t.Fatalf("An error should have occured while running SetCookie with nil cookie.") - } - - DmTaskSetCookie = dmTaskSetCookieFail - defer func() { DmTaskSetCookie = dmTaskSetCookieFct }() - - if err := task.SetCookie(&cookie, 0); err != ErrTaskSetCookie { - t.Fatalf("An error should have occured while running SetCookie.") - } -} - -func TestTaskSetAddNode(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetAddNode(0); err != nil { - t.Fatal(err) - } - - // Test failure - if err := task.SetAddNode(-1); err != ErrInvalidAddNode { - t.Fatalf("An error should have occured running SetAddNode with wrong node.") - } - - DmTaskSetAddNode = dmTaskSetAddNodeFail - defer func() { DmTaskSetAddNode = dmTaskSetAddNodeFct }() - - if err := task.SetAddNode(0); err != ErrTaskSetAddNode { - t.Fatalf("An error should have occured running SetAddNode.") - } -} - -func TestTaskSetRo(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetRo(); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskSetRo = dmTaskSetRoFail - defer func() { DmTaskSetRo = dmTaskSetRoFct }() - - if err := task.SetRo(); err != ErrTaskSetRo { - t.Fatalf("An error should have occured running SetRo.") - } -} - -func TestTaskAddTarget(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.AddTarget(0, 128, "thinp", ""); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskAddTarget = dmTaskAddTargetFail - defer func() { DmTaskAddTarget = dmTaskAddTargetFct }() - - if err := task.AddTarget(0, 128, "thinp", ""); err != ErrTaskAddTarget { - t.Fatalf("An error should have occured running AddTarget.") - } -} - -// func TestTaskGetInfo(t *testing.T) { -// task := taskCreate(t, DeviceInfo) - -// // Test success -// if _, err := task.GetInfo(); err != nil { -// t.Fatal(err) -// } - -// // Test failure -// DmTaskGetInfo = dmTaskGetInfoFail -// defer func() { DmTaskGetInfo = dmTaskGetInfoFct }() - -// if _, err := task.GetInfo(); err != ErrTaskGetInfo { -// t.Fatalf("An error should have occured running GetInfo.") -// } -// } - -// func TestTaskGetNextTarget(t *testing.T) { -// task := taskCreate(t, DeviceInfo) - -// if next, _, _, _, _ := task.GetNextTarget(0); next == 0 
{ -// t.Fatalf("The next target should not be 0.") -// } -// } - -/// Utils -func taskCreate(t *testing.T, taskType TaskType) *Task { - task := TaskCreate(taskType) - if task == nil { - t.Fatalf("Error creating task") - } - return task -} - -/// Failure function replacement -func dmTaskCreateFail(t int) *CDmTask { - return nil -} - -func dmTaskRunFail(task *CDmTask) int { - return -1 -} - -func dmTaskSetNameFail(task *CDmTask, name string) int { - return -1 -} - -func dmTaskSetMessageFail(task *CDmTask, message string) int { - return -1 -} - -func dmTaskSetSectorFail(task *CDmTask, sector uint64) int { - return -1 -} - -func dmTaskSetCookieFail(task *CDmTask, cookie *uint, flags uint16) int { - return -1 -} - -func dmTaskSetAddNodeFail(task *CDmTask, addNode AddNodeType) int { - return -1 -} - -func dmTaskSetRoFail(task *CDmTask) int { - return -1 -} - -func dmTaskAddTargetFail(task *CDmTask, - start, size uint64, ttype, params string) int { - return -1 -} - -func dmTaskGetInfoFail(task *CDmTask, info *Info) int { - return -1 -} - -func dmGetNextTargetFail(task *CDmTask, next uintptr, start, length *uint64, - target, params *string) uintptr { - return 0 -} - -func dmAttachLoopDeviceFail(filename string, fd *int) string { - return "" -} - -func sysGetBlockSizeFail(fd uintptr, size *uint64) sysErrno { - return 1 -} - -func dmUdevWaitFail(cookie uint) int { - return -1 -} - -func dmSetDevDirFail(dir string) int { - return -1 -} - -func dmGetLibraryVersionFail(version *string) int { - return -1 +func TestDevmapperTeardown(t *testing.T) { + graphtest.PutDriver(t) } diff --git a/daemon/graphdriver/devmapper/devmapper_wrapper.go b/daemon/graphdriver/devmapper/devmapper_wrapper.go index bf558affc8..9f1b5a6054 100644 --- a/daemon/graphdriver/devmapper/devmapper_wrapper.go +++ b/daemon/graphdriver/devmapper/devmapper_wrapper.go @@ -85,23 +85,24 @@ const ( ) var ( - DmGetLibraryVersion = dmGetLibraryVersionFct - DmGetNextTarget = dmGetNextTargetFct - DmLogInitVerbose = dmLogInitVerboseFct - DmSetDevDir = dmSetDevDirFct - DmTaskAddTarget = dmTaskAddTargetFct - DmTaskCreate = dmTaskCreateFct - DmTaskDestroy = dmTaskDestroyFct - DmTaskGetInfo = dmTaskGetInfoFct - DmTaskRun = dmTaskRunFct - DmTaskSetAddNode = dmTaskSetAddNodeFct - DmTaskSetCookie = dmTaskSetCookieFct - DmTaskSetMessage = dmTaskSetMessageFct - DmTaskSetName = dmTaskSetNameFct - DmTaskSetRo = dmTaskSetRoFct - DmTaskSetSector = dmTaskSetSectorFct - DmUdevWait = dmUdevWaitFct - LogWithErrnoInit = logWithErrnoInitFct + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmLogInitVerbose = dmLogInitVerboseFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + LogWithErrnoInit = logWithErrnoInitFct ) func free(p *C.char) { @@ -184,6 +185,16 @@ func dmTaskGetInfoFct(task *CDmTask, info *Info) int { return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) } +func dmTaskGetDriverVersionFct(task *CDmTask) string { + buffer := C.malloc(128) + defer C.free(buffer) + res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) + if 
res == 0 { + return "" + } + return C.GoString((*C.char)(buffer)) +} + func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { var ( Cstart, Clength C.uint64_t diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go index 9f240d96e0..cf82ad62ed 100644 --- a/daemon/graphdriver/devmapper/driver.go +++ b/daemon/graphdriver/devmapper/driver.go @@ -9,6 +9,7 @@ import ( "path" "github.com/dotcloud/docker/daemon/graphdriver" + "github.com/dotcloud/docker/pkg/mount" "github.com/dotcloud/docker/utils" ) @@ -26,15 +27,21 @@ type Driver struct { home string } -var Init = func(home string) (graphdriver.Driver, error) { - deviceSet, err := NewDeviceSet(home, true) +func Init(home string, options []string) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options) if err != nil { return nil, err } + + if err := graphdriver.MakePrivate(home); err != nil { + return nil, err + } + d := &Driver{ DeviceSet: deviceSet, home: home, } + return d, nil } @@ -58,7 +65,13 @@ func (d *Driver) Status() [][2]string { } func (d *Driver) Cleanup() error { - return d.DeviceSet.Shutdown() + err := d.DeviceSet.Shutdown() + + if err2 := mount.Unmount(d.home); err == nil { + err = err2 + } + + return err } func (d *Driver) Create(id, parent string) error { @@ -94,7 +107,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { mp := path.Join(d.home, "mnt", id) // Create the target directories if they don't exist - if err := osMkdirAll(mp, 0755); err != nil && !osIsExist(err) { + if err := os.MkdirAll(mp, 0755); err != nil && !os.IsExist(err) { return "", err } @@ -104,13 +117,13 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { } rootFs := path.Join(mp, "rootfs") - if err := osMkdirAll(rootFs, 0755); err != nil && !osIsExist(err) { + if err := os.MkdirAll(rootFs, 0755); err != nil && !os.IsExist(err) { d.DeviceSet.UnmountDevice(id) return "", err } idFile := path.Join(mp, "id") - if _, err := osStat(idFile); err != nil && osIsNotExist(err) { + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { // Create an "id" file with the container/image id in it to help reconscruct this in case // of later problems if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { diff --git a/daemon/graphdriver/devmapper/driver_test.go b/daemon/graphdriver/devmapper/driver_test.go deleted file mode 100644 index 913add7c8b..0000000000 --- a/daemon/graphdriver/devmapper/driver_test.go +++ /dev/null @@ -1,880 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "fmt" - "github.com/dotcloud/docker/daemon/graphdriver" - "io/ioutil" - "path" - "runtime" - "strings" - "syscall" - "testing" -) - -func init() { - // Reduce the size the the base fs and loopback for the tests - DefaultDataLoopbackSize = 300 * 1024 * 1024 - DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 - DefaultBaseFsSize = 300 * 1024 * 1024 -} - -// denyAllDevmapper mocks all calls to libdevmapper in the unit tests, and denies them by default -func denyAllDevmapper() { - // Hijack all calls to libdevmapper with default panics. - // Authorized calls are selectively hijacked in each tests. 
- DmTaskCreate = func(t int) *CDmTask { - panic("DmTaskCreate: this method should not be called here") - } - DmTaskRun = func(task *CDmTask) int { - panic("DmTaskRun: this method should not be called here") - } - DmTaskSetName = func(task *CDmTask, name string) int { - panic("DmTaskSetName: this method should not be called here") - } - DmTaskSetMessage = func(task *CDmTask, message string) int { - panic("DmTaskSetMessage: this method should not be called here") - } - DmTaskSetSector = func(task *CDmTask, sector uint64) int { - panic("DmTaskSetSector: this method should not be called here") - } - DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { - panic("DmTaskSetCookie: this method should not be called here") - } - DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { - panic("DmTaskSetAddNode: this method should not be called here") - } - DmTaskSetRo = func(task *CDmTask) int { - panic("DmTaskSetRo: this method should not be called here") - } - DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { - panic("DmTaskAddTarget: this method should not be called here") - } - DmTaskGetInfo = func(task *CDmTask, info *Info) int { - panic("DmTaskGetInfo: this method should not be called here") - } - DmGetNextTarget = func(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { - panic("DmGetNextTarget: this method should not be called here") - } - DmUdevWait = func(cookie uint) int { - panic("DmUdevWait: this method should not be called here") - } - DmSetDevDir = func(dir string) int { - panic("DmSetDevDir: this method should not be called here") - } - DmGetLibraryVersion = func(version *string) int { - panic("DmGetLibraryVersion: this method should not be called here") - } - DmLogInitVerbose = func(level int) { - panic("DmLogInitVerbose: this method should not be called here") - } - DmTaskDestroy = func(task *CDmTask) { - panic("DmTaskDestroy: this method should not be called here") - } - LogWithErrnoInit = func() { - panic("LogWithErrnoInit: this method should not be called here") - } -} - -func denyAllSyscall() { - sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { - panic("sysMount: this method should not be called here") - } - sysUnmount = func(target string, flags int) (err error) { - panic("sysUnmount: this method should not be called here") - } - sysCloseOnExec = func(fd int) { - panic("sysCloseOnExec: this method should not be called here") - } - sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - panic("sysSyscall: this method should not be called here") - } - // Not a syscall, but forbidding it here anyway - Mounted = func(mnt string) (bool, error) { - panic("devmapper.Mounted: this method should not be called here") - } - // osOpenFile = os.OpenFile - // osNewFile = os.NewFile - // osCreate = os.Create - // osStat = os.Stat - // osIsNotExist = os.IsNotExist - // osIsExist = os.IsExist - // osMkdirAll = os.MkdirAll - // osRemoveAll = os.RemoveAll - // osRename = os.Rename - // osReadlink = os.Readlink - - // execRun = func(name string, args ...string) error { - // return exec.Command(name, args...).Run() - // } -} - -func mkTestDirectory(t *testing.T) string { - dir, err := ioutil.TempDir("", "docker-test-devmapper-") - if err != nil { - t.Fatal(err) - } - return dir -} - -func newDriver(t *testing.T) *Driver { - home := mkTestDirectory(t) - d, err := Init(home) - if err != nil { - t.Fatal(err) - } - return d.(*Driver) -} - 
-func cleanup(d *Driver) { - d.Cleanup() - osRemoveAll(d.home) -} - -type Set map[string]bool - -func (r Set) Assert(t *testing.T, names ...string) { - for _, key := range names { - required := true - if strings.HasPrefix(key, "?") { - key = key[1:] - required = false - } - if _, exists := r[key]; !exists && required { - t.Fatalf("Key not set: %s", key) - } - delete(r, key) - } - if len(r) != 0 { - t.Fatalf("Unexpected keys: %v", r) - } -} - -func TestInit(t *testing.T) { - var ( - calls = make(Set) - taskMessages = make(Set) - taskTypes = make(Set) - home = mkTestDirectory(t) - ) - defer osRemoveAll(home) - - func() { - denyAllDevmapper() - DmSetDevDir = func(dir string) int { - calls["DmSetDevDir"] = true - expectedDir := "/dev" - if dir != expectedDir { - t.Fatalf("Wrong libdevmapper call\nExpected: DmSetDevDir(%v)\nReceived: DmSetDevDir(%v)\n", expectedDir, dir) - } - return 0 - } - LogWithErrnoInit = func() { - calls["DmLogWithErrnoInit"] = true - } - var task1 CDmTask - DmTaskCreate = func(taskType int) *CDmTask { - calls["DmTaskCreate"] = true - taskTypes[fmt.Sprintf("%d", taskType)] = true - return &task1 - } - DmTaskSetName = func(task *CDmTask, name string) int { - calls["DmTaskSetName"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", expectedTask, task) - } - // FIXME: use Set.AssertRegexp() - if !strings.HasPrefix(name, "docker-") && !strings.HasPrefix(name, "/dev/mapper/docker-") || - !strings.HasSuffix(name, "-pool") && !strings.HasSuffix(name, "-base") { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", "docker-...-pool", name) - } - return 1 - } - DmTaskRun = func(task *CDmTask) int { - calls["DmTaskRun"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskRun(%v)\nReceived: DmTaskRun(%v)\n", expectedTask, task) - } - return 1 - } - DmTaskGetInfo = func(task *CDmTask, info *Info) int { - calls["DmTaskGetInfo"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskGetInfo(%v)\nReceived: DmTaskGetInfo(%v)\n", expectedTask, task) - } - // This will crash if info is not dereferenceable - info.Exists = 0 - return 1 - } - DmTaskSetSector = func(task *CDmTask, sector uint64) int { - calls["DmTaskSetSector"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task) - } - if expectedSector := uint64(0); sector != expectedSector { - t.Fatalf("Wrong libdevmapper call to DmTaskSetSector\nExpected: %v\nReceived: %v\n", expectedSector, sector) - } - return 1 - } - DmTaskSetMessage = func(task *CDmTask, message string) int { - calls["DmTaskSetMessage"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task) - } - taskMessages[message] = true - return 1 - } - DmTaskDestroy = func(task *CDmTask) { - calls["DmTaskDestroy"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) - } - } - DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { - calls["DmTaskSetTarget"] = true - expectedTask := &task1 - if task != 
expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) - } - if start != 0 { - t.Fatalf("Wrong start: %d != %d", start, 0) - } - if ttype != "thin" && ttype != "thin-pool" { - t.Fatalf("Wrong ttype: %s", ttype) - } - // Quick smoke test - if params == "" { - t.Fatalf("Params should not be empty") - } - return 1 - } - fakeCookie := uint(4321) - DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { - calls["DmTaskSetCookie"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) - } - if flags != 0 { - t.Fatalf("Cookie flags should be 0 (not %x)", flags) - } - *cookie = fakeCookie - return 1 - } - DmUdevWait = func(cookie uint) int { - calls["DmUdevWait"] = true - if cookie != fakeCookie { - t.Fatalf("Wrong cookie: %d != %d", cookie, fakeCookie) - } - return 1 - } - DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { - if addNode != AddNodeOnCreate { - t.Fatalf("Wrong AddNoteType: %v (expected %v)", addNode, AddNodeOnCreate) - } - calls["DmTaskSetAddNode"] = true - return 1 - } - execRun = func(name string, args ...string) error { - calls["execRun"] = true - if name != "mkfs.ext4" { - t.Fatalf("Expected %s to be executed, not %s", "mkfs.ext4", name) - } - return nil - } - driver, err := Init(home) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := driver.Cleanup(); err != nil { - t.Fatal(err) - } - }() - }() - // Put all tests in a function to make sure the garbage collection will - // occur. - - // Call GC to cleanup runtime.Finalizers - runtime.GC() - - calls.Assert(t, - "DmSetDevDir", - "DmLogWithErrnoInit", - "DmTaskSetName", - "DmTaskRun", - "DmTaskGetInfo", - "DmTaskDestroy", - "execRun", - "DmTaskCreate", - "DmTaskSetTarget", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetSector", - "DmTaskSetMessage", - "DmTaskSetAddNode", - ) - taskTypes.Assert(t, "0", "6", "17") - taskMessages.Assert(t, "create_thin 0", "set_transaction_id 0 1") -} - -func fakeInit() func(home string) (graphdriver.Driver, error) { - oldInit := Init - Init = func(home string) (graphdriver.Driver, error) { - return &Driver{ - home: home, - }, nil - } - return oldInit -} - -func restoreInit(init func(home string) (graphdriver.Driver, error)) { - Init = init -} - -func mockAllDevmapper(calls Set) { - DmSetDevDir = func(dir string) int { - calls["DmSetDevDir"] = true - return 0 - } - LogWithErrnoInit = func() { - calls["DmLogWithErrnoInit"] = true - } - DmTaskCreate = func(taskType int) *CDmTask { - calls["DmTaskCreate"] = true - return &CDmTask{} - } - DmTaskSetName = func(task *CDmTask, name string) int { - calls["DmTaskSetName"] = true - return 1 - } - DmTaskRun = func(task *CDmTask) int { - calls["DmTaskRun"] = true - return 1 - } - DmTaskGetInfo = func(task *CDmTask, info *Info) int { - calls["DmTaskGetInfo"] = true - return 1 - } - DmTaskSetSector = func(task *CDmTask, sector uint64) int { - calls["DmTaskSetSector"] = true - return 1 - } - DmTaskSetMessage = func(task *CDmTask, message string) int { - calls["DmTaskSetMessage"] = true - return 1 - } - DmTaskDestroy = func(task *CDmTask) { - calls["DmTaskDestroy"] = true - } - DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { - calls["DmTaskSetTarget"] = true - return 1 - } - DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { - calls["DmTaskSetCookie"] = true - 
return 1 - } - DmUdevWait = func(cookie uint) int { - calls["DmUdevWait"] = true - return 1 - } - DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { - calls["DmTaskSetAddNode"] = true - return 1 - } - execRun = func(name string, args ...string) error { - calls["execRun"] = true - return nil - } -} - -func TestDriverName(t *testing.T) { - denyAllDevmapper() - defer denyAllDevmapper() - - oldInit := fakeInit() - defer restoreInit(oldInit) - - d := newDriver(t) - if d.String() != "devicemapper" { - t.Fatalf("Expected driver name to be devicemapper got %s", d.String()) - } -} - -func TestDriverCreate(t *testing.T) { - denyAllDevmapper() - denyAllSyscall() - defer denyAllSyscall() - defer denyAllDevmapper() - - calls := make(Set) - mockAllDevmapper(calls) - - sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { - calls["sysMount"] = true - // FIXME: compare the exact source and target strings (inodes + devname) - if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source) - } - if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) - } - if expectedFstype := "ext4"; fstype != expectedFstype { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype) - } - if expectedFlags := uintptr(3236757504); flags != expectedFlags { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags) - } - return nil - } - - sysUnmount = func(target string, flag int) error { - //calls["sysUnmount"] = true - - return nil - } - - Mounted = func(mnt string) (bool, error) { - calls["Mounted"] = true - if !strings.HasPrefix(mnt, "/tmp/docker-test-devmapper-") || !strings.HasSuffix(mnt, "/mnt/1") { - t.Fatalf("Wrong mounted call\nExpected: Mounted(%v)\nReceived: Mounted(%v)\n", "/tmp/docker-test-devmapper-.../mnt/1", mnt) - } - return false, nil - } - - sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - calls["sysSyscall"] = true - if trap != sysSysIoctl { - t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap) - } - switch a2 { - case LoopSetFd: - calls["ioctl.loopsetfd"] = true - case LoopCtlGetFree: - calls["ioctl.loopctlgetfree"] = true - case LoopGetStatus64: - calls["ioctl.loopgetstatus"] = true - case LoopSetStatus64: - calls["ioctl.loopsetstatus"] = true - case LoopClrFd: - calls["ioctl.loopclrfd"] = true - case LoopSetCapacity: - calls["ioctl.loopsetcapacity"] = true - case BlkGetSize64: - calls["ioctl.blkgetsize"] = true - default: - t.Fatalf("Unexpected IOCTL. 
Received %d", a2) - } - return 0, 0, 0 - } - - func() { - d := newDriver(t) - - calls.Assert(t, - "DmSetDevDir", - "DmLogWithErrnoInit", - "DmTaskSetName", - "DmTaskRun", - "DmTaskGetInfo", - "execRun", - "DmTaskCreate", - "DmTaskSetTarget", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetSector", - "DmTaskSetMessage", - "DmTaskSetAddNode", - "sysSyscall", - "ioctl.blkgetsize", - "ioctl.loopsetfd", - "ioctl.loopsetstatus", - "?ioctl.loopctlgetfree", - ) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - calls.Assert(t, - "DmTaskCreate", - "DmTaskGetInfo", - "DmTaskRun", - "DmTaskSetSector", - "DmTaskSetName", - "DmTaskSetMessage", - ) - - }() - - runtime.GC() - - calls.Assert(t, - "DmTaskDestroy", - ) -} - -func TestDriverRemove(t *testing.T) { - denyAllDevmapper() - denyAllSyscall() - defer denyAllSyscall() - defer denyAllDevmapper() - - calls := make(Set) - mockAllDevmapper(calls) - - sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { - calls["sysMount"] = true - // FIXME: compare the exact source and target strings (inodes + devname) - if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source) - } - if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) - } - if expectedFstype := "ext4"; fstype != expectedFstype { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype) - } - if expectedFlags := uintptr(3236757504); flags != expectedFlags { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags) - } - return nil - } - sysUnmount = func(target string, flags int) (err error) { - // FIXME: compare the exact source and target strings (inodes + devname) - if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) - } - if expectedFlags := 0; flags != expectedFlags { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags) - } - return nil - } - Mounted = func(mnt string) (bool, error) { - calls["Mounted"] = true - return false, nil - } - - sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - calls["sysSyscall"] = true - if trap != sysSysIoctl { - t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap) - } - switch a2 { - case LoopSetFd: - calls["ioctl.loopsetfd"] = true - case LoopCtlGetFree: - calls["ioctl.loopctlgetfree"] = true - case LoopGetStatus64: - calls["ioctl.loopgetstatus"] = true - case LoopSetStatus64: - calls["ioctl.loopsetstatus"] = true - case LoopClrFd: - calls["ioctl.loopclrfd"] = true - case LoopSetCapacity: - calls["ioctl.loopsetcapacity"] = true - case BlkGetSize64: - calls["ioctl.blkgetsize"] = true - default: - t.Fatalf("Unexpected IOCTL. 
Received %d", a2) - } - return 0, 0, 0 - } - - func() { - d := newDriver(t) - - calls.Assert(t, - "DmSetDevDir", - "DmLogWithErrnoInit", - "DmTaskSetName", - "DmTaskRun", - "DmTaskGetInfo", - "execRun", - "DmTaskCreate", - "DmTaskSetTarget", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetSector", - "DmTaskSetMessage", - "DmTaskSetAddNode", - "sysSyscall", - "ioctl.blkgetsize", - "ioctl.loopsetfd", - "ioctl.loopsetstatus", - "?ioctl.loopctlgetfree", - ) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - calls.Assert(t, - "DmTaskCreate", - "DmTaskGetInfo", - "DmTaskRun", - "DmTaskSetSector", - "DmTaskSetName", - "DmTaskSetMessage", - ) - - Mounted = func(mnt string) (bool, error) { - calls["Mounted"] = true - return true, nil - } - - if err := d.Remove("1"); err != nil { - t.Fatal(err) - } - - calls.Assert(t, - "DmTaskRun", - "DmTaskSetSector", - "DmTaskSetName", - "DmTaskSetMessage", - "DmTaskCreate", - "DmTaskGetInfo", - "DmTaskSetCookie", - "DmTaskSetTarget", - "DmTaskSetAddNode", - "DmUdevWait", - ) - }() - runtime.GC() - - calls.Assert(t, - "DmTaskDestroy", - ) -} - -func TestCleanup(t *testing.T) { - t.Skip("FIXME: not a unit test") - t.Skip("Unimplemented") - d := newDriver(t) - defer osRemoveAll(d.home) - - mountPoints := make([]string, 2) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - // Mount the id - p, err := d.Get("1", "") - if err != nil { - t.Fatal(err) - } - mountPoints[0] = p - - if err := d.Create("2", "1"); err != nil { - t.Fatal(err) - } - - p, err = d.Get("2", "") - if err != nil { - t.Fatal(err) - } - mountPoints[1] = p - - // Ensure that all the mount points are currently mounted - for _, p := range mountPoints { - if mounted, err := Mounted(p); err != nil { - t.Fatal(err) - } else if !mounted { - t.Fatalf("Expected %s to be mounted", p) - } - } - - // Ensure that devices are active - for _, p := range []string{"1", "2"} { - if !d.HasActivatedDevice(p) { - t.Fatalf("Expected %s to have an active device", p) - } - } - - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - - // Ensure that all the mount points are no longer mounted - for _, p := range mountPoints { - if mounted, err := Mounted(p); err != nil { - t.Fatal(err) - } else if mounted { - t.Fatalf("Expected %s to not be mounted", p) - } - } - - // Ensure that devices are no longer activated - for _, p := range []string{"1", "2"} { - if d.HasActivatedDevice(p) { - t.Fatalf("Expected %s not be an active device", p) - } - } -} - -func TestNotMounted(t *testing.T) { - t.Skip("FIXME: not a unit test") - t.Skip("Not implemented") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - mounted, err := Mounted(path.Join(d.home, "mnt", "1")) - if err != nil { - t.Fatal(err) - } - if mounted { - t.Fatal("Id 1 should not be mounted") - } -} - -func TestMounted(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if _, err := d.Get("1", ""); err != nil { - t.Fatal(err) - } - - mounted, err := Mounted(path.Join(d.home, "mnt", "1")) - if err != nil { - t.Fatal(err) - } - if !mounted { - t.Fatal("Id 1 should be mounted") - } -} - -func TestInitCleanedDriver(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if _, err := d.Get("1", ""); err != nil { - t.Fatal(err) - } - - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - - driver, err := 
Init(d.home) - if err != nil { - t.Fatal(err) - } - d = driver.(*Driver) - defer cleanup(d) - - if _, err := d.Get("1", ""); err != nil { - t.Fatal(err) - } -} - -func TestMountMountedDriver(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - // Perform get on same id to ensure that it will - // not be mounted twice - if _, err := d.Get("1", ""); err != nil { - t.Fatal(err) - } - if _, err := d.Get("1", ""); err != nil { - t.Fatal(err) - } -} - -func TestGetReturnsValidDevice(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - if !d.HasDevice("1") { - t.Fatalf("Expected id 1 to be in device set") - } - - if _, err := d.Get("1", ""); err != nil { - t.Fatal(err) - } - - if !d.HasActivatedDevice("1") { - t.Fatalf("Expected id 1 to be activated") - } - - if !d.HasInitializedDevice("1") { - t.Fatalf("Expected id 1 to be initialized") - } -} - -func TestDriverGetSize(t *testing.T) { - t.Skip("FIXME: not a unit test") - t.Skipf("Size is currently not implemented") - - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - mountPoint, err := d.Get("1", "") - if err != nil { - t.Fatal(err) - } - - size := int64(1024) - - f, err := osCreate(path.Join(mountPoint, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - f.Close() - - // diffSize, err := d.DiffSize("1") - // if err != nil { - // t.Fatal(err) - // } - // if diffSize != size { - // t.Fatalf("Expected size %d got %d", size, diffSize) - // } -} - -func assertMap(t *testing.T, m map[string]bool, keys ...string) { - for _, key := range keys { - if _, exists := m[key]; !exists { - t.Fatalf("Key not set: %s", key) - } - delete(m, key) - } - if len(m) != 0 { - t.Fatalf("Unexpected keys: %v", m) - } -} diff --git a/daemon/graphdriver/devmapper/ioctl.go b/daemon/graphdriver/devmapper/ioctl.go index 30bafff943..8f403da2b0 100644 --- a/daemon/graphdriver/devmapper/ioctl.go +++ b/daemon/graphdriver/devmapper/ioctl.go @@ -3,11 +3,12 @@ package devmapper import ( + "syscall" "unsafe" ) func ioctlLoopCtlGetFree(fd uintptr) (int, error) { - index, _, err := sysSyscall(sysSysIoctl, fd, LoopCtlGetFree, 0) + index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) if err != 0 { return 0, err } @@ -15,21 +16,21 @@ func ioctlLoopCtlGetFree(fd uintptr) (int, error) { } func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetFd, sparseFd); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { return err } return nil } func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *LoopInfo64) error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { return err } return nil } func ioctlLoopClrFd(loopFd uintptr) error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopClrFd, 0); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { return err } return nil @@ -38,14 +39,14 @@ func ioctlLoopClrFd(loopFd uintptr) error { func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) { loopInfo := 
&LoopInfo64{} - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { return nil, err } return loopInfo, nil } func ioctlLoopSetCapacity(loopFd uintptr, value int) error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { return err } return nil @@ -53,7 +54,7 @@ func ioctlLoopSetCapacity(loopFd uintptr, value int) error { func ioctlBlkGetSize64(fd uintptr) (int64, error) { var size int64 - if _, _, err := sysSyscall(sysSysIoctl, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { return 0, err } return size, nil @@ -64,7 +65,7 @@ func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { r[0] = offset r[1] = length - if _, _, err := sysSyscall(sysSysIoctl, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { return err } return nil diff --git a/daemon/graphdriver/devmapper/mount.go b/daemon/graphdriver/devmapper/mount.go index 4f19109bf8..c9ff216d5d 100644 --- a/daemon/graphdriver/devmapper/mount.go +++ b/daemon/graphdriver/devmapper/mount.go @@ -3,25 +3,84 @@ package devmapper import ( + "bytes" + "fmt" + "os" "path/filepath" + "syscall" ) // FIXME: this is copy-pasted from the aufs driver. // It should be moved into the core. -var Mounted = func(mountpoint string) (bool, error) { - mntpoint, err := osStat(mountpoint) +func Mounted(mountpoint string) (bool, error) { + mntpoint, err := os.Stat(mountpoint) if err != nil { - if osIsNotExist(err) { + if os.IsNotExist(err) { return false, nil } return false, err } - parent, err := osStat(filepath.Join(mountpoint, "..")) + parent, err := os.Stat(filepath.Join(mountpoint, "..")) if err != nil { return false, err } - mntpointSt := toSysStatT(mntpoint.Sys()) - parentSt := toSysStatT(parent.Sys()) + mntpointSt := mntpoint.Sys().(*syscall.Stat_t) + parentSt := parent.Sys().(*syscall.Stat_t) return mntpointSt.Dev != parentSt.Dev, nil } + +type probeData struct { + fsName string + magic string + offset uint64 +} + +func ProbeFsType(device string) (string, error) { + probes := []probeData{ + {"btrfs", "_BHRfS_M", 0x10040}, + {"ext4", "\123\357", 0x438}, + {"xfs", "XFSB", 0}, + } + + maxLen := uint64(0) + for _, p := range probes { + l := p.offset + uint64(len(p.magic)) + if l > maxLen { + maxLen = l + } + } + + file, err := os.Open(device) + if err != nil { + return "", err + } + + buffer := make([]byte, maxLen) + l, err := file.Read(buffer) + if err != nil { + return "", err + } + file.Close() + if uint64(l) != maxLen { + return "", fmt.Errorf("unable to detect filesystem type of %s, short read", device) + } + + for _, p := range probes { + if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { + return p.fsName, nil + } + } + + return "", fmt.Errorf("Unknown filesystem type on %s", device) +} + +func joinMountOptions(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return a + "," + b +} diff --git a/daemon/graphdriver/devmapper/sys.go b/daemon/graphdriver/devmapper/sys.go deleted file mode 100644 index 
5a9ab4d74b..0000000000 --- a/daemon/graphdriver/devmapper/sys.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "os" - "os/exec" - "syscall" -) - -type ( - sysStatT syscall.Stat_t - sysErrno syscall.Errno - - osFile struct{ *os.File } -) - -var ( - sysMount = syscall.Mount - sysUnmount = syscall.Unmount - sysCloseOnExec = syscall.CloseOnExec - sysSyscall = syscall.Syscall - - osOpenFile = func(name string, flag int, perm os.FileMode) (*osFile, error) { - f, err := os.OpenFile(name, flag, perm) - return &osFile{File: f}, err - } - osOpen = func(name string) (*osFile, error) { f, err := os.Open(name); return &osFile{File: f}, err } - osNewFile = os.NewFile - osCreate = os.Create - osStat = os.Stat - osIsNotExist = os.IsNotExist - osIsExist = os.IsExist - osMkdirAll = os.MkdirAll - osRemoveAll = os.RemoveAll - osRename = os.Rename - osReadlink = os.Readlink - - execRun = func(name string, args ...string) error { return exec.Command(name, args...).Run() } -) - -const ( - sysMsMgcVal = syscall.MS_MGC_VAL - sysMsRdOnly = syscall.MS_RDONLY - sysEInval = syscall.EINVAL - sysSysIoctl = syscall.SYS_IOCTL - sysEBusy = syscall.EBUSY - - osORdOnly = os.O_RDONLY - osORdWr = os.O_RDWR - osOCreate = os.O_CREATE - osModeDevice = os.ModeDevice -) - -func toSysStatT(i interface{}) *sysStatT { - return (*sysStatT)(i.(*syscall.Stat_t)) -} diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go index 80bf8a0143..4536489706 100644 --- a/daemon/graphdriver/driver.go +++ b/daemon/graphdriver/driver.go @@ -1,14 +1,23 @@ package graphdriver import ( + "errors" "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/utils" "os" "path" + + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/pkg/mount" ) -type InitFunc func(root string) (Driver, error) +type FsMagic uint64 + +const ( + FsMagicBtrfs = FsMagic(0x9123683E) + FsMagicAufs = FsMagic(0x61756673) +) + +type InitFunc func(root string, options []string) (Driver, error) type Driver interface { String() string @@ -43,6 +52,10 @@ var ( "devicemapper", "vfs", } + + ErrNotSupported = errors.New("driver not supported") + ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") + ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") ) func init() { @@ -58,35 +71,56 @@ func Register(name string, initFunc InitFunc) error { return nil } -func GetDriver(name, home string) (Driver, error) { +func GetDriver(name, home string, options []string) (Driver, error) { if initFunc, exists := drivers[name]; exists { - return initFunc(path.Join(home, name)) + return initFunc(path.Join(home, name), options) } - return nil, fmt.Errorf("No such driver: %s", name) + return nil, ErrNotSupported } -func New(root string) (driver Driver, err error) { +func New(root string, options []string) (driver Driver, err error) { for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} { if name != "" { - return GetDriver(name, root) + return GetDriver(name, root, options) } } // Check for priority drivers first for _, name := range priority { - if driver, err = GetDriver(name, root); err != nil { - utils.Debugf("Error loading driver %s: %s", name, err) - continue + driver, err = GetDriver(name, root, options) + if err != nil { + if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { + continue + } + return nil, err } return driver, nil } // Check all registered drivers if no priority driver is 
found for _, initFunc := range drivers { - if driver, err = initFunc(root); err != nil { - continue + if driver, err = initFunc(root, options); err != nil { + if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { + continue + } + return nil, err } return driver, nil } - return nil, err + return nil, fmt.Errorf("No supported storage backend found") +} + +func MakePrivate(mountPoint string) error { + mounted, err := mount.Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := mount.Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + + return mount.ForceMount("", mountPoint, "none", "private") } diff --git a/daemon/graphdriver/graphtest/graphtest.go b/daemon/graphdriver/graphtest/graphtest.go new file mode 100644 index 0000000000..d53878c45a --- /dev/null +++ b/daemon/graphdriver/graphtest/graphtest.go @@ -0,0 +1,228 @@ +package graphtest + +import ( + "github.com/dotcloud/docker/daemon/graphdriver" + "io/ioutil" + "os" + "path" + "syscall" + "testing" +) + +var ( + drv *Driver +) + +type Driver struct { + graphdriver.Driver + root string + refCount int +} + +func newDriver(t *testing.T, name string) *Driver { + root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-") + if err != nil { + t.Fatal(err) + } + + if err := os.MkdirAll(root, 0755); err != nil { + t.Fatal(err) + } + + d, err := graphdriver.GetDriver(name, root, nil) + if err != nil { + if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites { + t.Skipf("Driver %s not supported", name) + } + t.Fatal(err) + } + return &Driver{d, root, 1} +} + +func cleanup(t *testing.T, d *Driver) { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + os.RemoveAll(d.root) +} + +func GetDriver(t *testing.T, name string) graphdriver.Driver { + if drv == nil { + drv = newDriver(t, name) + } else { + drv.refCount++ + } + return drv +} + +func PutDriver(t *testing.T) { + if drv == nil { + t.Skip("No driver to put!") + } + drv.refCount-- + if drv.refCount == 0 { + cleanup(t, drv) + drv = nil + } +} + +func verifyFile(t *testing.T, path string, mode os.FileMode, uid, gid uint32) { + fi, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + + if fi.Mode()&os.ModeType != mode&os.ModeType { + t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) + } + + if fi.Mode()&os.ModePerm != mode&os.ModePerm { + t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) + } + + if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { + t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) + } + + if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { + t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) + } + + if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { + t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) + } + + if stat, ok := fi.Sys().(*syscall.Stat_t); ok { + if stat.Uid != uid { + t.Fatalf("%s not owned by uid %d", path, uid) + } + if stat.Gid != gid { + t.Fatalf("%s not owned by gid %d", path, gid) + } + } + +} + +// Creates a new image and verifies it is empty and has the right metadata +func DriverTestCreateEmpty(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + if err := driver.Create("empty", ""); err != nil { + t.Fatal(err) + } + + if !driver.Exists("empty") { + t.Fatal("Newly created image doesn't 
exist") + } + + dir, err := driver.Get("empty", "") + if err != nil { + t.Fatal(err) + } + + verifyFile(t, dir, 0755|os.ModeDir, 0, 0) + + // Verify that the directory is empty + fis, err := ioutil.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 0 { + t.Fatal("New directory not empty") + } + + driver.Put("empty") + + if err := driver.Remove("empty"); err != nil { + t.Fatal(err) + } + +} + +func createBase(t *testing.T, driver graphdriver.Driver, name string) { + // We need to be able to set any perms + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) + + if err := driver.Create(name, ""); err != nil { + t.Fatal(err) + } + + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { + t.Fatal(err) + } + if err := os.Chown(subdir, 1, 2); err != nil { + t.Fatal(err) + } + + file := path.Join(dir, "a file") + if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil { + t.Fatal(err) + } +} + +func verifyBase(t *testing.T, driver graphdriver.Driver, name string) { + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) + + file := path.Join(dir, "a file") + verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) + + fis, err := ioutil.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 2 { + t.Fatal("Unexpected files in base image") + } + +} + +func DriverTestCreateBase(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + verifyBase(t, driver, "Base") + + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } +} + +func DriverTestCreateSnap(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + + if err := driver.Create("Snap", "Base"); err != nil { + t.Fatal(err) + } + + verifyBase(t, driver, "Snap") + + if err := driver.Remove("Snap"); err != nil { + t.Fatal(err) + } + + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } +} diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go index 765b21cded..992af0e149 100644 --- a/daemon/graphdriver/vfs/driver.go +++ b/daemon/graphdriver/vfs/driver.go @@ -12,7 +12,7 @@ func init() { graphdriver.Register("vfs", Init) } -func Init(home string) (graphdriver.Driver, error) { +func Init(home string, options []string) (graphdriver.Driver, error) { d := &Driver{ home: home, } @@ -47,7 +47,7 @@ func (d *Driver) Create(id, parent string) error { if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { return err } - if err := os.Mkdir(dir, 0700); err != nil { + if err := os.Mkdir(dir, 0755); err != nil { return err } if parent == "" { diff --git a/daemon/graphdriver/vfs/vfs_test.go b/daemon/graphdriver/vfs/vfs_test.go new file mode 100644 index 0000000000..e79f93c91d --- /dev/null +++ b/daemon/graphdriver/vfs/vfs_test.go @@ -0,0 +1,28 @@ +package vfs + +import ( + "github.com/dotcloud/docker/daemon/graphdriver/graphtest" + "testing" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestVfsSetup and TestVfsTeardown +func TestVfsSetup(t *testing.T) { + graphtest.GetDriver(t, "vfs") +} + +func TestVfsCreateEmpty(t *testing.T) { + 
graphtest.DriverTestCreateEmpty(t, "vfs") +} + +func TestVfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "vfs") +} + +func TestVfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "vfs") +} + +func TestVfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/daemon/history.go b/daemon/history.go index 57a00a2090..0b125ad2b3 100644 --- a/daemon/history.go +++ b/daemon/history.go @@ -26,5 +26,8 @@ func (history *History) Swap(i, j int) { func (history *History) Add(container *Container) { *history = append(*history, container) +} + +func (history *History) Sort() { sort.Sort(history) } diff --git a/daemon/inspect.go b/daemon/inspect.go new file mode 100644 index 0000000000..4da09d5449 --- /dev/null +++ b/daemon/inspect.go @@ -0,0 +1,54 @@ +package daemon + +import ( + "encoding/json" + + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/runconfig" +) + +func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + if container := daemon.Get(name); container != nil { + if job.GetenvBool("dirty") { + b, err := json.Marshal(&struct { + *Container + HostConfig *runconfig.HostConfig + }{container, container.HostConfig()}) + if err != nil { + return job.Error(err) + } + job.Stdout.Write(b) + return engine.StatusOK + } + + out := &engine.Env{} + out.Set("Id", container.ID) + out.SetAuto("Created", container.Created) + out.Set("Path", container.Path) + out.SetList("Args", container.Args) + out.SetJson("Config", container.Config) + out.SetJson("State", container.State) + out.Set("Image", container.Image) + out.SetJson("NetworkSettings", container.NetworkSettings) + out.Set("ResolvConfPath", container.ResolvConfPath) + out.Set("HostnamePath", container.HostnamePath) + out.Set("HostsPath", container.HostsPath) + out.Set("Name", container.Name) + out.Set("Driver", container.Driver) + out.Set("ExecDriver", container.ExecDriver) + out.Set("MountLabel", container.MountLabel) + out.Set("ProcessLabel", container.ProcessLabel) + out.SetJson("Volumes", container.Volumes) + out.SetJson("VolumesRW", container.VolumesRW) + out.SetJson("HostConfig", container.hostConfig) + if _, err := out.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} diff --git a/daemon/network_settings.go b/daemon/network_settings.go index 762270362b..a5c750acfe 100644 --- a/daemon/network_settings.go +++ b/daemon/network_settings.go @@ -23,7 +23,7 @@ func (settings *NetworkSettings) PortMappingAPI() *engine.Table { p, _ := nat.ParsePort(port.Port()) if len(bindings) == 0 { out := &engine.Env{} - out.SetInt("PublicPort", p) + out.SetInt("PrivatePort", p) out.Set("Type", port.Proto()) outs.Add(out) continue diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index c64aa423d1..a960aead61 100644 --- a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -6,6 +6,7 @@ import ( "log" "net" "strings" + "sync" "github.com/dotcloud/docker/daemon/networkdriver" "github.com/dotcloud/docker/daemon/networkdriver/ipallocator" @@ -28,6 +29,24 @@ type networkInterface struct { PortMappings []net.Addr // there are mappings to the host interfaces } +type ifaces struct { + c map[string]*networkInterface + sync.Mutex +} + +func (i *ifaces) Set(key string, n *networkInterface) { + i.Lock() + i.c[key] = n + i.Unlock() +} + +func (i 
*ifaces) Get(key string) *networkInterface { + i.Lock() + res := i.c[key] + i.Unlock() + return res +} + var ( addrs = []string{ // Here we don't follow the convention of using the 1st IP of the range for the gateway. @@ -53,7 +72,7 @@ var ( bridgeNetwork *net.IPNet defaultBindingIP = net.ParseIP("0.0.0.0") - currentInterfaces = make(map[string]*networkInterface) + currentInterfaces = ifaces{c: make(map[string]*networkInterface)} ) func InitDriver(job *engine.Job) engine.Status { @@ -321,9 +340,9 @@ func Allocate(job *engine.Job) engine.Status { size, _ := bridgeNetwork.Mask.Size() out.SetInt("IPPrefixLen", size) - currentInterfaces[id] = &networkInterface{ + currentInterfaces.Set(id, &networkInterface{ IP: *ip, - } + }) out.WriteTo(job.Stdout) @@ -334,7 +353,7 @@ func Allocate(job *engine.Job) engine.Status { func Release(job *engine.Job) engine.Status { var ( id = job.Args[0] - containerInterface = currentInterfaces[id] + containerInterface = currentInterfaces.Get(id) ip net.IP port int proto string @@ -380,39 +399,55 @@ func AllocatePort(job *engine.Job) engine.Status { ip = defaultBindingIP id = job.Args[0] hostIP = job.Getenv("HostIP") - hostPort = job.GetenvInt("HostPort") + origHostPort = job.GetenvInt("HostPort") containerPort = job.GetenvInt("ContainerPort") proto = job.Getenv("Proto") - network = currentInterfaces[id] + network = currentInterfaces.Get(id) ) if hostIP != "" { ip = net.ParseIP(hostIP) } - // host ip, proto, and host port - hostPort, err = portallocator.RequestPort(ip, proto, hostPort) - if err != nil { - return job.Error(err) - } - var ( + hostPort int container net.Addr host net.Addr ) - if proto == "tcp" { - host = &net.TCPAddr{IP: ip, Port: hostPort} - container = &net.TCPAddr{IP: network.IP, Port: containerPort} - } else { - host = &net.UDPAddr{IP: ip, Port: hostPort} - container = &net.UDPAddr{IP: network.IP, Port: containerPort} + /* + Try up to 10 times to get a port that's not already allocated. + + In the event of failure to bind, return the error that portmapper.Map + yields. + */ + for i := 0; i < 10; i++ { + // host ip, proto, and host port + hostPort, err = portallocator.RequestPort(ip, proto, origHostPort) + + if err != nil { + return job.Error(err) + } + + if proto == "tcp" { + host = &net.TCPAddr{IP: ip, Port: hostPort} + container = &net.TCPAddr{IP: network.IP, Port: containerPort} + } else { + host = &net.UDPAddr{IP: ip, Port: hostPort} + container = &net.UDPAddr{IP: network.IP, Port: containerPort} + } + + if err = portmapper.Map(container, ip, hostPort); err == nil { + break + } + + job.Logf("Failed to bind %s:%d for container address %s:%d. 
Trying another port.", ip.String(), hostPort, network.IP.String(), containerPort) } - if err := portmapper.Map(container, ip, hostPort); err != nil { - portallocator.ReleasePort(ip, proto, hostPort) + if err != nil { return job.Error(err) } + network.PortMappings = append(network.PortMappings, host) out := engine.Env{} diff --git a/daemon/networkdriver/ipallocator/allocator.go b/daemon/networkdriver/ipallocator/allocator.go index 914df34942..f154b0bd49 100644 --- a/daemon/networkdriver/ipallocator/allocator.go +++ b/daemon/networkdriver/ipallocator/allocator.go @@ -4,12 +4,21 @@ import ( "encoding/binary" "errors" "github.com/dotcloud/docker/daemon/networkdriver" - "github.com/dotcloud/docker/pkg/collections" "net" "sync" ) -type networkSet map[string]*collections.OrderedIntSet +// allocatedMap is a thread-unsafe set of allocated IPs +type allocatedMap struct { + p map[int32]struct{} + last int32 +} + +func newAllocatedMap() *allocatedMap { + return &allocatedMap{p: make(map[int32]struct{})} +} + +type networkSet map[string]*allocatedMap var ( ErrNoAvailableIPs = errors.New("no available ip addresses on network") @@ -19,92 +28,74 @@ var ( lock = sync.Mutex{} allocatedIPs = networkSet{} - availableIPS = networkSet{} ) // RequestIP requests an available ip from the given network. It // will return the next available ip if the ip provided is nil. If the // ip provided is not nil it will validate that the provided ip is available // for use or return an error -func RequestIP(address *net.IPNet, ip *net.IP) (*net.IP, error) { +func RequestIP(network *net.IPNet, ip *net.IP) (*net.IP, error) { lock.Lock() defer lock.Unlock() - - checkAddress(address) + key := network.String() + allocated, ok := allocatedIPs[key] + if !ok { + allocated = newAllocatedMap() + allocatedIPs[key] = allocated + } if ip == nil { - next, err := getNextIp(address) - if err != nil { - return nil, err - } - return next, nil + return allocated.getNextIP(network) } - - if err := registerIP(address, ip); err != nil { - return nil, err - } - return ip, nil + return allocated.checkIP(network, ip) } // ReleaseIP adds the provided ip back into the pool of // available ips to be returned for use. -func ReleaseIP(address *net.IPNet, ip *net.IP) error { +func ReleaseIP(network *net.IPNet, ip *net.IP) error { lock.Lock() defer lock.Unlock() - - checkAddress(address) - - var ( - existing = allocatedIPs[address.String()] - available = availableIPS[address.String()] - pos = getPosition(address, ip) - ) - - existing.Remove(int(pos)) - available.Push(int(pos)) - + if allocated, exists := allocatedIPs[network.String()]; exists { + pos := getPosition(network, ip) + delete(allocated.p, pos) + } return nil } // convert the ip into the position in the subnet. Only // position are saved in the set -func getPosition(address *net.IPNet, ip *net.IP) int32 { - var ( - first, _ = networkdriver.NetworkRange(address) - base = ipToInt(&first) - i = ipToInt(ip) - ) - return i - base +func getPosition(network *net.IPNet, ip *net.IP) int32 { + first, _ := networkdriver.NetworkRange(network) + return ipToInt(ip) - ipToInt(&first) +} + +func (allocated *allocatedMap) checkIP(network *net.IPNet, ip *net.IP) (*net.IP, error) { + pos := getPosition(network, ip) + if _, ok := allocated.p[pos]; ok { + return nil, ErrIPAlreadyAllocated + } + allocated.p[pos] = struct{}{} + allocated.last = pos + return ip, nil } // return an available ip if one is currently available. 
If not, // return the next available ip for the nextwork -func getNextIp(address *net.IPNet) (*net.IP, error) { +func (allocated *allocatedMap) getNextIP(network *net.IPNet) (*net.IP, error) { var ( - ownIP = ipToInt(&address.IP) - available = availableIPS[address.String()] - allocated = allocatedIPs[address.String()] - first, _ = networkdriver.NetworkRange(address) - base = ipToInt(&first) - size = int(networkdriver.NetworkSize(address.Mask)) - max = int32(size - 2) // size -1 for the broadcast address, -1 for the gateway address - pos = int32(available.Pop()) + ownIP = ipToInt(&network.IP) + first, _ = networkdriver.NetworkRange(network) + base = ipToInt(&first) + size = int(networkdriver.NetworkSize(network.Mask)) + max = int32(size - 2) // size -1 for the broadcast address, -1 for the gateway address + pos = allocated.last ) - // We pop and push the position not the ip - if pos != 0 { - ip := intToIP(int32(base + pos)) - allocated.Push(int(pos)) - - return ip, nil - } - var ( - firstNetIP = address.IP.To4().Mask(address.Mask) + firstNetIP = network.IP.To4().Mask(network.Mask) firstAsInt = ipToInt(&firstNetIP) + 1 ) - pos = int32(allocated.PullBack()) for i := int32(0); i < max; i++ { pos = pos%max + 1 next := int32(base + pos) @@ -112,31 +103,16 @@ func getNextIp(address *net.IPNet) (*net.IP, error) { if next == ownIP || next == firstAsInt { continue } - - if !allocated.Exists(int(pos)) { - ip := intToIP(next) - allocated.Push(int(pos)) - return ip, nil + if _, ok := allocated.p[pos]; ok { + continue } + allocated.p[pos] = struct{}{} + allocated.last = pos + return intToIP(next), nil } return nil, ErrNoAvailableIPs } -func registerIP(address *net.IPNet, ip *net.IP) error { - var ( - existing = allocatedIPs[address.String()] - available = availableIPS[address.String()] - pos = getPosition(address, ip) - ) - - if existing.Exists(int(pos)) { - return ErrIPAlreadyAllocated - } - available.Remove(int(pos)) - - return nil -} - // Converts a 4 bytes IP into a 32 bit integer func ipToInt(ip *net.IP) int32 { return int32(binary.BigEndian.Uint32(ip.To4())) @@ -149,11 +125,3 @@ func intToIP(n int32) *net.IP { ip := net.IP(b) return &ip } - -func checkAddress(address *net.IPNet) { - key := address.String() - if _, exists := allocatedIPs[key]; !exists { - allocatedIPs[key] = collections.NewOrderedIntSet() - availableIPS[key] = collections.NewOrderedIntSet() - } -} diff --git a/daemon/networkdriver/ipallocator/allocator_test.go b/daemon/networkdriver/ipallocator/allocator_test.go index 5e9fcfc983..6897a0a44b 100644 --- a/daemon/networkdriver/ipallocator/allocator_test.go +++ b/daemon/networkdriver/ipallocator/allocator_test.go @@ -8,7 +8,6 @@ import ( func reset() { allocatedIPs = networkSet{} - availableIPS = networkSet{} } func TestRequestNewIps(t *testing.T) { @@ -18,8 +17,10 @@ func TestRequestNewIps(t *testing.T) { Mask: []byte{255, 255, 255, 0}, } + var ip *net.IP + var err error for i := 2; i < 10; i++ { - ip, err := RequestIP(network, nil) + ip, err = RequestIP(network, nil) if err != nil { t.Fatal(err) } @@ -28,6 +29,17 @@ func TestRequestNewIps(t *testing.T) { t.Fatalf("Expected ip %s got %s", expected, ip.String()) } } + value := intToIP(ipToInt(ip) + 1).String() + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } + ip, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + if ip.String() != value { + t.Fatalf("Expected to receive the next ip %s got %s", value, ip.String()) + } } func TestReleaseIp(t *testing.T) { @@ -64,6 +76,17 @@ func TestGetReleasedIp(t 
*testing.T) { t.Fatal(err) } + for i := 0; i < 252; i++ { + _, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + err = ReleaseIP(network, ip) + if err != nil { + t.Fatal(err) + } + } + ip, err = RequestIP(network, nil) if err != nil { t.Fatal(err) @@ -185,24 +208,6 @@ func TestIPAllocator(t *testing.T) { newIPs[i] = ip } - // Before loop begin - // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) - // ↑ - - // After i = 0 - // 2(u) - 3(u) - 4(f) - 5(u) - 6(f) - // ↑ - - // After i = 1 - // 2(u) - 3(u) - 4(f) - 5(u) - 6(u) - // ↑ - - // After i = 2 - // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) - // ↑ - - // Reordered these because the new set will always return the - // lowest ips first and not in the order that they were released assertIPEquals(t, &expectedIPs[2], newIPs[0]) assertIPEquals(t, &expectedIPs[3], newIPs[1]) assertIPEquals(t, &expectedIPs[4], newIPs[2]) @@ -234,8 +239,105 @@ func TestAllocateFirstIP(t *testing.T) { } } +func TestAllocateAllIps(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + + var ( + current, first *net.IP + err error + isFirst = true + ) + + for err == nil { + current, err = RequestIP(network, nil) + if isFirst { + first = current + isFirst = false + } + } + + if err != ErrNoAvailableIPs { + t.Fatal(err) + } + + if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs { + t.Fatal(err) + } + + if err := ReleaseIP(network, first); err != nil { + t.Fatal(err) + } + + again, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + assertIPEquals(t, first, again) +} + +func TestAllocateDifferentSubnets(t *testing.T) { + defer reset() + network1 := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + network2 := &net.IPNet{ + IP: []byte{127, 0, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + expectedIPs := []net.IP{ + 0: net.IPv4(192, 168, 0, 2), + 1: net.IPv4(192, 168, 0, 3), + 2: net.IPv4(127, 0, 0, 2), + 3: net.IPv4(127, 0, 0, 3), + } + + ip11, err := RequestIP(network1, nil) + if err != nil { + t.Fatal(err) + } + ip12, err := RequestIP(network1, nil) + if err != nil { + t.Fatal(err) + } + ip21, err := RequestIP(network2, nil) + if err != nil { + t.Fatal(err) + } + ip22, err := RequestIP(network2, nil) + if err != nil { + t.Fatal(err) + } + assertIPEquals(t, &expectedIPs[0], ip11) + assertIPEquals(t, &expectedIPs[1], ip12) + assertIPEquals(t, &expectedIPs[2], ip21) + assertIPEquals(t, &expectedIPs[3], ip22) +} + func assertIPEquals(t *testing.T, ip1, ip2 *net.IP) { if !ip1.Equal(*ip2) { t.Fatalf("Expected IP %s, got %s", ip1, ip2) } } + +func BenchmarkRequestIP(b *testing.B) { + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < 253; j++ { + _, err := RequestIP(network, nil) + if err != nil { + b.Fatal(err) + } + } + reset() + } +} diff --git a/daemon/networkdriver/portallocator/portallocator.go b/daemon/networkdriver/portallocator/portallocator.go index 9ecd447116..251ab94473 100644 --- a/daemon/networkdriver/portallocator/portallocator.go +++ b/daemon/networkdriver/portallocator/portallocator.go @@ -2,21 +2,21 @@ package portallocator import ( "errors" - "github.com/dotcloud/docker/pkg/collections" "net" "sync" ) +type ( + portMap map[int]bool + protocolMap map[string]portMap + ipMapping map[string]protocolMap +) + const ( BeginPortRange = 49153 EndPortRange = 65535 ) -type ( - portMappings map[string]*collections.OrderedIntSet - 
ipMapping map[string]portMappings -) - var ( ErrAllPortsAllocated = errors.New("all ports are allocated") ErrPortAlreadyAllocated = errors.New("port has already been allocated") @@ -24,165 +24,106 @@ var ( ) var ( - currentDynamicPort = map[string]int{ - "tcp": BeginPortRange - 1, - "udp": BeginPortRange - 1, - } - defaultIP = net.ParseIP("0.0.0.0") - defaultAllocatedPorts = portMappings{} - otherAllocatedPorts = ipMapping{} - lock = sync.Mutex{} + mutex sync.Mutex + + defaultIP = net.ParseIP("0.0.0.0") + globalMap = ipMapping{} ) -func init() { - defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet() - defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet() -} - -// RequestPort returns an available port if the port is 0 -// If the provided port is not 0 then it will be checked if -// it is available for allocation func RequestPort(ip net.IP, proto string, port int) (int, error) { - lock.Lock() - defer lock.Unlock() + mutex.Lock() + defer mutex.Unlock() - if err := validateProtocol(proto); err != nil { + if err := validateProto(proto); err != nil { return 0, err } - // If the user requested a specific port to be allocated + ip = getDefault(ip) + + mapping := getOrCreate(ip) + if port > 0 { - if err := registerSetPort(ip, proto, port); err != nil { + if !mapping[proto][port] { + mapping[proto][port] = true + return port, nil + } else { + return 0, ErrPortAlreadyAllocated + } + } else { + port, err := findPort(ip, proto) + + if err != nil { return 0, err } + return port, nil } - return registerDynamicPort(ip, proto) } -// ReleasePort will return the provided port back into the -// pool for reuse func ReleasePort(ip net.IP, proto string, port int) error { - lock.Lock() - defer lock.Unlock() + mutex.Lock() + defer mutex.Unlock() - if err := validateProtocol(proto); err != nil { - return err - } + ip = getDefault(ip) - allocated := defaultAllocatedPorts[proto] - allocated.Remove(port) + mapping := getOrCreate(ip) + delete(mapping[proto], port) - if !equalsDefault(ip) { - registerIP(ip) - - // Remove the port for the specific ip address - allocated = otherAllocatedPorts[ip.String()][proto] - allocated.Remove(port) - } return nil } func ReleaseAll() error { - lock.Lock() - defer lock.Unlock() + mutex.Lock() + defer mutex.Unlock() - currentDynamicPort["tcp"] = BeginPortRange - 1 - currentDynamicPort["udp"] = BeginPortRange - 1 - - defaultAllocatedPorts = portMappings{} - defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet() - defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet() - - otherAllocatedPorts = ipMapping{} + globalMap = ipMapping{} return nil } -func registerDynamicPort(ip net.IP, proto string) (int, error) { +func getOrCreate(ip net.IP) protocolMap { + ipstr := ip.String() - if !equalsDefault(ip) { - registerIP(ip) - - ipAllocated := otherAllocatedPorts[ip.String()][proto] - - port, err := findNextPort(proto, ipAllocated) - if err != nil { - return 0, err + if _, ok := globalMap[ipstr]; !ok { + globalMap[ipstr] = protocolMap{ + "tcp": portMap{}, + "udp": portMap{}, } - ipAllocated.Push(port) - return port, nil - - } else { - - allocated := defaultAllocatedPorts[proto] - - port, err := findNextPort(proto, allocated) - if err != nil { - return 0, err - } - allocated.Push(port) - return port, nil - } -} - -func registerSetPort(ip net.IP, proto string, port int) error { - allocated := defaultAllocatedPorts[proto] - if allocated.Exists(port) { - return ErrPortAlreadyAllocated } - if !equalsDefault(ip) { - registerIP(ip) - - ipAllocated := 
otherAllocatedPorts[ip.String()][proto] - if ipAllocated.Exists(port) { - return ErrPortAlreadyAllocated - } - ipAllocated.Push(port) - } else { - allocated.Push(port) - } - return nil + return globalMap[ipstr] } -func equalsDefault(ip net.IP) bool { - return ip == nil || ip.Equal(defaultIP) -} +func findPort(ip net.IP, proto string) (int, error) { + port := BeginPortRange -func findNextPort(proto string, allocated *collections.OrderedIntSet) (int, error) { - port := nextPort(proto) - startSearchPort := port - for allocated.Exists(port) { - port = nextPort(proto) - if startSearchPort == port { + mapping := getOrCreate(ip) + + for mapping[proto][port] { + port++ + + if port > EndPortRange { return 0, ErrAllPortsAllocated } } + + mapping[proto][port] = true + return port, nil } -func nextPort(proto string) int { - c := currentDynamicPort[proto] + 1 - if c > EndPortRange { - c = BeginPortRange +func getDefault(ip net.IP) net.IP { + if ip == nil { + return defaultIP } - currentDynamicPort[proto] = c - return c + + return ip } -func registerIP(ip net.IP) { - if _, exists := otherAllocatedPorts[ip.String()]; !exists { - otherAllocatedPorts[ip.String()] = portMappings{ - "tcp": collections.NewOrderedIntSet(), - "udp": collections.NewOrderedIntSet(), - } - } -} - -func validateProtocol(proto string) error { - if _, exists := defaultAllocatedPorts[proto]; !exists { +func validateProto(proto string) error { + if proto != "tcp" && proto != "udp" { return ErrUnknownProtocol } + return nil } diff --git a/daemon/state.go b/daemon/state.go index 562929c87a..7ee8fc48c3 100644 --- a/daemon/state.go +++ b/daemon/state.go @@ -2,14 +2,16 @@ package daemon import ( "fmt" - "github.com/dotcloud/docker/utils" "sync" "time" + + "github.com/dotcloud/docker/pkg/units" ) type State struct { sync.RWMutex Running bool + Paused bool Pid int ExitCode int StartedAt time.Time @@ -22,12 +24,15 @@ func (s *State) String() string { defer s.RUnlock() if s.Running { - return fmt.Sprintf("Up %s", utils.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + if s.Paused { + return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) } if s.FinishedAt.IsZero() { return "" } - return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, utils.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) } func (s *State) IsRunning() bool { @@ -49,6 +54,7 @@ func (s *State) SetRunning(pid int) { defer s.Unlock() s.Running = true + s.Paused = false s.ExitCode = 0 s.Pid = pid s.StartedAt = time.Now().UTC() @@ -63,3 +69,22 @@ func (s *State) SetStopped(exitCode int) { s.FinishedAt = time.Now().UTC() s.ExitCode = exitCode } + +func (s *State) SetPaused() { + s.Lock() + defer s.Unlock() + s.Paused = true +} + +func (s *State) SetUnpaused() { + s.Lock() + defer s.Unlock() + s.Paused = false +} + +func (s *State) IsPaused() bool { + s.RLock() + defer s.RUnlock() + + return s.Paused +} diff --git a/daemon/utils.go b/daemon/utils.go index 15b62e2a06..d60d985152 100644 --- a/daemon/utils.go +++ b/daemon/utils.go @@ -2,10 +2,10 @@ package daemon import ( "fmt" - "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/pkg/namesgenerator" - "github.com/dotcloud/docker/runconfig" "strings" + + "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/runconfig" ) func migratePortMappings(config *runconfig.Config, hostConfig 
*runconfig.HostConfig) error { @@ -49,16 +49,3 @@ func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig, driverConfig map[ driverConfig["lxc"] = lxc } } - -type checker struct { - daemon *Daemon -} - -func (c *checker) Exists(name string) bool { - return c.daemon.containerGraph.Exists("/" + name) -} - -// Generate a random and unique name -func generateRandomName(daemon *Daemon) (string, error) { - return namesgenerator.GenerateRandomName(&checker{daemon}) -} diff --git a/daemon/volumes.go b/daemon/volumes.go index a15e3084b2..d9719369ac 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -10,7 +10,7 @@ import ( "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/utils" + "github.com/dotcloud/docker/pkg/symlink" ) type BindMap struct { @@ -40,8 +40,11 @@ func setupMountsForContainer(container *Container) error { {container.ResolvConfPath, "/etc/resolv.conf", false, true}, } - if container.HostnamePath != "" && container.HostsPath != "" { + if container.HostnamePath != "" { mounts = append(mounts, execdriver.Mount{container.HostnamePath, "/etc/hostname", false, true}) + } + + if container.HostsPath != "" { mounts = append(mounts, execdriver.Mount{container.HostsPath, "/etc/hosts", false, true}) } @@ -94,13 +97,16 @@ func applyVolumesFrom(container *Container) error { if _, exists := container.Volumes[volPath]; exists { continue } - stat, err := os.Stat(filepath.Join(c.basefs, volPath)) + + stat, err := os.Stat(c.getResourcePath(volPath)) if err != nil { return err } - if err := createIfNotExists(filepath.Join(container.basefs, volPath), stat.IsDir()); err != nil { + + if err := createIfNotExists(container.getResourcePath(volPath), stat.IsDir()); err != nil { return err } + container.Volumes[volPath] = id if isRW, exists := c.VolumesRW[volPath]; exists { container.VolumesRW[volPath] = isRW && mountRW @@ -162,137 +168,169 @@ func createVolumes(container *Container) error { return err } - volumesDriver := container.daemon.volumes.Driver() // Create the requested volumes if they don't exist for volPath := range container.Config.Volumes { - volPath = filepath.Clean(volPath) - volIsDir := true - // Skip existing volumes - if _, exists := container.Volumes[volPath]; exists { - continue - } - var srcPath string - var isBindMount bool - srcRW := false - // If an external bind is defined for this volume, use that as a source - if bindMap, exists := binds[volPath]; exists { - isBindMount = true - srcPath = bindMap.SrcPath - if !filepath.IsAbs(srcPath) { - return fmt.Errorf("%s must be an absolute path", srcPath) - } - if strings.ToLower(bindMap.Mode) == "rw" { - srcRW = true - } - if stat, err := os.Stat(bindMap.SrcPath); err != nil { - return err - } else { - volIsDir = stat.IsDir() - } - // Otherwise create an directory in $ROOT/volumes/ and use that - } else { - - // Do not pass a container as the parameter for the volume creation. - // The graph driver using the container's information ( Image ) to - // create the parent. 
- c, err := container.daemon.volumes.Create(nil, "", "", "", "", nil, nil) - if err != nil { - return err - } - srcPath, err = volumesDriver.Get(c.ID, "") - if err != nil { - return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err) - } - srcRW = true // RW by default - } - - if p, err := filepath.EvalSymlinks(srcPath); err != nil { - return err - } else { - srcPath = p - } - - // Create the mountpoint - rootVolPath, err := utils.FollowSymlinkInScope(filepath.Join(container.basefs, volPath), container.basefs) - if err != nil { + if err := initializeVolume(container, volPath, binds); err != nil { return err } + } - newVolPath, err := filepath.Rel(container.basefs, rootVolPath) - if err != nil { + for volPath := range binds { + if err := initializeVolume(container, volPath, binds); err != nil { return err } - newVolPath = "/" + newVolPath - - if volPath != newVolPath { - delete(container.Volumes, volPath) - delete(container.VolumesRW, volPath) - } - - container.Volumes[newVolPath] = srcPath - container.VolumesRW[newVolPath] = srcRW - - if err := createIfNotExists(rootVolPath, volIsDir); err != nil { - return err - } - - // Do not copy or change permissions if we are mounting from the host - if srcRW && !isBindMount { - volList, err := ioutil.ReadDir(rootVolPath) - if err != nil { - return err - } - if len(volList) > 0 { - srcList, err := ioutil.ReadDir(srcPath) - if err != nil { - return err - } - if len(srcList) == 0 { - // If the source volume is empty copy files from the root into the volume - if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil { - return err - } - } - } - - var stat syscall.Stat_t - if err := syscall.Stat(rootVolPath, &stat); err != nil { - return err - } - var srcStat syscall.Stat_t - if err := syscall.Stat(srcPath, &srcStat); err != nil { - return err - } - // Change the source volume's ownership if it differs from the root - // files that were just copied - if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid { - if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil { - return err - } - } - } } return nil } -func createIfNotExists(path string, isDir bool) error { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - if isDir { - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - } else { - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE, 0755) - if err != nil { - return err - } - defer f.Close() +func createIfNotExists(destination string, isDir bool) error { + if _, err := os.Stat(destination); err != nil && os.IsNotExist(err) { + if isDir { + if err := os.MkdirAll(destination, 0755); err != nil { + return err } + } else { + if err := os.MkdirAll(filepath.Dir(destination), 0755); err != nil { + return err + } + + f, err := os.OpenFile(destination, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + + return nil +} + +func initializeVolume(container *Container, volPath string, binds map[string]BindMap) error { + volumesDriver := container.daemon.volumes.Driver() + volPath = filepath.Clean(volPath) + + // Skip existing volumes + if _, exists := container.Volumes[volPath]; exists { + return nil + } + + var ( + destination string + isBindMount bool + volIsDir = true + + srcRW = false + ) + + // If an external bind is defined for this volume, use that as a source + if bindMap, exists := binds[volPath]; exists { + isBindMount = true + destination = bindMap.SrcPath + + if 
!filepath.IsAbs(destination) { + return fmt.Errorf("%s must be an absolute path", destination) + } + + if strings.ToLower(bindMap.Mode) == "rw" { + srcRW = true + } + + if stat, err := os.Stat(bindMap.SrcPath); err != nil { + return err + } else { + volIsDir = stat.IsDir() + } + } else { + // Do not pass a container as the parameter for the volume creation. + // The graph driver uses the container's information (Image) to + // create the parent. + c, err := container.daemon.volumes.Create(nil, "", "", "", "", nil, nil) + if err != nil { + return err + } + + destination, err = volumesDriver.Get(c.ID, "") + if err != nil { + return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err) + } + + srcRW = true + } + + if p, err := filepath.EvalSymlinks(destination); err != nil { + return err + } else { + destination = p + } + + // Create the mountpoint + source, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, volPath), container.basefs) + if err != nil { + return err + } + + newVolPath, err := filepath.Rel(container.basefs, source) + if err != nil { + return err + } + newVolPath = "/" + newVolPath + + if volPath != newVolPath { + delete(container.Volumes, volPath) + delete(container.VolumesRW, volPath) + } + + container.Volumes[newVolPath] = destination + container.VolumesRW[newVolPath] = srcRW + + if err := createIfNotExists(source, volIsDir); err != nil { + return err + } + + // Do not copy or change permissions if we are mounting from the host + if srcRW && !isBindMount { + if err := copyExistingContents(source, destination); err != nil { + return err + } } return nil } + +func copyExistingContents(source, destination string) error { + volList, err := ioutil.ReadDir(source) + if err != nil { + return err + } + + if len(volList) > 0 { + srcList, err := ioutil.ReadDir(destination) + if err != nil { + return err + } + + if len(srcList) == 0 { + // If the source volume is empty, copy files from the root into the volume + if err := archive.CopyWithTar(source, destination); err != nil { + return err + } + } + } + + return copyOwnership(source, destination) +} + +// copyOwnership copies the permissions and uid:gid of the source file +// into the destination file +func copyOwnership(source, destination string) error { + var stat syscall.Stat_t + + if err := syscall.Stat(source, &stat); err != nil { + return err + } + + if err := os.Chown(destination, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + return os.Chmod(destination, os.FileMode(stat.Mode)) +} diff --git a/daemonconfig/README.md b/daemonconfig/README.md new file mode 100644 index 0000000000..488e7c7cac --- /dev/null +++ b/daemonconfig/README.md @@ -0,0 +1,3 @@ +This directory contains code pertaining to the configuration of the Docker daemon. + +These are the configuration settings that you pass to the Docker daemon when you launch it, e.g. `docker -d -e lxc`. diff --git a/daemonconfig/config.go b/daemonconfig/config.go index 619bfe582f..9f77d84a58 100644 --- a/daemonconfig/config.go +++ b/daemonconfig/config.go @@ -25,6 +25,7 @@ type Config struct { BridgeIP string InterContainerCommunication bool GraphDriver string + GraphOptions []string ExecDriver string Mtu int DisableNetwork bool @@ -49,6 +50,10 @@ func ConfigFromJob(job *engine.Job) *Config { ExecDriver: job.Getenv("ExecDriver"), EnableSelinuxSupport: job.GetenvBool("EnableSelinuxSupport"), } + if graphOpts := job.GetenvList("GraphOptions"); graphOpts != nil { + config.GraphOptions = graphOpts + } + + if dns :=
job.GetenvList("Dns"); dns != nil { config.Dns = dns } diff --git a/docker/docker.go b/docker/docker.go index 60f34a1f14..56bcb04e41 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -41,6 +41,7 @@ func main() { var ( flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode") + flGraphOpts opts.ListOpts flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode") flAutoRestart = flag.Bool([]string{"r", "-restart"}, true, "Restart previously running containers") bridgeName = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking") @@ -69,6 +70,7 @@ func main() { flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers") flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains") flag.Var(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode\nspecified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.") + flag.Var(&flGraphOpts, []string{"-storage-opt"}, "Set storage driver options") flag.Parse() @@ -98,6 +100,9 @@ func main() { } if *flDaemon { + if runtime.GOOS != "linux" { + log.Fatalf("The Docker daemon is only supported on linux") + } if os.Geteuid() != 0 { log.Fatalf("The Docker daemon needs to be run as root") } @@ -153,6 +158,7 @@ func main() { job.Setenv("DefaultIp", *flDefaultIp) job.SetenvBool("InterContainerCommunication", *flInterContainerComm) job.Setenv("GraphDriver", *flGraphDriver) + job.SetenvList("GraphOptions", flGraphOpts.GetAll()) job.Setenv("ExecDriver", *flExecDriver) job.SetenvInt("Mtu", *flMtu) job.SetenvBool("EnableSelinuxSupport", *flSelinuxEnabled) @@ -185,6 +191,7 @@ func main() { job.Setenv("TlsCa", *flCa) job.Setenv("TlsCert", *flCert) job.Setenv("TlsKey", *flKey) + job.SetenvBool("BufferRequests", true) if err := job.Run(); err != nil { log.Fatal(err) } diff --git a/docs/Dockerfile b/docs/Dockerfile index a907072ddf..694729d89b 100644 --- a/docs/Dockerfile +++ b/docs/Dockerfile @@ -4,7 +4,7 @@ FROM debian:jessie MAINTAINER Sven Dowideit (@SvenDowideit) -RUN apt-get update && apt-get install -yq make python-pip python-setuptools vim-tiny git pandoc +RUN apt-get update && apt-get install -yq make python-pip python-setuptools vim-tiny git gettext RUN pip install mkdocs diff --git a/docs/README.md b/docs/README.md index 47b390bda4..fa3c501087 100755 --- a/docs/README.md +++ b/docs/README.md @@ -1,37 +1,35 @@ # Docker Documentation -The source for Docker documentation is here under `sources/` and uses -extended Markdown, as implemented by [mkdocs](http://mkdocs.org). +The source for Docker documentation is here under `sources/` and uses extended +Markdown, as implemented by [MkDocs](http://mkdocs.org). -The HTML files are built and hosted on `https://docs.docker.io`, and -update automatically after each change to the master or release branch -of [Docker on GitHub](https://github.com/dotcloud/docker) -thanks to post-commit hooks. The "docs" branch maps to the "latest" -documentation and the "master" (unreleased development) branch maps to -the "master" documentation. +The HTML files are built and hosted on `https://docs.docker.io`, and update +automatically after each change to the master or release branch of [Docker on +GitHub](https://github.com/dotcloud/docker) thanks to post-commit hooks. 
The +`docs` branch maps to the "latest" documentation and the `master` (unreleased +development) branch maps to the "master" documentation. ## Branches -**There are two branches related to editing docs**: `master` and a -`docs` branch. You should always edit documentation on a local branch -of the `master` branch, and send a PR against `master`. +**There are two branches related to editing docs**: `master` and a `docs` +branch. You should always edit documentation on a local branch of the `master` +branch, and send a PR against `master`. -That way your fixes will automatically get included in later releases, -and docs maintainers can easily cherry-pick your changes into the -`docs` release branch. In the rare case where your change is not -forward-compatible, you may need to base your changes on the `docs` -branch. +That way your fixes will automatically get included in later releases, and docs +maintainers can easily cherry-pick your changes into the `docs` release branch. +In the rare case where your change is not forward-compatible, you may need to +base your changes on the `docs` branch. Also, now that we have a `docs` branch, we can keep the -[http://docs.docker.io](http://docs.docker.io) docs up to date with any -bugs found between `docker` code releases. +[http://docs.docker.io](http://docs.docker.io) docs up to date with any bugs +found between Docker code releases. **Warning**: When *reading* the docs, the -[http://beta-docs.docker.io](http://beta-docs.docker.io) documentation -may include features not yet part of any official docker release. The -`beta-docs` site should be used only for understanding bleeding-edge -development and `docs.docker.io` (which points to the `docs` -branch`) should be used for the latest official release. +[http://beta-docs.docker.io](http://beta-docs.docker.io) documentation may +include features not yet part of any official Docker release. The `beta-docs` +site should be used only for understanding bleeding-edge development and +`docs.docker.io` (which points to the `docs` branch) should be used for the +latest official release. ## Contributing @@ -41,59 +39,61 @@ branch`) should be used for the latest official release. ## Getting Started -Docker documentation builds are done in a Docker container, which -installs all the required tools, adds the local `docs/` directory and -builds the HTML docs. It then starts a HTTP server on port 8000 so that -you can connect and see your changes. +Docker documentation builds are done in a Docker container, which installs all +the required tools, adds the local `docs/` directory and builds the HTML docs. +It then starts an HTTP server on port 8000 so that you can connect and see your +changes. In the root of the `docker` source directory: make docs -If you have any issues you need to debug, you can use `make docs-shell` and -then run `mkdocs serve` +If you have any issues you need to debug, you can use `make docs-shell` and then +run `mkdocs serve`. + +## Style guide + +The documentation is written with paragraphs wrapped at 80 columns to make +them easier to read in a terminal. ### Examples -When writing examples give the user hints by making them resemble what -they see in their shell: +When writing examples, give the user hints by making them resemble what they see +in their shell: - Indent shell examples by 4 spaces so they get rendered as code. - Start typed commands with `$ ` (dollar space), so that they are easily -differentiated from program output.
- Program output has no prefix. - Comments begin with `# ` (hash space). - In-container shell commands begin with `$$ ` (dollar dollar space). ### Images -When you need to add images, try to make them as small as possible -(e.g. as gifs). Usually images should go in the same directory as the -`.md` file which references them, or in a subdirectory if one already -exists. +When you need to add images, try to make them as small as possible (e.g. as +gifs). Usually images should go in the same directory as the `.md` file which +references them, or in a subdirectory if one already exists. ## Working using GitHub's file editor -Alternatively, for small changes and typos you might want to use -GitHub's built in file editor. It allows you to preview your changes -right on-line (though there can be some differences between GitHub -Markdown and [MkDocs Markdown](http://www.mkdocs.org/user-guide/writing-your-docs/)). -Just be careful not to create many commits. And you must still -[sign your work!](../CONTRIBUTING.md#sign-your-work) +Alternatively, for small changes and typos you might want to use GitHub's +built-in file editor. It allows you to preview your changes right on-line (though +there can be some differences between GitHub Markdown and [MkDocs +Markdown](http://www.mkdocs.org/user-guide/writing-your-docs/)). Just be +careful not to create many commits. And you must still [sign your +work!](../CONTRIBUTING.md#sign-your-work) ## Publishing Documentation -To publish a copy of the documentation you need a `docs/awsconfig` -file containing AWS settings to deploy to. The release script will +To publish a copy of the documentation you need a `docs/awsconfig` file +containing AWS settings to deploy to. The release script will create an s3 bucket if needed, and will then push the files to it. - [profile dowideit-docs] - aws_access_key_id = IHOIUAHSIDH234rwf.... - aws_secret_access_key = OIUYSADJHLKUHQWIUHE...... - region = ap-southeast-2 + [profile dowideit-docs] + aws_access_key_id = IHOIUAHSIDH234rwf.... + aws_secret_access_key = OIUYSADJHLKUHQWIUHE...... +
region = ap-southeast-2 -The `profile` name must be the same as the name of the bucket you are -deploying to - which you call from the `docker` directory: +The `profile` name must be the same as the name of the bucket you are deploying +to - which you call from the `docker` directory: make AWS_S3_BUCKET=dowideit-docs docs-release diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index c16436e892..2835cd9dde 100755 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -28,15 +28,14 @@ pages: - ['index.md', 'About', 'Docker'] - ['introduction/index.md', '**HIDDEN**'] - ['introduction/understanding-docker.md', 'About', 'Understanding Docker'] -- ['introduction/technology.md', 'About', 'The Technology'] -- ['introduction/working-with-docker.md', 'About', 'Working with Docker'] -- ['introduction/get-docker.md', 'About', 'Get Docker'] # Installation: - ['installation/index.md', '**HIDDEN**'] - ['installation/mac.md', 'Installation', 'Mac OS X'] - ['installation/ubuntulinux.md', 'Installation', 'Ubuntu'] - ['installation/rhel.md', 'Installation', 'Red Hat Enterprise Linux'] +- ['installation/centos.md', 'Installation', 'CentOS'] +- ['installation/debian.md', 'Installation', 'Debian'] - ['installation/gentoolinux.md', 'Installation', 'Gentoo'] - ['installation/google.md', 'Installation', 'Google Cloud Platform'] - ['installation/rackspace.md', 'Installation', 'Rackspace Cloud'] @@ -50,56 +49,55 @@ pages: - ['installation/windows.md', 'Installation', 'Microsoft Windows'] - ['installation/binaries.md', 'Installation', 'Binaries'] -# Examples: -- ['use/index.md', '**HIDDEN**'] -- ['use/basics.md', 'Examples', 'First steps with Docker'] -- ['examples/index.md', '**HIDDEN**'] -- ['examples/hello_world.md', 'Examples', 'Hello World'] -- ['examples/nodejs_web_app.md', 'Examples', 'Node.js web application'] -- ['examples/python_web_app.md', 'Examples', 'Python web application'] -- ['examples/mongodb.md', 'Examples', 'MongoDB service'] -- ['examples/running_redis_service.md', 'Examples', 'Redis service'] -- ['examples/postgresql_service.md', 'Examples', 'PostgreSQL service'] -- ['examples/running_riak_service.md', 'Examples', 'Running a Riak service'] -- ['examples/running_ssh_service.md', 'Examples', 'Running an SSH service'] -- ['examples/couchdb_data_volumes.md', 'Examples', 'CouchDB service'] -- ['examples/apt-cacher-ng.md', 'Examples', 'Apt-Cacher-ng service'] -- ['examples/https.md', 'Examples', 'Running Docker with HTTPS'] -- ['examples/using_supervisord.md', 'Examples', 'Using Supervisor'] -- ['examples/cfengine_process_management.md', 'Examples', 'Process management with CFEngine'] -- ['use/working_with_links_names.md', 'Examples', 'Linking containers together'] -- ['use/working_with_volumes.md', 'Examples', 'Sharing Directories using volumes'] -- ['use/puppet.md', 'Examples', 'Using Puppet'] -- ['use/chef.md', 'Examples', 'Using Chef'] -- ['use/workingwithrepository.md', 'Examples', 'Working with a Docker Repository'] -- ['use/port_redirection.md', 'Examples', 'Redirect ports'] -- ['use/ambassador_pattern_linking.md', 'Examples', 'Cross-Host linking using Ambassador Containers'] -- ['use/host_integration.md', 'Examples', 'Automatically starting Containers'] - -#- ['user-guide/index.md', '**HIDDEN**'] -# - ['user-guide/writing-your-docs.md', 'User Guide', 'Writing your docs'] -# - ['user-guide/styling-your-docs.md', 'User Guide', 'Styling your docs'] -# - ['user-guide/configuration.md', 'User Guide', 'Configuration'] -# ./faq.md +# User Guide: +- ['userguide/index.md', 'User Guide', 'The Docker User Guide' 
] +- ['userguide/dockerio.md', 'User Guide', 'Getting Started with Docker.io' ] +- ['userguide/dockerizing.md', 'User Guide', 'Dockerizing Applications' ] +- ['userguide/usingdocker.md', 'User Guide', 'Working with Containers' ] +- ['userguide/dockerimages.md', 'User Guide', 'Working with Docker Images' ] +- ['userguide/dockerlinks.md', 'User Guide', 'Linking containers together' ] +- ['userguide/dockervolumes.md', 'User Guide', 'Managing data in containers' ] +- ['userguide/dockerrepos.md', 'User Guide', 'Working with Docker.io' ] # Docker.io docs: -- ['docker-io/index.md', '**HIDDEN**'] -# - ['index/home.md', 'Docker Index', 'Help'] +- ['docker-io/index.md', 'Docker.io', 'Docker.io' ] - ['docker-io/accounts.md', 'Docker.io', 'Accounts'] - ['docker-io/repos.md', 'Docker.io', 'Repositories'] -- ['docker-io/builds.md', 'Docker.io', 'Trusted Builds'] +- ['docker-io/builds.md', 'Docker.io', 'Automated Builds'] + +# Examples: +- ['examples/index.md', '**HIDDEN**'] +- ['examples/nodejs_web_app.md', 'Examples', 'Dockerizing a Node.js web application'] +- ['examples/mongodb.md', 'Examples', 'Dockerizing MongoDB'] +- ['examples/running_redis_service.md', 'Examples', 'Dockerizing a Redis service'] +- ['examples/postgresql_service.md', 'Examples', 'Dockerizing a PostgreSQL service'] +- ['examples/running_riak_service.md', 'Examples', 'Dockerizing a Riak service'] +- ['examples/running_ssh_service.md', 'Examples', 'Dockerizing an SSH service'] +- ['examples/couchdb_data_volumes.md', 'Examples', 'Dockerizing a CouchDB service'] +- ['examples/apt-cacher-ng.md', 'Examples', 'Dockerizing an Apt-Cacher-ng service'] + +# Articles +- ['articles/index.md', '**HIDDEN**'] +- ['articles/basics.md', 'Articles', 'Docker basics'] +- ['articles/networking.md', 'Articles', 'Advanced networking'] +- ['articles/security.md', 'Articles', 'Security'] +- ['articles/https.md', 'Articles', 'Running Docker with HTTPS'] +- ['articles/host_integration.md', 'Articles', 'Automatically starting Containers'] +- ['articles/using_supervisord.md', 'Articles', 'Using Supervisor'] +- ['articles/cfengine_process_management.md', 'Articles', 'Process management with CFEngine'] +- ['articles/puppet.md', 'Articles', 'Using Puppet'] +- ['articles/chef.md', 'Articles', 'Using Chef'] +- ['articles/ambassador_pattern_linking.md', 'Articles', 'Cross-Host linking using Ambassador Containers'] +- ['articles/runmetrics.md', 'Articles', 'Runtime metrics'] +- ['articles/baseimages.md', 'Articles', 'Creating a Base Image'] # Reference - ['reference/index.md', '**HIDDEN**'] - ['reference/commandline/index.md', '**HIDDEN**'] - ['reference/commandline/cli.md', 'Reference', 'Command line'] - ['reference/builder.md', 'Reference', 'Dockerfile'] +- ['faq.md', 'Reference', 'FAQ'] - ['reference/run.md', 'Reference', 'Run Reference'] -- ['articles/index.md', '**HIDDEN**'] -- ['articles/runmetrics.md', 'Reference', 'Runtime metrics'] -- ['articles/security.md', 'Reference', 'Security'] -- ['articles/baseimages.md', 'Reference', 'Creating a Base Image'] -- ['use/networking.md', 'Reference', 'Advanced networking'] - ['reference/api/index.md', '**HIDDEN**'] - ['reference/api/docker-io_api.md', 'Reference', 'Docker.io API'] - ['reference/api/registry_api.md', 'Reference', 'Docker Registry API'] @@ -133,9 +131,6 @@ pages: - ['terms/filesystem.md', '**HIDDEN**'] - ['terms/image.md', '**HIDDEN**'] -# TODO: our theme adds a dropdown even for sections that have no subsections. 
- #- ['faq.md', 'FAQ'] - # Contribute: - ['contributing/index.md', '**HIDDEN**'] - ['contributing/contributing.md', 'Contribute', 'Contributing'] diff --git a/docs/release.sh b/docs/release.sh index 323887f594..1be6268d70 100755 --- a/docs/release.sh +++ b/docs/release.sh @@ -19,7 +19,7 @@ EOF [ "$AWS_S3_BUCKET" ] || usage #VERSION=$(cat VERSION) -BUCKET=$AWS_S3_BUCKET +export BUCKET=$AWS_S3_BUCKET export AWS_CONFIG_FILE=$(pwd)/awsconfig [ -e "$AWS_CONFIG_FILE" ] || usage @@ -37,7 +37,10 @@ setup_s3() { # Make the bucket accessible through website endpoints. echo "make $BUCKET accessible as a website" #aws s3 website s3://$BUCKET --index-document index.html --error-document jsearch/index.html - s3conf=$(cat s3_website.json) + s3conf=$(cat s3_website.json | envsubst) + echo + echo $s3conf + echo aws s3api put-bucket-website --bucket $BUCKET --website-configuration "$s3conf" } @@ -54,7 +57,7 @@ upload_current_documentation() { echo " to $dst" echo #s3cmd --recursive --follow-symlinks --preserve --acl-public sync "$src" "$dst" - aws s3 sync --acl public-read --exclude "*.rej" --exclude "*.rst" --exclude "*.orig" --exclude "*.py" "$src" "$dst" + aws s3 sync --cache-control "max-age=3600" --acl public-read --exclude "*.rej" --exclude "*.rst" --exclude "*.orig" --exclude "*.py" "$src" "$dst" } setup_s3 diff --git a/docs/s3_website.json b/docs/s3_website.json index fb14628ce6..eab6ae820c 100644 --- a/docs/s3_website.json +++ b/docs/s3_website.json @@ -6,12 +6,19 @@ "Suffix": "index.html" }, "RoutingRules": [ - { "Condition": { "KeyPrefixEquals": "en/latest/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } }, - { "Condition": { "KeyPrefixEquals": "en/master/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } }, - { "Condition": { "KeyPrefixEquals": "en/v0.6.3/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } }, - { "Condition": { "KeyPrefixEquals": "jsearch/index.html" }, "Redirect": { "ReplaceKeyPrefixWith": "jsearch/" } }, - { "Condition": { "KeyPrefixEquals": "index/" }, "Redirect": { "ReplaceKeyPrefixWith": "docker-io/" } }, - { "Condition": { "KeyPrefixEquals": "reference/api/index_api/" }, "Redirect": { "ReplaceKeyPrefixWith": "reference/api/docker-io_api/" } } + { "Condition": { "KeyPrefixEquals": "en/latest/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "" } }, + { "Condition": { "KeyPrefixEquals": "en/master/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "" } }, + { "Condition": { "KeyPrefixEquals": "en/v0.6.3/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "" } }, + { "Condition": { "KeyPrefixEquals": "jsearch/index.html" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "jsearch/" } }, + { "Condition": { "KeyPrefixEquals": "index/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-io/" } }, + { "Condition": { "KeyPrefixEquals": "reference/api/index_api/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "reference/api/docker-io_api/" } }, + { "Condition": { "KeyPrefixEquals": "examples/hello_world/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerizing/" } }, + { "Condition": { "KeyPrefixEquals": "examples/python_web_app/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerizing/" } }, + { "Condition": { "KeyPrefixEquals": "use/working_with_volumes/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockervolumes/" } }, + { "Condition": { "KeyPrefixEquals": "use/working_with_links_names/" }, "Redirect": 
{ "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerlinks/" } }, + { "Condition": { "KeyPrefixEquals": "use/workingwithrepository/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerrepos/" } }, + { "Condition": { "KeyPrefixEquals": "use/port_redirection" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerlinks/" } }, + { "Condition": { "KeyPrefixEquals": "use/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "examples/" } } ] } diff --git a/docs/sources/articles.md b/docs/sources/articles.md index 54c067d0cc..51335c6afd 100644 --- a/docs/sources/articles.md +++ b/docs/sources/articles.md @@ -1,8 +1,13 @@ # Articles -## Contents: - + - [Docker Basics](basics/) - [Docker Security](security/) + - [Running the Docker daemon with HTTPS](https/) + - [Configure Networking](networking/) + - [Using Supervisor with Docker](using_supervisord/) + - [Process Management with CFEngine](cfengine_process_management/) + - [Using Puppet](puppet/) - [Create a Base Image](baseimages/) - [Runtime Metrics](runmetrics/) - + - [Automatically Start Containers](host_integration/) + - [Link via an Ambassador Container](ambassador_pattern_linking/) diff --git a/docs/sources/articles/ambassador_pattern_linking.md b/docs/sources/articles/ambassador_pattern_linking.md new file mode 100644 index 0000000000..755fa4dc9c --- /dev/null +++ b/docs/sources/articles/ambassador_pattern_linking.md @@ -0,0 +1,150 @@ +page_title: Link via an Ambassador Container +page_description: Using the Ambassador pattern to abstract (network) services +page_keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming + +# Link via an Ambassador Container + +## Introduction + +Rather than hardcoding network links between a service consumer and +provider, Docker encourages service portability, for example instead of: + + (consumer) --> (redis) + +Requiring you to restart the `consumer` to attach it to a different +`redis` service, you can add ambassadors: + + (consumer) --> (redis-ambassador) --> (redis) + +Or + + (consumer) --> (redis-ambassador) ---network---> (redis-ambassador) --> (redis) + +When you need to rewire your consumer to talk to a different Redis +server, you can just restart the `redis-ambassador` container that the +consumer is connected to. + +This pattern also allows you to transparently move the Redis server to a +different docker host from the consumer. + +Using the `svendowideit/ambassador` container, the link wiring is +controlled entirely from the `docker run` parameters. + +## Two host Example + +Start actual Redis server on one Docker host + + big-server $ docker run -d --name redis crosbymichael/redis + +Then add an ambassador linked to the Redis server, mapping a port to the +outside world + + big-server $ docker run -d --link redis:redis --name redis_ambassador -p 6379:6379 svendowideit/ambassador + +On the other host, you can set up another ambassador setting environment +variables for each remote port we want to proxy to the `big-server` + + client-server $ docker run -d --name redis_ambassador --expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador + +Then on the `client-server` host, you can use a Redis client container +to talk to the remote Redis server, just by linking to the local Redis +ambassador. 
+ + client-server $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli + redis 172.17.0.160:6379> ping + PONG + +## How it works + +The following example shows what the `svendowideit/ambassador` container +does automatically (with a tiny amount of `sed`). + +On the Docker host (192.168.1.52) that Redis will run on: + + # start actual redis server + $ docker run -d --name redis crosbymichael/redis + + # get a redis-cli container for connection testing + $ docker pull relateiq/redis-cli + + # test the redis server by talking to it directly + $ docker run -t -i --rm --link redis:redis relateiq/redis-cli + redis 172.17.0.136:6379> ping + PONG + ^D + + # add redis ambassador + $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 busybox sh + +In the `redis_ambassador` container, you can see the linked Redis +container's `env`: + + $ env + REDIS_PORT=tcp://172.17.0.136:6379 + REDIS_PORT_6379_TCP_ADDR=172.17.0.136 + REDIS_NAME=/redis_ambassador/redis + HOSTNAME=19d7adf4705e + REDIS_PORT_6379_TCP_PORT=6379 + HOME=/ + REDIS_PORT_6379_TCP_PROTO=tcp + container=lxc + REDIS_PORT_6379_TCP=tcp://172.17.0.136:6379 + TERM=xterm + PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + PWD=/ + +This environment is used by the ambassador `socat` script to expose Redis +to the world (via the `-p 6379:6379` port mapping): + + $ docker rm redis_ambassador + $ sudo ./contrib/mkimage-unittest.sh + $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 docker-ut sh + + $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379 + +Now you can ping the Redis server via the ambassador. + +Next, go to a different server: + + $ sudo ./contrib/mkimage-unittest.sh + $ docker run -t -i --expose 6379 --name redis_ambassador docker-ut sh + + $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379 + +And get the `redis-cli` image so we can talk over the ambassador bridge. + + $ docker pull relateiq/redis-cli + $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli + redis 172.17.0.160:6379> ping + PONG + +## The svendowideit/ambassador Dockerfile + +The `svendowideit/ambassador` image is a small `busybox` image with +`socat` built in. When you start the container, it uses a small `sed` +script to parse out the (possibly multiple) link environment variables +to set up the port forwarding. On the remote host, you need to set the +variable using the `-e` command line option: + + --expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379 + +This will forward the local `1234` port to the remote IP and port, in this +case `192.168.1.52:6379`. + + # + # + # first you need to build the docker-ut image + # using ./contrib/mkimage-unittest.sh + # then + # docker build -t SvenDowideit/ambassador .
+ # docker tag SvenDowideit/ambassador ambassador + # then to run it (on the host that has the real backend on it) + # docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 ambassador + # on the remote host, you can set up another ambassador + # docker run -t -i --name redis_ambassador --expose 6379 sh + + FROM docker-ut + MAINTAINER SvenDowideit@home.org.au + + + CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh && top diff --git a/docs/sources/use/basics.md b/docs/sources/articles/basics.md similarity index 70% rename from docs/sources/use/basics.md rename to docs/sources/articles/basics.md index ee3eeabd9d..cd4a9df652 100644 --- a/docs/sources/use/basics.md +++ b/docs/sources/articles/basics.md @@ -12,10 +12,10 @@ your Docker install, run the following command: # Check that you have a working install $ docker info -If you get `docker: command not found` or something -like `/var/lib/docker/repositories: permission denied` -you may have an incomplete docker installation or insufficient -privileges to access Docker on your machine. +If you get `docker: command not found` or something like +`/var/lib/docker/repositories: permission denied` you may have an +incomplete Docker installation or insufficient privileges to access +Docker on your machine. Please refer to [*Installation*](/installation/#installation-list) for installation instructions. @@ -26,9 +26,9 @@ for installation instructions. $ sudo docker pull ubuntu This will find the `ubuntu` image by name on -[*Docker.io*](../workingwithrepository/#find-public-images-on-dockerio) and -download it from [Docker.io](https://index.docker.io) to a local image -cache. +[*Docker.io*](/userguide/dockerrepos/#find-public-images-on-dockerio) +and download it from [Docker.io](https://index.docker.io) to a local +image cache. > **Note**: > When the image has successfully downloaded, you will see a 12 character @@ -50,7 +50,7 @@ cache. ## Bind Docker to another host/port or a Unix socket -> **Warning**: +> **Warning**: > Changing the default `docker` daemon binding to a > TCP port or Unix *docker* user group will increase your security risks > by allowing non-root users to gain *root* access on the host. Make sure @@ -58,48 +58,51 @@ cache. > to a TCP port, anyone with access to that port has full Docker access; > so it is not advisable on an open network. -With `-H` it is possible to make the Docker daemon -to listen on a specific IP and port. By default, it will listen on -`unix:///var/run/docker.sock` to allow only local -connections by the *root* user. You *could* set it to -`0.0.0.0:4243` or a specific host IP to give access -to everybody, but that is **not recommended** because then it is trivial -for someone to gain root access to the host where the daemon is running. +With `-H` it is possible to make the Docker daemon listen on a +specific IP and port. By default, it will listen on +`unix:///var/run/docker.sock` to allow only local connections by the +*root* user. You *could* set it to `0.0.0.0:2375` or a specific host IP +to give access to everybody, but that is **not recommended** because +then it is trivial for someone to gain root access to the host where the +daemon is running. -Similarly, the Docker client can use `-H` to connect -to a custom port. +Similarly, the Docker client can use `-H` to connect to a custom port.
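As a convenience, here is a quick sketch of setting the address once instead of repeating `-H` on every command; this assumes your build of the Docker client honors the `DOCKER_HOST` environment variable (if yours does not, keep using `-H`):

    # Hypothetical session; adjust the address to match your daemon
    $ export DOCKER_HOST=tcp://127.0.0.1:2375
    $ docker info
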
-`-H` accepts host and port assignment in the -following format: `tcp://[host][:port]` or -`unix://path` +`-H` accepts host and port assignment in the following format: + + tcp://[host][:port] or unix://path For example: -- `tcp://host:4243` -> tcp connection on - host:4243 -- `unix://path/to/socket` -> unix socket located +- `tcp://host:2375` -> TCP connection on + host:2375 +- `unix://path/to/socket` -> Unix socket located at `path/to/socket` `-H`, when empty, will default to the same value as when no `-H` was passed in. `-H` also accepts short form for TCP bindings: -`host[:port]` or `:port` - # Run docker in daemon mode + host[:port] or :port + +Run Docker in daemon mode: + $ sudo /docker -H 0.0.0.0:5555 -d & - # Download an ubuntu image + +Download an `ubuntu` image: + $ sudo docker -H :5555 pull ubuntu -You can use multiple `-H`, for example, if you want -to listen on both TCP and a Unix socket +You can use multiple `-H`, for example, if you want to listen on both +TCP and a Unix socket: # Run docker in daemon mode - $ sudo /docker -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -d & + $ sudo /docker -H tcp://127.0.0.1:2375 -H unix:///var/run/docker.sock -d & # Download an ubuntu image, use default Unix socket $ sudo docker pull ubuntu # OR use the TCP port - $ sudo docker -H tcp://127.0.0.1:4243 pull ubuntu + $ sudo docker -H tcp://127.0.0.1:2375 pull ubuntu ## Starting a long-running worker process @@ -154,7 +157,7 @@ to listen on both TCP and a Unix socket ## Committing (saving) a container state -Save your containers state to a container image, so the state can be +Save your container's state to an image, so the state can be re-used. When you commit your container only the differences between the image @@ -168,8 +171,9 @@ will be stored (as a diff). See which images you already have using the # List your images $ sudo docker images -You now have a image state from which you can create new instances. +You now have an image state from which you can create new instances. -Read more about [*Share Images via Repositories*]( -../workingwithrepository/#working-with-the-repository) or -continue to the complete [*Command Line*](/reference/commandline/cli/#cli) +Read more about [*Share Images via +Repositories*](/userguide/dockerrepos/#working-with-the-repository) or +continue to the complete [*Command +Line*](/reference/commandline/cli/#cli) diff --git a/docs/sources/examples/cfengine_process_management.md b/docs/sources/articles/cfengine_process_management.md similarity index 98% rename from docs/sources/examples/cfengine_process_management.md rename to docs/sources/articles/cfengine_process_management.md index 0c7b6a8a1f..ee5ba238a0 100644 --- a/docs/sources/examples/cfengine_process_management.md +++ b/docs/sources/articles/cfengine_process_management.md @@ -58,7 +58,7 @@ There are three steps: containerized CFEngine installation. 3. Start your application processes as part of the `docker run` command. -### Building the container image +### Building the image The first two steps can be done as part of a Dockerfile, as follows. @@ -87,7 +87,7 @@ The first two steps can be done as part of a Dockerfile, as follows. ENTRYPOINT ["/var/cfengine/bin/docker_processes_run.sh"] By saving this file as Dockerfile to a working directory, you can then build -your container with the docker build command, e.g. +your image with the docker build command, e.g. `docker build -t managed_image`.
### Testing the container diff --git a/docs/sources/use/chef.md b/docs/sources/articles/chef.md similarity index 92% rename from docs/sources/use/chef.md rename to docs/sources/articles/chef.md index 897c2b429a..5568e99afa 100644 --- a/docs/sources/use/chef.md +++ b/docs/sources/articles/chef.md @@ -19,8 +19,8 @@ operating systems. ## Installation The cookbook is available on the [Chef Community -Site](http://community.opscode.com/cookbooks/docker) and can be installed using -your favorite cookbook dependency manager. +Site](http://community.opscode.com/cookbooks/docker) and can be +installed using your favorite cookbook dependency manager. The source can be found on [GitHub](https://github.com/bflad/chef-docker). @@ -71,4 +71,4 @@ This is equivalent to running the following command, but under upstart: $ docker run --detach=true --publish='5000:5000' --env='SETTINGS_FLAVOR=local' --volume='/mnt/docker:/docker-storage' samalba/docker-registry The resources will accept a single string or an array of values for any -docker flags that allow multiple values. +Docker flags that allow multiple values. diff --git a/docs/sources/use/host_integration.md b/docs/sources/articles/host_integration.md similarity index 73% rename from docs/sources/use/host_integration.md rename to docs/sources/articles/host_integration.md index 370c00e20a..fa442620e5 100644 --- a/docs/sources/use/host_integration.md +++ b/docs/sources/articles/host_integration.md @@ -10,16 +10,15 @@ You can use your Docker containers with process managers like ## Introduction If you want a process manager to manage your containers you will need to -run the docker daemon with the `-r=false` so that -docker will not automatically restart your containers when the host is -restarted. +run the docker daemon with `-r=false` so that Docker will not +automatically restart your containers when the host is restarted. When you have finished setting up your image and are happy with your running container, you can then attach a process manager to manage it. -When your run `docker start -a` docker will -automatically attach to the running container, or start it if needed and -forward all signals so that the process manager can detect when a -container stops and correctly restart it. +When you run `docker start -a`, Docker will automatically attach to the +running container, or start it if needed, and forward all signals so that +the process manager can detect when a container stops and correctly +restart it. Here are a few sample scripts for systemd and upstart to integrate with docker. @@ -27,9 +26,8 @@ docker. ## Sample Upstart Script In this example we've already created a container to run Redis with -`--name redis_server`.
To create an upstart script for our container, we +create a file named `/etc/init/redis.conf` and place the following into it: description "Redis container" diff --git a/docs/sources/examples/https.md b/docs/sources/articles/https.md similarity index 98% rename from docs/sources/examples/https.md rename to docs/sources/articles/https.md index c46cf6b88c..cc8c6a9761 100644 --- a/docs/sources/examples/https.md +++ b/docs/sources/articles/https.md @@ -67,13 +67,13 @@ Now you can make the Docker daemon only accept connections from clients providing a certificate trusted by our CA: $ sudo docker -d --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem \ - -H=0.0.0.0:4243 + -H=0.0.0.0:2375 To be able to connect to Docker and validate its certificate, you now need to provide your client keys, certificates and trusted CA: $ docker --tlsverify --tlscacert=ca.pem --tlscert=client-cert.pem --tlskey=client-key.pem \ - -H=dns-name-of-docker-host:4243 + -H=dns-name-of-docker-host:2375 > **Warning**: > As shown in the example above, you don't have to run the diff --git a/docs/sources/articles/networking.md b/docs/sources/articles/networking.md new file mode 100644 index 0000000000..a4640f3665 --- /dev/null +++ b/docs/sources/articles/networking.md @@ -0,0 +1,703 @@ +page_title: Network Configuration +page_description: Docker networking +page_keywords: network, networking, bridge, docker, documentation + +# Network Configuration + +## TL;DR + +When Docker starts, it creates a virtual interface named `docker0` on +the host machine. It randomly chooses an address and subnet from the +private range defined by [RFC 1918](http://tools.ietf.org/html/rfc1918) +that are not in use on the host machine, and assigns it to `docker0`. +Docker made the choice `172.17.42.1/16` when I started it a few minutes +ago, for example — a 16-bit netmask providing 65,534 addresses for the +host machine and its containers. + +> **Note:** +> This document discusses advanced networking configuration +> and options for Docker. In most cases you won't need this information. +> If you're looking to get started with a simpler explanation of Docker +> networking and an introduction to the concept of container linking, see +> the [Docker User Guide](/userguide/dockerlinks/). + +But `docker0` is no ordinary interface. It is a virtual *Ethernet +bridge* that automatically forwards packets between any other network +interfaces that are attached to it. This lets containers communicate +both with the host machine and with each other. Every time Docker +creates a container, it creates a pair of “peer” interfaces that are +like opposite ends of a pipe — a packet sent on one will be received on +the other. It gives one of the peers to the container to become its +`eth0` interface and keeps the other peer, with a unique name like +`vethAQI2QT`, out in the namespace of the host machine. By binding +every `veth*` interface to the `docker0` bridge, Docker creates a +virtual subnet shared between the host machine and every Docker +container. + +The remaining sections of this document explain all of the ways that you +can use Docker options and — in advanced cases — raw Linux networking +commands to tweak, supplement, or entirely replace Docker's default +networking configuration. + +## Quick Guide to the Options + +Here is a quick list of the networking-related Docker command-line +options, in case it helps you find the section below that you are +looking for.
+ +Some networking command-line options can only be supplied to the Docker +server when it starts up, and cannot be changed once it is running: + + * `-b BRIDGE` or `--bridge=BRIDGE` — see + [Building your own bridge](#bridge-building) + + * `--bip=CIDR` — see + [Customizing docker0](#docker0) + + * `-H SOCKET...` or `--host=SOCKET...` — + This might sound like it would affect container networking, + but it actually faces in the other direction: + it tells the Docker server over what channels + it should be willing to receive commands + like “run container” and “stop container.” + + * `--icc=true|false` — see + [Communication between containers](#between-containers) + + * `--ip=IP_ADDRESS` — see + [Binding container ports](#binding-ports) + + * `--ip-forward=true|false` — see + [Communication between containers](#between-containers) + + * `--iptables=true|false` — see + [Communication between containers](#between-containers) + + * `--mtu=BYTES` — see + [Customizing docker0](#docker0) + +There are two networking options that can be supplied either at startup +or when `docker run` is invoked. When provided at startup, they set the +default value that `docker run` will later use if the options are not +specified: + + * `--dns=IP_ADDRESS...` — see + [Configuring DNS](#dns) + + * `--dns-search=DOMAIN...` — see + [Configuring DNS](#dns) + +Finally, several networking options can only be provided when calling +`docker run` because they specify something specific to one container: + + * `-h HOSTNAME` or `--hostname=HOSTNAME` — see + [Configuring DNS](#dns) and + [How Docker networks a container](#container-networking) + + * `--link=CONTAINER_NAME:ALIAS` — see + [Configuring DNS](#dns) and + [Communication between containers](#between-containers) + + * `--net=bridge|none|container:NAME_or_ID|host` — see + [How Docker networks a container](#container-networking) + + * `-p SPEC` or `--publish=SPEC` — see + [Binding container ports](#binding-ports) + + * `-P` or `--publish-all=true|false` — see + [Binding container ports](#binding-ports) + +The following sections tackle all of the above topics in an order that +moves roughly from simplest to most complex. + +## Configuring DNS + +How can Docker supply each container with a hostname and DNS +configuration, without having to build a custom image with the hostname +written inside? Its trick is to overlay three crucial `/etc` files +inside the container with virtual files where it can write fresh +information. You can see this by running `mount` inside a container: + + $$ mount + ... + /dev/disk/by-uuid/1fec...ebdf on /etc/hostname type ext4 ... + /dev/disk/by-uuid/1fec...ebdf on /etc/hosts type ext4 ... + tmpfs on /etc/resolv.conf type tmpfs ... + ... + +This arrangement allows Docker to do clever things like keep +`resolv.conf` up to date across all containers when the host machine +receives new configuration over DHCP later. The exact details of how +Docker maintains these files inside the container can change from one +Docker version to the next, so you should leave the files themselves +alone and use the following Docker options instead. + +Four different options affect container domain name services. + + * `-h HOSTNAME` or `--hostname=HOSTNAME` — sets the hostname by which + the container knows itself. This is written into `/etc/hostname`, + into `/etc/hosts` as the name of the container’s host-facing IP + address, and is the name that `/bin/bash` inside the container will + display inside its prompt.
But the hostname is not easy to see from + outside the container. It will not appear in `docker ps` nor in the + `/etc/hosts` file of any other container. + + * `--link=CONTAINER_NAME:ALIAS` — using this option as you `run` a + container gives the new container’s `/etc/hosts` an extra entry + named `ALIAS` that points to the IP address of the container named + `CONTAINER_NAME`. This lets processes inside the new container + connect to the hostname `ALIAS` without having to know its IP. The + `--link=` option is discussed in more detail below, in the section + [Communication between containers](#between-containers). + + * `--dns=IP_ADDRESS...` — sets the IP addresses added as `nameserver` + lines to the container's `/etc/resolv.conf` file. Processes in the + container, when confronted with a hostname not in `/etc/hosts`, will + connect to these IP addresses on port 53 looking for name resolution + services. + + * `--dns-search=DOMAIN...` — sets the domain names that are searched + when a bare unqualified hostname is used inside of the container, by + writing `search` lines into the container’s `/etc/resolv.conf`. + When a container process attempts to access `host` and the search + domain `example.com` is set, for instance, the DNS logic will not + only look up `host` but also `host.example.com`. + +Note that Docker, in the absence of either of the last two options +above, will make `/etc/resolv.conf` inside of each container look like +the `/etc/resolv.conf` of the host machine where the `docker` daemon is +running. The options then modify this default configuration. + +## Communication between containers + +Whether two containers can communicate is governed, at the operating +system level, by three factors. + +1. Does the network topology even connect the containers’ network + interfaces? By default Docker will attach all containers to a + single `docker0` bridge, providing a path for packets to travel + between them. See the later sections of this document for other + possible topologies. + +2. Is the host machine willing to forward IP packets? This is governed + by the `ip_forward` system parameter. Packets can only pass between + containers if this parameter is `1`. Usually you will simply leave + the Docker server at its default setting `--ip-forward=true` and + Docker will go set `ip_forward` to `1` for you when the server + starts up. To check the setting or turn it on manually: + + # Usually not necessary: turning on forwarding, + # on the host where your Docker server is running + + $ cat /proc/sys/net/ipv4/ip_forward + 0 + $ echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward + 1 + $ cat /proc/sys/net/ipv4/ip_forward + 1 + +3. Do your `iptables` allow this particular connection to be made? + Docker will never make changes to your system `iptables` rules if + you set `--iptables=false` when the daemon starts. Otherwise the + Docker server will add a default rule to the `FORWARD` chain with a + blanket `ACCEPT` policy if you retain the default `--icc=true`, or + else will set the policy to `DROP` if `--icc=false`. + +Nearly everyone using Docker will want `ip_forward` to be on, to at +least make communication *possible* between containers. But it is a +strategic question whether to leave `--icc=true` or change it to +`--icc=false` (on Ubuntu, by editing the `DOCKER_OPTS` variable in +`/etc/default/docker` and restarting the Docker server) so that +`iptables` will protect other containers — and the main host — from +having arbitrary ports probed or accessed by a container that gets +compromised.
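For example, here is a minimal sketch of persisting the stricter setting on Ubuntu, assuming the packaged init script reads `DOCKER_OPTS` from `/etc/default/docker` as described above:

    # Append the option and restart the daemon
    $ echo 'DOCKER_OPTS="--icc=false"' | sudo tee -a /etc/default/docker
    $ sudo service docker restart
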
+ +If you choose the most secure setting of `--icc=false`, then how can +containers communicate in those cases where you *want* them to provide +each other services? + +The answer is the `--link=CONTAINER_NAME:ALIAS` option, which was +mentioned in the previous section because of its effect upon name +services. If the Docker daemon is running with both `--icc=false` and +`--iptables=true` then, when it sees `docker run` invoked with the +`--link=` option, the Docker server will insert a pair of `iptables` +`ACCEPT` rules so that the new container can connect to the ports +exposed by the other container — the ports that it mentioned in the +`EXPOSE` lines of its `Dockerfile`. Docker has more documentation on +this subject — see the [linking Docker containers](/userguide/dockerlinks) +page for further details. + +> **Note**: +> The value `CONTAINER_NAME` in `--link=` must either be an +> auto-assigned Docker name like `stupefied_pare` or else the name you +> assigned with `--name=` when you ran `docker run`. It cannot be a +> hostname, which Docker will not recognize in the context of the +> `--link=` option. + +You can run the `iptables` command on your Docker host to see whether +the `FORWARD` chain has a default policy of `ACCEPT` or `DROP`: + + # When --icc=false, you should see a DROP rule: + + $ sudo iptables -L -n + ... + Chain FORWARD (policy ACCEPT) + target prot opt source destination + DROP all -- 0.0.0.0/0 0.0.0.0/0 + ... + + # When a --link= has been created under --icc=false, + # you should see port-specific ACCEPT rules overriding + # the subsequent DROP policy for all other packets: + + $ sudo iptables -L -n + ... + Chain FORWARD (policy ACCEPT) + target prot opt source destination + ACCEPT tcp -- 172.17.0.2 172.17.0.3 tcp spt:80 + ACCEPT tcp -- 172.17.0.3 172.17.0.2 tcp dpt:80 + DROP all -- 0.0.0.0/0 0.0.0.0/0 + +> **Note**: +> Docker is careful that its host-wide `iptables` rules fully expose +> containers to each other’s raw IP addresses, so connections from one +> container to another should always appear to be originating from the +> first container’s own IP address. + +## Binding container ports to the host + +By default Docker containers can make connections to the outside world, +but the outside world cannot connect to containers. Each outgoing +connection will appear to originate from one of the host machine’s own +IP addresses thanks to an `iptables` masquerading rule on the host +machine that the Docker server creates when it starts: + + # You can see that the Docker server creates a + # masquerade rule that let containers connect + # to IP addresses in the outside world: + + $ sudo iptables -t nat -L -n + ... + Chain POSTROUTING (policy ACCEPT) + target prot opt source destination + MASQUERADE all -- 172.17.0.0/16 !172.17.0.0/16 + ... + +But if you want containers to accept incoming connections, you will need +to provide special options when invoking `docker run`. These options +are covered in more detail in the [Docker User Guide](/userguide/dockerlinks) +page. There are two approaches. + +First, you can supply `-P` or `--publish-all=true|false` to `docker run` +which is a blanket operation that identifies every port with an `EXPOSE` +line in the image’s `Dockerfile` and maps it to a host port somewhere in +the range 49000–49900. This tends to be a bit inconvenient, since you +then have to run other `docker` sub-commands to learn which external +port a given service was mapped to. 
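For instance, the `docker port` sub-command reports the mapping for one container; a quick sketch, where the container name `web` and the chosen host port are made up for illustration:

    # Ask which host port was bound to container port 80
    $ sudo docker port web 80
    0.0.0.0:49153
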
+
+More convenient is the `-p SPEC` or `--publish=SPEC` option which lets
+you be explicit about exactly which external port on the Docker server —
+which can be any port at all, not just those in the 49000–49900 block —
+you want mapped to which port in the container.
+
+Either way, you should be able to peek at what Docker has accomplished
+in your network stack by examining your NAT tables.
+
+    # What your NAT rules might look like when Docker
+    # is finished setting up a -P forward:
+
+    $ sudo iptables -t nat -L -n
+    ...
+    Chain DOCKER (2 references)
+    target     prot opt source               destination
+    DNAT       tcp  --  0.0.0.0/0            0.0.0.0/0            tcp dpt:49153 to:172.17.0.2:80
+
+    # What your NAT rules might look like when Docker
+    # is finished setting up a -p 80:80 forward:
+
+    Chain DOCKER (2 references)
+    target     prot opt source               destination
+    DNAT       tcp  --  0.0.0.0/0            0.0.0.0/0            tcp dpt:80 to:172.17.0.2:80
+
+You can see that Docker has exposed these container ports on `0.0.0.0`,
+the wildcard IP address that will match any possible incoming interface
+on the host machine.  If you want to be more restrictive and only allow
+container services to be contacted through a specific external
+interface on the host machine, you have two choices.  When you invoke
+`docker run` you can use either `-p IP:host_port:container_port` or
+`-p IP::port` to specify the external interface for one particular
+binding.
+
+Or if you always want Docker port forwards to bind to one specific IP
+address, you can edit your system-wide Docker server settings (on
+Ubuntu, by editing `DOCKER_OPTS` in `/etc/default/docker`) and add the
+option `--ip=IP_ADDRESS`.  Remember to restart your Docker server after
+editing this setting.
+
+Again, this topic is covered without all of these low-level networking
+details in the [Docker User Guide](/userguide/dockerlinks/) document if you
+would like to use that as your port redirection reference instead.
+
+## Customizing docker0
+
+By default, the Docker server creates and configures the host system’s
+`docker0` interface as an *Ethernet bridge* inside the Linux kernel that
+can pass packets back and forth between other physical or virtual
+network interfaces so that they behave as a single Ethernet network.
+
+Docker configures `docker0` with an IP address and netmask so the host
+machine can both receive and send packets to containers connected to the
+bridge, and gives it an MTU — the *maximum transmission unit* or largest
+packet length that the interface will allow — of either 1,500 bytes or
+else a more specific value copied from the Docker host’s interface that
+supports its default route.  Both are configurable at server startup:
+
+ * `--bip=CIDR` — supply a specific IP address and netmask for the
+   `docker0` bridge, using standard CIDR notation like
+   `192.168.1.5/24`.
+
+ * `--mtu=BYTES` — override the maximum packet length on `docker0`.
+
+On Ubuntu you would add these to the `DOCKER_OPTS` setting in
+`/etc/default/docker` on your Docker host and then restart the Docker
+service.
+
+Once you have one or more containers up and running, you can confirm
+that Docker has properly connected them to the `docker0` bridge by
+running the `brctl` command on the host machine and looking at the
+`interfaces` column of the output.
+Here is a host with two different containers connected:
+
+    # Display bridge info
+
+    $ sudo brctl show
+    bridge name     bridge id               STP enabled     interfaces
+    docker0         8000.3a1d7362b4ee       no              veth65f9
+                                                            vethdda6
+
+If the `brctl` command is not installed on your Docker host, then on
+Ubuntu you should be able to run `sudo apt-get install bridge-utils` to
+install it.
+
+Finally, the `docker0` Ethernet bridge settings are used every time you
+create a new container.  Docker selects a free IP address from the range
+available on the bridge each time you `docker run` a new container, and
+configures the container’s `eth0` interface with that IP address and the
+bridge’s netmask.  The Docker host’s own IP address on the bridge is
+used as the default gateway by which each container reaches the rest of
+the Internet.
+
+    # The network, as seen from a container
+
+    $ sudo docker run -i -t --rm base /bin/bash
+
+    $$ ip addr show eth0
+    24: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
+        link/ether 32:6f:e0:35:57:91 brd ff:ff:ff:ff:ff:ff
+        inet 172.17.0.3/16 scope global eth0
+           valid_lft forever preferred_lft forever
+        inet6 fe80::306f:e0ff:fe35:5791/64 scope link
+           valid_lft forever preferred_lft forever
+
+    $$ ip route
+    default via 172.17.42.1 dev eth0
+    172.17.0.0/16 dev eth0  proto kernel  scope link  src 172.17.0.3
+
+    $$ exit
+
+Remember that the Docker host will not be willing to forward container
+packets out on to the Internet unless its `ip_forward` system setting is
+`1` — see the section above on [Communication between
+containers](#between-containers) for details.
+
+## Building your own bridge
+
+If you want to take Docker out of the business of creating its own
+Ethernet bridge entirely, you can set up your own bridge before starting
+Docker and use `-b BRIDGE` or `--bridge=BRIDGE` to tell Docker to use
+your bridge instead.  If you already have Docker up and running with its
+old `docker0` still configured, you will probably want to begin by
+stopping the service and removing the interface:
+
+    # Stopping Docker and removing docker0
+
+    $ sudo service docker stop
+    $ sudo ip link set dev docker0 down
+    $ sudo brctl delbr docker0
+
+Then, before starting the Docker service, create your own bridge and
+give it whatever configuration you want.  Here we will create a simple
+enough bridge that we really could just have used the options in the
+previous section to customize `docker0`, but it will be enough to
+illustrate the technique.
+
+    # Create our own bridge
+
+    $ sudo brctl addbr bridge0
+    $ sudo ip addr add 192.168.5.1/24 dev bridge0
+    $ sudo ip link set dev bridge0 up
+
+    # Confirming that our bridge is up and running
+
+    $ ip addr show bridge0
+    4: bridge0: mtu 1500 qdisc noop state UP group default
+        link/ether 66:38:d0:0d:76:18 brd ff:ff:ff:ff:ff:ff
+        inet 192.168.5.1/24 scope global bridge0
+           valid_lft forever preferred_lft forever
+
+    # Tell Docker about it and restart (on Ubuntu)
+
+    $ echo 'DOCKER_OPTS="-b=bridge0"' | sudo tee -a /etc/default/docker
+    $ sudo service docker start
+
+The result should be that the Docker server starts successfully and is
+now prepared to bind containers to the new bridge.  After pausing to
+verify the bridge’s configuration, try creating a container — you will
+see that its IP address is in your new IP address range, which Docker
+will have auto-detected.
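+For example, repeating the earlier experiment on this host (your
+container will receive a different free address from the
+`192.168.5.0/24` range, but the pattern should be the same):
+
+    # The new bridge's range, as seen from a container
+
+    $ sudo docker run -i -t --rm base /bin/bash
+
+    $$ ip addr show eth0
+    ...
+    inet 192.168.5.2/24 scope global eth0
+    ...
+
+    $$ exit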
+ +Just as we learned in the previous section, you can use the `brctl show` +command to see Docker add and remove interfaces from the bridge as you +start and stop containers, and can run `ip addr` and `ip route` inside a +container to see that it has been given an address in the bridge’s IP +address range and has been told to use the Docker host’s IP address on +the bridge as its default gateway to the rest of the Internet. + +## How Docker networks a container + +While Docker is under active development and continues to tweak and +improve its network configuration logic, the shell commands in this +section are rough equivalents to the steps that Docker takes when +configuring networking for each new container. + +Let’s review a few basics. + +To communicate using the Internet Protocol (IP), a machine needs access +to at least one network interface at which packets can be sent and +received, and a routing table that defines the range of IP addresses +reachable through that interface. Network interfaces do not have to be +physical devices. In fact, the `lo` loopback interface available on +every Linux machine (and inside each Docker container) is entirely +virtual — the Linux kernel simply copies loopback packets directly from +the sender’s memory into the receiver’s memory. + +Docker uses special virtual interfaces to let containers communicate +with the host machine — pairs of virtual interfaces called “peers” that +are linked inside of the host machine’s kernel so that packets can +travel between them. They are simple to create, as we will see in a +moment. + +The steps with which Docker configures a container are: + +1. Create a pair of peer virtual interfaces. + +2. Give one of them a unique name like `veth65f9`, keep it inside of + the main Docker host, and bind it to `docker0` or whatever bridge + Docker is supposed to be using. + +3. Toss the other interface over the wall into the new container (which + will already have been provided with an `lo` interface) and rename + it to the much prettier name `eth0` since, inside of the container’s + separate and unique network interface namespace, there are no + physical interfaces with which this name could collide. + +4. Give the container’s `eth0` a new IP address from within the + bridge’s range of network addresses, and set its default route to + the IP address that the Docker host owns on the bridge. + +With these steps complete, the container now possesses an `eth0` +(virtual) network card and will find itself able to communicate with +other containers and the rest of the Internet. + +You can opt out of the above process for a particular container by +giving the `--net=` option to `docker run`, which takes four possible +values. + + * `--net=bridge` — The default action, that connects the container to + the Docker bridge as described above. + + * `--net=host` — Tells Docker to skip placing the container inside of + a separate network stack. In essence, this choice tells Docker to + **not containerize the container’s networking**! While container + processes will still be confined to their own filesystem and process + list and resource limits, a quick `ip addr` command will show you + that, network-wise, they live “outside” in the main Docker host and + have full access to its network interfaces. Note that this does + **not** let the container reconfigure the host network stack — that + would require `--privileged=true` — but it does let container + processes open low-numbered ports like any other root process. 
+
+ * `--net=container:NAME_or_ID` — Tells Docker to put this container’s
+   processes inside of the network stack that has already been created
+   inside of another container.  The new container’s processes will be
+   confined to their own filesystem and process list and resource
+   limits, but will share the same IP address and port numbers as the
+   first container, and processes on the two containers will be able to
+   connect to each other over the loopback interface.
+
+ * `--net=none` — Tells Docker to put the container inside of its own
+   network stack but not to take any steps to configure its network,
+   leaving you free to build any of the custom configurations explored
+   in the last few sections of this document.
+
+To get an idea of the steps that are necessary if you use `--net=none`
+as described in that last bullet point, here are the commands that you
+would run to reach roughly the same configuration as if you had let
+Docker do all of the configuration:
+
+    # At one shell, start a container and
+    # leave its shell idle and running
+
+    $ sudo docker run -i -t --rm --net=none base /bin/bash
+    root@63f36fc01b5f:/#
+
+    # At another shell, learn the container process ID
+    # and create its namespace entry in /var/run/netns/
+    # for the "ip netns" command we will be using below
+
+    $ sudo docker inspect -f '{{.State.Pid}}' 63f36fc01b5f
+    2778
+    $ pid=2778
+    $ sudo mkdir -p /var/run/netns
+    $ sudo ln -s /proc/$pid/ns/net /var/run/netns/$pid
+
+    # Check the bridge's IP address and netmask
+
+    $ ip addr show docker0
+    21: docker0: ...
+    inet 172.17.42.1/16 scope global docker0
+    ...
+
+    # Create a pair of "peer" interfaces A and B,
+    # bind the A end to the bridge, and bring it up
+
+    $ sudo ip link add A type veth peer name B
+    $ sudo brctl addif docker0 A
+    $ sudo ip link set A up
+
+    # Place B inside the container's network namespace,
+    # rename it to eth0, and activate it with a free IP
+
+    $ sudo ip link set B netns $pid
+    $ sudo ip netns exec $pid ip link set dev B name eth0
+    $ sudo ip netns exec $pid ip link set eth0 up
+    $ sudo ip netns exec $pid ip addr add 172.17.42.99/16 dev eth0
+    $ sudo ip netns exec $pid ip route add default via 172.17.42.1
+
+At this point your container should be able to perform networking
+operations as usual.
+
+When you finally exit the shell and Docker cleans up the container, the
+network namespace is destroyed along with our virtual `eth0` — whose
+destruction in turn destroys interface `A` out in the Docker host and
+automatically un-registers it from the `docker0` bridge.  So everything
+gets cleaned up without our having to run any extra commands!  Well,
+almost everything:
+
+    # Clean up dangling symlinks in /var/run/netns
+
+    $ sudo find -L /var/run/netns -type l -delete
+
+Also note that while the commands above used the modern `ip` command
+instead of old deprecated wrappers like `ifconfig` and `route`, these
+older commands would also have worked inside of our container.  The
+`ip addr` command can be typed as `ip a` if you are in a hurry.
+
+Finally, note the importance of the `ip netns exec` command, which let
+us reach inside and configure a network namespace as root.  The same
+commands would not have worked if run inside of the container, because
+part of safe containerization is that Docker strips container processes
+of the right to configure their own networks.  Using `ip netns exec` is
+what let us finish up the configuration without having to take the
+dangerous step of running the container itself with `--privileged=true`.
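+As a final sanity check (reusing the `$pid` namespace entry created
+above; the bridge address `172.17.42.1` is the one from this example
+and may differ on your host), you can drive the new interface from
+outside the container with the same technique:
+
+    # Ping the docker0 bridge from inside the container's namespace
+
+    $ sudo ip netns exec $pid ping -c 2 172.17.42.1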
+
+## Tools and Examples
+
+Before diving into the following sections on custom network topologies,
+you might be interested in glancing at a few external tools or examples
+of the same kinds of configuration.  Here are two:
+
+ * Jérôme Petazzoni has created a `pipework` shell script to help you
+   connect together containers in arbitrarily complex scenarios:
+
+   <https://github.com/jpetazzo/pipework>
+
+ * Brandon Rhodes has created a whole network topology of Docker
+   containers for the next edition of *Foundations of Python Network
+   Programming* that includes routing, NAT’d firewalls, and servers that
+   offer HTTP, SMTP, POP, IMAP, Telnet, SSH, and FTP:
+
+   <https://github.com/brandon-rhodes/fopnp/tree/m/playground>
+
+Both tools use networking commands very much like the ones you saw in
+the previous section, and that you will see again in the following
+sections.
+
+## Building a point-to-point connection
+
+By default, Docker attaches all containers to the virtual subnet
+implemented by `docker0`.  You can create containers that are each
+connected to some different virtual subnet by creating your own bridge
+as shown in [Building your own bridge](#bridge-building), starting each
+container with `docker run --net=none`, and then attaching the
+containers to your bridge with the shell commands shown in [How Docker
+networks a container](#container-networking).
+
+But sometimes you want two particular containers to be able to
+communicate directly without the added complexity of both being bound to
+a host-wide Ethernet bridge.
+
+The solution is simple: when you create your pair of peer interfaces,
+simply throw *both* of them into containers, and configure them as
+classic point-to-point links.  The two containers will then be able to
+communicate directly (provided you manage to tell each container the
+other’s IP address, of course).  You might adjust the instructions of
+the previous section to go something like this:
+
+    # Start up two containers in two terminal windows
+
+    $ sudo docker run -i -t --rm --net=none base /bin/bash
+    root@1f1f4c1f931a:/#
+
+    $ sudo docker run -i -t --rm --net=none base /bin/bash
+    root@12e343489d2f:/#
+
+    # Learn the container process IDs
+    # and create their namespace entries
+
+    $ sudo docker inspect -f '{{.State.Pid}}' 1f1f4c1f931a
+    2989
+    $ sudo docker inspect -f '{{.State.Pid}}' 12e343489d2f
+    3004
+    $ sudo mkdir -p /var/run/netns
+    $ sudo ln -s /proc/2989/ns/net /var/run/netns/2989
+    $ sudo ln -s /proc/3004/ns/net /var/run/netns/3004
+
+    # Create the "peer" interfaces and hand them out
+
+    $ sudo ip link add A type veth peer name B
+
+    $ sudo ip link set A netns 2989
+    $ sudo ip netns exec 2989 ip addr add 10.1.1.1/32 dev A
+    $ sudo ip netns exec 2989 ip link set A up
+    $ sudo ip netns exec 2989 ip route add 10.1.1.2/32 dev A
+
+    $ sudo ip link set B netns 3004
+    $ sudo ip netns exec 3004 ip addr add 10.1.1.2/32 dev B
+    $ sudo ip netns exec 3004 ip link set B up
+    $ sudo ip netns exec 3004 ip route add 10.1.1.1/32 dev B
+
+The two containers should now be able to ping each other and make
+connections successfully (a quick way to verify this from the host
+appears below).  Point-to-point links like this do not depend on a
+subnet or a netmask, but on the bare assertion made by `ip route` that
+some other single IP address is connected to a particular network
+interface.
+
+Note that point-to-point links can be safely combined with other kinds
+of network connectivity — there is no need to start the containers with
+`--net=none` if you want point-to-point links to be an addition to the
+container’s normal networking instead of a replacement.
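+To verify the link from the host, you can again reach into each
+namespace (reusing the namespace entries `2989` and `3004` created
+above):
+
+    # Each container should be able to reach the other's address
+
+    $ sudo ip netns exec 2989 ping -c 2 10.1.1.2
+    $ sudo ip netns exec 3004 ping -c 2 10.1.1.1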
+ +A final permutation of this pattern is to create the point-to-point link +between the Docker host and one container, which would allow the host to +communicate with that one container on some single IP address and thus +communicate “out-of-band” of the bridge that connects the other, more +usual containers. But unless you have very specific networking needs +that drive you to such a solution, it is probably far preferable to use +`--icc=false` to lock down inter-container communication, as we explored +earlier. diff --git a/docs/sources/use/puppet.md b/docs/sources/articles/puppet.md similarity index 92% rename from docs/sources/use/puppet.md rename to docs/sources/articles/puppet.md index a0d20ab446..81ae05ba56 100644 --- a/docs/sources/use/puppet.md +++ b/docs/sources/articles/puppet.md @@ -12,7 +12,7 @@ page_keywords: puppet, installation, usage, docker, documentation ## Requirements To use this guide you'll need a working installation of Puppet from -[Puppetlabs](https://puppetlabs.com) . +[Puppet Labs](https://puppetlabs.com) . The module also currently uses the official PPA so only works with Ubuntu. @@ -26,8 +26,8 @@ installed using the built-in module tool. $ puppet module install garethr/docker It can also be found on -[GitHub](https://github.com/garethr/garethr-docker) if you would -rather download the source. +[GitHub](https://github.com/garethr/garethr-docker) if you would rather +download the source. ## Usage @@ -88,5 +88,6 @@ Run also contains a number of optional parameters: dns => ['8.8.8.8', '8.8.4.4'], } -Note that ports, env, dns and volumes can be set with either a single -string or as above with an array of values. +> *Note:* +> The `ports`, `env`, `dns` and `volumes` attributes can be set with either a single +> string or as above with an array of values. diff --git a/docs/sources/articles/runmetrics.md b/docs/sources/articles/runmetrics.md index 50d46047c0..bf4fe21c4e 100644 --- a/docs/sources/articles/runmetrics.md +++ b/docs/sources/articles/runmetrics.md @@ -50,7 +50,7 @@ For Docker containers using cgroups, the container name will be the full ID or long ID of the container. If a container shows up as ae836c95b4c3 in `docker ps`, its long ID might be something like `ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79`. You can -look it up with `docker inspect` or `docker ps -notrunc`. +look it up with `docker inspect` or `docker ps --no-trunc`. Putting everything together to look at the memory metrics for a Docker container, take a look at `/sys/fs/cgroup/memory/lxc//`. @@ -310,8 +310,8 @@ layer; you will also have to add traffic going through the userland proxy. Then, you will need to check those counters on a regular basis. If you -happen to use `collectd`, there is a nice plugin to -automate iptables counters collection. +happen to use `collectd`, there is a [nice plugin](https://collectd.org/wiki/index.php/Plugin:IPTables) +to automate iptables counters collection. ### Interface-level counters diff --git a/docs/sources/articles/security.md b/docs/sources/articles/security.md index 69284db836..eef2577304 100644 --- a/docs/sources/articles/security.md +++ b/docs/sources/articles/security.md @@ -38,7 +38,7 @@ of another container. Of course, if the host system is setup accordingly, containers can interact with each other through their respective network interfaces — just like they can interact with external hosts. 
When you specify public ports for your containers or use -[*links*](/use/working_with_links_names/#working-with-links-names) +[*links*](/userguide/dockerlinks/#working-with-links-names) then IP traffic is allowed between containers. They can ping each other, send/receive UDP packets, and establish TCP connections, but that can be restricted if necessary. From a network architecture point of view, all diff --git a/docs/sources/examples/using_supervisord.md b/docs/sources/articles/using_supervisord.md similarity index 91% rename from docs/sources/examples/using_supervisord.md rename to docs/sources/articles/using_supervisord.md index 29d2fa4525..fd7c07cabf 100644 --- a/docs/sources/examples/using_supervisord.md +++ b/docs/sources/articles/using_supervisord.md @@ -5,10 +5,6 @@ page_keywords: docker, supervisor, process management # Using Supervisor with Docker > **Note**: -> -> - This example assumes you have Docker running in daemon mode. For -> more information please see [*Check your Docker -> install*](../hello_world/#running-examples). > - **If you don't like sudo** then see [*Giving non-root > access*](/installation/binaries/#dockergroup) @@ -16,8 +12,8 @@ Traditionally a Docker container runs a single process when it is launched, for example an Apache daemon or a SSH server daemon. Often though you want to run more than one process in a container. There are a number of ways you can achieve this ranging from using a simple Bash -script as the value of your container's `CMD` -instruction to installing a process management tool. +script as the value of your container's `CMD` instruction to installing +a process management tool. In this example we're going to make use of the process management tool, [Supervisor](http://supervisord.org/), to manage multiple processes in @@ -30,7 +26,7 @@ install and manage both an SSH daemon and an Apache daemon. Let's start by creating a basic `Dockerfile` for our new image. - FROM ubuntu:latest + FROM ubuntu:13.04 MAINTAINER examples@docker.io RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list RUN apt-get update @@ -95,9 +91,9 @@ Here We've exposed ports 22 and 80 on the container and we're running the `/usr/bin/supervisord` binary when the container launches. -## Building our container +## Building our image -We can now build our new container. +We can now build our new image. $ sudo docker build -t /supervisord . diff --git a/docs/sources/docker-io/builds.md b/docs/sources/docker-io/builds.md index 0ca058663a..c70de4006c 100644 --- a/docs/sources/docker-io/builds.md +++ b/docs/sources/docker-io/builds.md @@ -1,28 +1,32 @@ -page_title: Trusted Builds on Docker.io -page_description: Docker.io Trusted Builds -page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker.io, docs, documentation, trusted, builds, trusted builds +page_title: Automated Builds on Docker.io +page_description: Docker.io Automated Builds +page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker.io, docs, documentation, trusted, builds, trusted builds, automated, automated builds +# Automated Builds on Docker.io -# Trusted Builds on Docker.io +## Automated Builds -## Trusted Builds +*Automated Builds* is a special feature allowing you to specify a source +repository with a `Dockerfile` to be built by the +[Docker.io](https://index.docker.io) build clusters. The system will +clone your repository and build the `Dockerfile` using the repository as +the context. 
The resulting image will then be uploaded to the registry +and marked as an *Automated Build*. -*Trusted Builds* is a special feature allowing you to specify a source -repository with a *Dockerfile* to be built by the Docker build clusters. The -system will clone your repository and build the Dockerfile using the repository -as the context. The resulting image will then be uploaded to the registry and -marked as a `Trusted Build`. +Automated Builds have a number of advantages. For example, users of +*your* Automated Build can be certain that the resulting image was built +exactly how it claims to be. -Trusted Builds have a number of advantages. For example, users of *your* Trusted -Build can be certain that the resulting image was built exactly how it claims -to be. - -Furthermore, the Dockerfile will be available to anyone browsing your repository -on the registry. Another advantage of the Trusted Builds feature is the automated +Furthermore, the `Dockerfile` will be available to anyone browsing your repository +on the registry. Another advantage of the Automated Builds feature is the automated builds. This makes sure that your repository is always up to date. -### Linking with a GitHub account +Automated Builds are supported for both public and private repositories +on both [GitHub](http://github.com) and +[BitBucket](https://bitbucket.org/). -In order to setup a Trusted Build, you need to first link your [Docker.io]( +### Setting up Automated Builds with GitHub + +In order to setup an Automated Build, you need to first link your [Docker.io]( https://index.docker.io) account with a GitHub one. This will allow the registry to see your repositories. @@ -30,26 +34,31 @@ to see your repositories. > https://index.docker.io) needs to setup a GitHub service hook. Although nothing > else is done with your account, this is how GitHub manages permissions, sorry! -### Creating a Trusted Build +Click on the [Automated Builds tab](https://index.docker.io/builds/) to +get started and then select [+ Add +New](https://index.docker.io/builds/add/). -You can [create a Trusted Build](https://index.docker.io/builds/github/select/) -from any of your public GitHub repositories with a Dockerfile. +Select the [GitHub +service](https://index.docker.io/associate/github/). -> **Note:** We currently only support public repositories. To have more than -> one Docker image from the same GitHub repository, you will need to set up one -> Trusted Build per Dockerfile, each using a different image name. This rule -> applies to building multiple branches on the same GitHub repository as well. +Then follow the instructions to authorize and link your GitHub account +to Docker.io. -### GitHub organizations +#### Creating an Automated Build + +You can [create an Automated Build](https://index.docker.io/builds/github/select/) +from any of your public or private GitHub repositories with a `Dockerfile`. + +#### GitHub organizations GitHub organizations appear once your membership to that organization is made public on GitHub. To verify, you can look at the members tab for your organization on GitHub. -### GitHub service hooks +#### GitHub service hooks You can follow the below steps to configure the GitHub service hooks for your -Trusted Build: +Automated Build: @@ -74,9 +83,31 @@ Trusted Build:
-### The Dockerfile and Trusted Builds +### Setting up Automated Builds with BitBucket -During the build process, we copy the contents of your Dockerfile. We also +In order to setup an Automated Build, you need to first link your +[Docker.io]( https://index.docker.io) account with a BitBucket one. This +will allow the registry to see your repositories. + +Click on the [Automated Builds tab](https://index.docker.io/builds/) to +get started and then select [+ Add +New](https://index.docker.io/builds/add/). + +Select the [BitBucket +service](https://index.docker.io/associate/bitbucket/). + +Then follow the instructions to authorize and link your BitBucket account +to Docker.io. + +#### Creating an Automated Build + +You can [create an Automated +Build](https://index.docker.io/builds/bitbucket/select/) from any of +your public or private BitBucket repositories with a `Dockerfile`. + +### The Dockerfile and Automated Builds + +During the build process, we copy the contents of your `Dockerfile`. We also add it to the [Docker.io](https://index.docker.io) for the Docker community to see on the repository page. @@ -87,16 +118,19 @@ repository's full description. > **Warning:** > If you change the full description after a build, it will be -> rewritten the next time the Trusted Build has been built. To make changes, +> rewritten the next time the Automated Build has been built. To make changes, > modify the README.md from the Git repository. We will look for a README.md -> in the same directory as your Dockerfile. +> in the same directory as your `Dockerfile`. ### Build triggers -If you need another way to trigger your Trusted Builds outside of GitHub, you -can setup a build trigger. When you turn on the build trigger for a Trusted -Build, it will give you a URL to which you can send POST requests. This will -trigger the Trusted Build process, which is similar to GitHub webhooks. +If you need another way to trigger your Automated Builds outside of GitHub +or BitBucket, you can setup a build trigger. When you turn on the build +trigger for an Automated Build, it will give you a URL to which you can +send POST requests. This will trigger the Automated Build process, which +is similar to GitHub webhooks. + +Build Triggers are available under the Settings tab of each Automated Build. > **Note:** > You can only trigger one build at a time and no more than one @@ -105,17 +139,63 @@ trigger the Trusted Build process, which is similar to GitHub webhooks. > You can find the logs of last 10 triggers on the settings page to verify > if everything is working correctly. +### Webhooks + +Also available for Automated Builds are Webhooks. Webhooks can be called +after a successful repository push is made. + +The webhook call will generate a HTTP POST with the following JSON +payload: + +``` +{ + "push_data":{ + "pushed_at":1385141110, + "images":[ + "imagehash1", + "imagehash2", + "imagehash3" + ], + "pusher":"username" + }, + "repository":{ + "status":"Active", + "description":"my docker repo that does cool things", + "is_automated":false, + "full_description":"This is my full description", + "repo_url":"https://index.docker.io/u/username/reponame/", + "owner":"username", + "is_official":false, + "is_private":false, + "name":"reponame", + "namespace":"username", + "star_count":1, + "comment_count":1, + "date_created":1370174400, + "dockerfile":"my full dockerfile is listed here", + "repo_name":"username/reponame" + } +} +``` + +Webhooks are available under the Settings tab of each Automated +Build. 
+ +> **Note:** If you want to test your webhook out then we recommend using +> a tool like [requestb.in](http://requestb.in/). + + ### Repository links -Repository links are a way to associate one Trusted Build with another. If one -gets updated, linking system also triggers a build for the other Trusted Build. -This makes it easy to keep your Trusted Builds up to date. +Repository links are a way to associate one Automated Build with another. If one +gets updated, linking system also triggers a build for the other Automated Build. +This makes it easy to keep your Automated Builds up to date. -To add a link, go to the settings page of a Trusted Build and click on +To add a link, go to the settings page of an Automated Build and click on *Repository Links*. Then enter the name of the repository that you want have linked. > **Warning:** > You can add more than one repository link, however, you should -> be very careful. Creating a two way relationship between Trusted Builds will +> be very careful. Creating a two way relationship between Automated Builds will > cause a never ending build loop. diff --git a/docs/sources/docker-io/index.md b/docs/sources/docker-io/index.md new file mode 100644 index 0000000000..dc83f0b281 --- /dev/null +++ b/docs/sources/docker-io/index.md @@ -0,0 +1,8 @@ +# Docker.io + +## Contents: + +- [Accounts](accounts/) +- [Repositories](repos/) +- [Automated Builds](builds/) + diff --git a/docs/sources/docker-io/repos.md b/docs/sources/docker-io/repos.md index a9bdabd89b..11170182a4 100644 --- a/docs/sources/docker-io/repos.md +++ b/docs/sources/docker-io/repos.md @@ -81,7 +81,7 @@ with a JSON payload similar to the example shown below. "repository":{ "status":"Active", "description":"my docker repo that does cool things", - "is_trusted":false, + "is_automated":false, "full_description":"This is my full description", "repo_url":"https://index.docker.io/u/username/reponame/", "owner":"username", diff --git a/docs/sources/examples.md b/docs/sources/examples.md index f1d1567f52..9dcd67a643 100644 --- a/docs/sources/examples.md +++ b/docs/sources/examples.md @@ -1,25 +1,9 @@ - # Examples -## Introduction: - -Here are some examples of how to use Docker to create running processes, -starting from a very simple *Hello World* and progressing to more -substantial services like those which you might find in production. 
- -## Contents: - - - [Check your Docker install](hello_world/) - - [Hello World](hello_world/#hello-world) - - [Hello World Daemon](hello_world/#hello-world-daemon) - - [Node.js Web App](nodejs_web_app/) - - [Redis Service](running_redis_service/) - - [SSH Daemon Service](running_ssh_service/) - - [CouchDB Service](couchdb_data_volumes/) - - [PostgreSQL Service](postgresql_service/) - - [Building an Image with MongoDB](mongodb/) - - [Riak Service](running_riak_service/) - - [Using Supervisor with Docker](using_supervisord/) - - [Process Management with CFEngine](cfengine_process_management/) - - [Python Web App](python_web_app/) - + - [Dockerizing a Node.js Web App](nodejs_web_app/) + - [Dockerizing a Redis Service](running_redis_service/) + - [Dockerizing an SSH Daemon Service](running_ssh_service/) + - [Dockerizing a CouchDB Service](couchdb_data_volumes/) + - [Dockerizing a PostgreSQL Service](postgresql_service/) + - [Dockerizing MongoDB](mongodb/) + - [Dockerizing a Riak Service](running_riak_service/) diff --git a/docs/sources/examples/apt-cacher-ng.md b/docs/sources/examples/apt-cacher-ng.md index 0293ac5d0b..34e4a4bf02 100644 --- a/docs/sources/examples/apt-cacher-ng.md +++ b/docs/sources/examples/apt-cacher-ng.md @@ -1,14 +1,10 @@ -page_title: Running an apt-cacher-ng service +page_title: Dockerizing an apt-cacher-ng service page_description: Installing and running an apt-cacher-ng service page_keywords: docker, example, package installation, networking, debian, ubuntu -# Apt-Cacher-ng Service +# Dockerizing an Apt-Cacher-ng Service > **Note**: -> -> - This example assumes you have Docker running in daemon mode. For -> more information please see [*Check your Docker -> install*](../hello_world/#running-examples). > - **If you don't like sudo** then see [*Giving non-root > access*](/installation/binaries/#dockergroup). > - **If you're using OS X or docker via TCP** then you shouldn't use diff --git a/docs/sources/examples/couchdb_data_volumes.md b/docs/sources/examples/couchdb_data_volumes.md index 17490487aa..44043d6411 100644 --- a/docs/sources/examples/couchdb_data_volumes.md +++ b/docs/sources/examples/couchdb_data_volumes.md @@ -1,14 +1,10 @@ -page_title: Sharing data between 2 couchdb databases +page_title: Dockerizing a CouchDB Service page_description: Sharing data between 2 couchdb databases page_keywords: docker, example, package installation, networking, couchdb, data volumes -# CouchDB Service +# Dockerizing a CouchDB Service > **Note**: -> -> - This example assumes you have Docker running in daemon mode. For -> more information please see [*Check your Docker -> install*](../hello_world/#running-examples). > - **If you don't like sudo** then see [*Giving non-root > access*](/installation/binaries/#dockergroup) @@ -28,7 +24,7 @@ We're assuming your Docker host is reachable at `localhost`. If not, replace `localhost` with the public IP of your Docker host. $ HOST=localhost - $ URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -Po '\d+$')/_utils/" + $ URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -o '[1-9][0-9]*$')/_utils/" $ echo "Navigate to $URL in your browser, and use the couch interface to add data" ## Create second database @@ -40,7 +36,7 @@ This time, we're requesting shared access to `$COUCH1`'s volumes. 
## Browse data on the second database $ HOST=localhost - $ URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -Po '\d+$')/_utils/" + $ URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -o '[1-9][0-9]*$')/_utils/" $ echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!' Congratulations, you are now running two Couchdb containers, completely diff --git a/docs/sources/examples/example_header.inc b/docs/sources/examples/example_header.inc deleted file mode 100644 index 5841141e59..0000000000 --- a/docs/sources/examples/example_header.inc +++ /dev/null @@ -1,8 +0,0 @@ - -.. note:: - - * This example assumes you have Docker running in daemon mode. For - more information please see :ref:`running_examples`. - * **If you don't like sudo** then see :ref:`dockergroup` - * **If you're using OS X or docker via TCP** then you shouldn't use `sudo` - diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md deleted file mode 100644 index 177857816c..0000000000 --- a/docs/sources/examples/hello_world.md +++ /dev/null @@ -1,162 +0,0 @@ -page_title: Hello world example -page_description: A simple hello world example with Docker -page_keywords: docker, example, hello world - -# Check your Docker installation - -This guide assumes you have a working installation of Docker. To check -your Docker install, run the following command: - - # Check that you have a working install - $ sudo docker info - -If you get `docker: command not found` or something -like `/var/lib/docker/repositories: permission denied` -you may have an incomplete Docker installation or insufficient -privileges to access docker on your machine. - -Please refer to [*Installation*](/installation/) -for installation instructions. - -## Hello World - -> **Note**: -> -> - This example assumes you have Docker running in daemon mode. For -> more information please see [*Check your Docker -> install*](#check-your-docker-installation). -> - **If you don't like sudo** then see [*Giving non-root -> access*](/installation/binaries/#dockergroup) - -This is the most basic example available for using Docker. - -Download the small base image named `busybox`: - - # Download a busybox image - $ sudo docker pull busybox - -The `busybox` image is a minimal Linux system. You can do the same with -any number of other images, such as `debian`, `ubuntu` or `centos`. The -images can be found and retrieved using the -[Docker.io](http://index.docker.io) registry. - - $ sudo docker run busybox /bin/echo hello world - -This command will run a simple `echo` command, that -will echo `hello world` back to the console over -standard out. - -**Explanation:** - -- **"sudo"** execute the following commands as user *root* -- **"docker run"** run a command in a new container -- **"busybox"** is the image we are running the command in. -- **"/bin/echo"** is the command we want to run in the container -- **"hello world"** is the input for the echo command - -**Video:** - -See the example in action - - - - - -## Hello World Daemon - -> **Note**: -> -> - This example assumes you have Docker running in daemon mode. For -> more information please see [*Check your Docker -> install*](#check-your-docker-installation). -> - **If you don't like sudo** then see [*Giving non-root -> access*](/installation/binaries/#dockergroup) - -And now for the most boring daemon ever written! - -We will use the Ubuntu image to run a simple hello world daemon that -will just print hello world to standard out every second. 
It will -continue to do this until we stop it. - -**Steps:** - - $ container_id=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done") - -We are going to run a simple hello world daemon in a new container made -from the `ubuntu` image. - - - **"sudo docker run -d "** run a command in a new container. We pass - "-d" so it runs as a daemon. - - **"ubuntu"** is the image we want to run the command inside of. - - **"/bin/sh -c"** is the command we want to run in the container - - **"while true; do echo hello world; sleep 1; done"** is the mini - script we want to run, that will just print hello world once a - second until we stop it. - - **$container_id** the output of the run command will return a - container id, we can use in future commands to see what is going on - with this process. - - - - $ sudo docker logs $container_id - -Check the logs make sure it is working correctly. - - - **"docker logs**" This will return the logs for a container - - **$container_id** The Id of the container we want the logs for. - - - - $ sudo docker attach --sig-proxy=false $container_id - -Attach to the container to see the results in real-time. - - - **"docker attach**" This will allow us to attach to a background - process to see what is going on. - - **"–sig-proxy=false"** Do not forward signals to the container; - allows us to exit the attachment using Control-C without stopping - the container. - - **$container_id** The Id of the container we want to attach to. - -Exit from the container attachment by pressing Control-C. - - $ sudo docker ps - -Check the process list to make sure it is running. - - - **"docker ps"** this shows all running process managed by docker - - - - $ sudo docker stop $container_id - -Stop the container, since we don't need it anymore. - - - **"docker stop"** This stops a container - - **$container_id** The Id of the container we want to stop. - - - - $ sudo docker ps - -Make sure it is really stopped. 
- -**Video:** - -See the example in action - - - - - -The next example in the series is a [*Node.js Web App*]( -../nodejs_web_app/#nodejs-web-app) example, or you could skip to any of the -other examples: - - - [*Node.js Web App*](../nodejs_web_app/#nodejs-web-app) - - [*Redis Service*](../running_redis_service/#running-redis-service) - - [*SSH Daemon Service*](../running_ssh_service/#running-ssh-service) - - [*CouchDB Service*](../couchdb_data_volumes/#running-couchdb-service) - - [*PostgreSQL Service*](../postgresql_service/#postgresql-service) - - [*Building an Image with MongoDB*](../mongodb/#mongodb-image) - - [*Python Web App*](../python_web_app/#python-web-app) diff --git a/docs/sources/examples/mongodb.md b/docs/sources/examples/mongodb.md index 4b5f95d023..17d58e0dbc 100644 --- a/docs/sources/examples/mongodb.md +++ b/docs/sources/examples/mongodb.md @@ -1,89 +1,154 @@ -page_title: Building a Docker Image with MongoDB -page_description: How to build a Docker image with MongoDB pre-installed -page_keywords: docker, example, package installation, networking, mongodb +page_title: Dockerizing MongoDB +page_description: Creating a Docker image with MongoDB pre-installed using a Dockerfile and sharing the image on Docker.io +page_keywords: docker, dockerize, dockerizing, article, example, docker.io, platform, package, installation, networking, mongodb, containers, images, image, sharing, dockerfile, build, auto-building, virtualization, framework -# Building an Image with MongoDB +# Dockerizing MongoDB -> **Note**: +## Introduction + +In this example, we are going to learn how to build a Docker image +with MongoDB pre-installed. +We'll also see how to `push` that image to the [Docker.io registry]( +https://index.docker.io) and share it with others! + +Using Docker and containers for deploying [MongoDB](https://www.mongodb.org/) +instances will bring several benefits, such as: + + - Easy to maintain, highly configurable MongoDB instances; + - Ready to run and start working within milliseconds; + - Based on globally accessible and shareable images. + +> **Note:** > -> - This example assumes you have Docker running in daemon mode. For -> more information please see [*Check your Docker -> install*](../hello_world/#running-examples). -> - **If you don't like sudo** then see [*Giving non-root -> access*](/installation/binaries/#dockergroup) +> If you do **_not_** like `sudo`, you might want to check out: +> [*Giving non-root access*](/installation/binaries/#giving-non-root-access). -The goal of this example is to show how you can build your own Docker -images with MongoDB pre-installed. We will do that by constructing a -Dockerfile that downloads a base image, adds an -apt source and installs the database software on Ubuntu. +## Creating a Dockerfile for MongoDB -## Creating a Dockerfile +Let's create our `Dockerfile` and start building it: -Create an empty file called Dockerfile: + $ nano Dockerfile - $ touch Dockerfile +Although optional, it is handy to have comments at the beginning of a +`Dockerfile` explaining its purpose: -Next, define the parent image you want to use to build your own image on -top of. 
Here, we'll use [Ubuntu](https://index.docker.io/_/ubuntu/) -(tag: `latest`) available on the [docker -index](http://index.docker.io): + # Dockerizing MongoDB: Dockerfile for building MongoDB images + # Based on ubuntu:latest, installs MongoDB following the instructions from: + # http://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/ - FROM ubuntu:latest +> **Tip:** `Dockerfile`s are flexible. However, they need to follow a certain +> format. The first item to be defined is the name of an image, which becomes +> the *parent* of your *Dockerized MongoDB* image. -Since we want to be running the latest version of MongoDB we'll need to -add the 10gen repo to our apt sources list. +We will build our image using the latest version of Ubuntu from the +[Docker.io Ubuntu](https://index.docker.io/_/ubuntu/) repository. - # Add 10gen official apt source to the sources list + # Format: FROM repository[:version] + FROM ubuntu:latest + +Continuing, we will declare the `MAINTAINER` of the `Dockerfile`: + + # Format: MAINTAINER Name + MAINTAINER M.Y. Name + +> **Note:** Although Ubuntu systems have MongoDB packages, they are likely to +> be outdated. Therefore in this example, we will use the official MongoDB +> packages. + +We will begin with importing the MongoDB public GPG key. We will also create +a MongoDB repository file for the package manager. + + # Installation: + # Import MongoDB public GPG key AND create a MongoDB list file RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list -Then, we don't want Ubuntu to complain about init not being available so -we'll divert `/sbin/initctl` to -`/bin/true` so it thinks everything is working. +After this initial preparation we can update our packages and install MongoDB. - # Hack for initctl not being available in Ubuntu - RUN dpkg-divert --local --rename --add /sbin/initctl - RUN ln -s /bin/true /sbin/initctl - -Afterwards we'll be able to update our apt repositories and install -MongoDB - - # Install MongoDB + # Update apt-get sources AND install MongoDB RUN apt-get update - RUN apt-get install mongodb-10gen + RUN apt-get install -y -q mongodb-org -To run MongoDB we'll have to create the default data directory (because -we want it to run without needing to provide a special configuration -file) +> **Tip:** You can install a specific version of MongoDB by using a list +> of required packages with versions, e.g.: +> +> RUN apt-get install -y -q mongodb-org=2.6.1 mongodb-org-server=2.6.1 mongodb-org-shell=2.6.1 mongodb-org-mongos=2.6.1 mongodb-org-tools=2.6.1 + +MongoDB requires a data directory. Let's create it as the final step of our +installation instructions. # Create the MongoDB data directory RUN mkdir -p /data/db -Finally, we'll expose the standard port that MongoDB runs on, 27107, as -well as define an `ENTRYPOINT` instruction for the -container. +Lastly we set the `ENTRYPOINT` which will tell Docker to run `mongod` inside +the containers launched from our MongoDB image. And for ports, we will use +the `EXPOSE` instruction. + # Expose port 27017 from the container to the host EXPOSE 27017 - ENTRYPOINT ["usr/bin/mongod"] -Now, lets build the image which will go through the -Dockerfile we made and run all of the commands. + # Set usr/bin/mongod as the dockerized entry-point application + ENTRYPOINT usr/bin/mongod - $ sudo docker build -t /mongodb . +Now save the file and let's build our image. 
-Now you should be able to run `mongod` as a daemon -and be able to connect on the local port! +> **Note:** +> +> The full version of this `Dockerfile` can be found [here](/examples/mongodb/Dockerfile). - # Regular style - $ MONGO_ID=$(sudo docker run -d /mongodb) +## Building the MongoDB Docker image - # Lean and mean - $ MONGO_ID=$(sudo docker run -d /mongodb --noprealloc --smallfiles) +With our `Dockerfile`, we can now build the MongoDB image using Docker. Unless +experimenting, it is always a good practice to tag Docker images by passing the +`--tag` option to `docker build` command. - # Check the logs out - $ sudo docker logs $MONGO_ID + # Format: sudo docker build --tag/-t / . + # Example: + $ sudo docker build --tag my/repo . - # Connect and play around - $ mongo --port +Once this command is issued, Docker will go through the `Dockerfile` and build +the image. The final image will be tagged `my/repo`. -Sweet! +## Pushing the MongoDB image to Docker.io + +All Docker image repositories can be hosted and shared on +[Docker.io](https://index.docker.io) with the `docker push` command. For this, +you need to be logged-in. + + # Log-in + $ sudo docker login + Username: + .. + + # Push the image + # Format: sudo docker push / + $ sudo docker push my/repo + The push refers to a repository [my/repo] (len: 1) + Sending image list + Pushing repository my/repo (1 tags) + .. + +## Using the MongoDB image + +Using the MongoDB image we created, we can run one or more MongoDB instances +as daemon process(es). + + # Basic way + # Usage: sudo docker run --name -d / + $ sudo docker run --name mongo_instance_001 -d my/repo + + # Dockerized MongoDB, lean and mean! + # Usage: sudo docker run --name -d / --noprealloc --smallfiles + $ sudo docker run --name mongo_instance_001 -d my/repo --noprealloc --smallfiles + + # Checking out the logs of a MongoDB container + # Usage: sudo docker logs + $ sudo docker logs mongo_instance_001 + + # Playing with MongoDB + # Usage: mongo --port + $ mongo --port 12345 + + - [Linking containers](/userguide/dockerlinks) + - [Cross-host linking containers](/articles/ambassador_pattern_linking/) + - [Creating an Automated Build](/docker-io/builds/#automated-builds) diff --git a/docs/sources/examples/mongodb/Dockerfile b/docs/sources/examples/mongodb/Dockerfile new file mode 100644 index 0000000000..e7acc0fd85 --- /dev/null +++ b/docs/sources/examples/mongodb/Dockerfile @@ -0,0 +1,24 @@ +# Dockerizing MongoDB: Dockerfile for building MongoDB images +# Based on ubuntu:latest, installs MongoDB following the instructions from: +# http://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/ + +FROM ubuntu:latest +MAINTAINER Docker + +# Installation: +# Import MongoDB public GPG key AND create a MongoDB list file +RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 +RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list + +# Update apt-get sources AND install MongoDB +RUN apt-get update +RUN apt-get install -y -q mongodb-org + +# Create the MongoDB data directory +RUN mkdir -p /data/db + +# Expose port #27017 from the container to the host +EXPOSE 27017 + +# Set usr/bin/mongod as the dockerized entry-point application +ENTRYPOINT usr/bin/mongod diff --git a/docs/sources/examples/nodejs_web_app.md b/docs/sources/examples/nodejs_web_app.md index bc0e908d2d..99946e99e0 100644 --- a/docs/sources/examples/nodejs_web_app.md +++ b/docs/sources/examples/nodejs_web_app.md @@ -1,14 +1,10 @@ 
-page_title: Running a Node.js app on CentOS -page_description: Installing and running a Node.js app on CentOS +page_title: Dockerizing a Node.js Web App +page_description: Installing and running a Node.js app with Docker page_keywords: docker, example, package installation, node, centos -# Node.js Web App +# Dockerizing a Node.js Web App > **Note**: -> -> - This example assumes you have Docker running in daemon mode. For -> more information please see [*Check your Docker -> install*](../hello_world/#running-examples). > - **If you don't like sudo** then see [*Giving non-root > access*](/installation/binaries/#dockergroup) @@ -16,7 +12,7 @@ The goal of this example is to show you how you can build your own Docker images from a parent image using a `Dockerfile` . We will do that by making a simple Node.js hello world web application running on CentOS. You can get the full source code at -[https://github.com/gasi/docker-node-hello](https://github.com/gasi/docker-node-hello). +[https://github.com/enokd/docker-node-hello/](https://github.com/enokd/docker-node-hello/). ## Create Node.js app @@ -187,11 +183,10 @@ Now you can call your app using `curl` (install if needed via: Content-Length: 12 Date: Sun, 02 Jun 2013 03:53:22 GMT Connection: keep-alive - + Hello World We hope this tutorial helped you get up and running with Node.js and CentOS on Docker. You can get the full source code at [https://github.com/gasi/docker-node-hello](https://github.com/gasi/docker-node-hello). -Continue to [*Redis Service*](../running_redis_service/#running-redis-service). diff --git a/docs/sources/examples/postgresql_service.md b/docs/sources/examples/postgresql_service.md index 14d9e647a3..b931fd8ba4 100644 --- a/docs/sources/examples/postgresql_service.md +++ b/docs/sources/examples/postgresql_service.md @@ -1,26 +1,22 @@ -page_title: PostgreSQL service How-To +page_title: Dockerizing PostgreSQL page_description: Running and installing a PostgreSQL service page_keywords: docker, example, package installation, postgresql -# PostgreSQL Service +# Dockerizing PostgreSQL > **Note**: -> -> - This example assumes you have Docker running in daemon mode. For -> more information please see [*Check your Docker -> install*](../hello_world/#running-examples). > - **If you don't like sudo** then see [*Giving non-root > access*](/installation/binaries/#dockergroup) ## Installing PostgreSQL on Docker -Assuming there is no Docker image that suits your needs in [the index]( -http://index.docker.io), you can create one yourself. +Assuming there is no Docker image that suits your needs on the [Docker +Hub]( http://index.docker.io), you can create one yourself. -Start by creating a new Dockerfile: +Start by creating a new `Dockerfile`: > **Note**: -> This PostgreSQL setup is for development only purposes. Refer to the +> This PostgreSQL setup is for development-only purposes. Refer to the > PostgreSQL documentation to fine-tune these settings so that it is > suitably secure. @@ -32,7 +28,7 @@ Start by creating a new Dockerfile: MAINTAINER SvenDowideit@docker.com # Add the PostgreSQL PGP key to verify their Debian packages. - # It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc + # It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 # Add PostgreSQL's repository. It contains the most recent stable release @@ -84,14 +80,14 @@ Build an image from the Dockerfile assign it a name. 
And run the PostgreSQL server container (in the foreground): - $ sudo docker run -rm -P -name pg_test eg_postgresql + $ sudo docker run --rm -P --name pg_test eg_postgresql There are 2 ways to connect to the PostgreSQL server. We can use [*Link -Containers*](/use/working_with_links_names/#working-with-links-names), -or we can access it from our host (or the network). +Containers*](/userguide/dockerlinks), or we can access it from our host +(or the network). > **Note**: -> The `-rm` removes the container and its image when +> The `--rm` removes the container and its image when > the container exists successfully. ### Using container linking @@ -101,7 +97,7 @@ Containers can be linked to another container's ports directly using `docker run`. This will set a number of environment variables that can then be used to connect: - $ sudo docker run -rm -t -i -link pg_test:pg eg_postgresql bash + $ sudo docker run --rm -t -i --link pg_test:pg eg_postgresql bash postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password @@ -143,7 +139,7 @@ prompt, you can create a table and populate it. You can use the defined volumes to inspect the PostgreSQL log files and to backup your configuration and data: - $ docker run -rm --volumes-from pg_test -t -i busybox sh + $ docker run --rm --volumes-from pg_test -t -i busybox sh / # ls bin etc lib linuxrc mnt proc run sys usr diff --git a/docs/sources/examples/python_web_app.md b/docs/sources/examples/python_web_app.md deleted file mode 100644 index e761003a9e..0000000000 --- a/docs/sources/examples/python_web_app.md +++ /dev/null @@ -1,127 +0,0 @@ -page_title: Python Web app example -page_description: Building your own python web app using docker -page_keywords: docker, example, python, web app - -# Python Web App - -> **Note**: -> -> - This example assumes you have Docker running in daemon mode. For -> more information please see [*Check your Docker -> install*](../hello_world/#running-examples). -> - **If you don't like sudo** then see [*Giving non-root -> access*](/installation/binaries/#dockergroup) - -While using Dockerfiles is the preferred way to create maintainable and -repeatable images, its useful to know how you can try things out and -then commit your live changes to an image. - -The goal of this example is to show you how you can modify your own -Docker images by making changes to a running container, and then saving -the results as a new image. We will do that by making a simple `hello -world` Flask web application image. - -## Download the initial image - -Download the `shykes/pybuilder` Docker image from the `http://index.docker.io` -registry. - -This image contains a `buildapp` script to download -the web app and then `pip install` any required -modules, and a `runapp` script that finds the -`app.py` and runs it. - - $ sudo docker pull shykes/pybuilder - -> **Note**: -> This container was built with a very old version of docker (May 2013 - -> see [shykes/pybuilder](https://github.com/shykes/pybuilder) ), when the -> Dockerfile format was different, but the image can -> still be used now. - -## Interactively make some modifications - -We then start a new container running interactively using the image. -First, we set a `URL` variable that points to a -tarball of a simple helloflask web app, and then we run a command -contained in the image called `buildapp`, passing it -the `$URL` variable. The container is given a name -`pybuilder_run` which we will use in the next steps. 
- -While this example is simple, you could run any number of interactive -commands, try things out, and then exit when you're done. - - $ sudo docker run -i -t -name pybuilder_run shykes/pybuilder bash - - $$ URL=http://github.com/shykes/helloflask/archive/master.tar.gz - $$ /usr/local/bin/buildapp $URL - [...] - $$ exit - -## Commit the container to create a new image - -Save the changes we just made in the container to a new image called -`/builds/github.com/shykes/helloflask/master`. You -now have 3 different ways to refer to the container: name -`pybuilder_run`, short-id `c8b2e8228f11`, or long-id -`c8b2e8228f11b8b3e492cbf9a49923ae66496230056d61e07880dc74c5f495f9`. - - $ sudo docker commit pybuilder_run /builds/github.com/shykes/helloflask/master - c8b2e8228f11b8b3e492cbf9a49923ae66496230056d61e07880dc74c5f495f9 - -## Run the new image to start the web worker - -Use the new image to create a new container with network port 5000 -mapped to a local port - - $ sudo docker run -d -p 5000 --name web_worker /builds/github.com/shykes/helloflask/master /usr/local/bin/runapp - - - **"docker run -d "** run a command in a new container. We pass "-d" - so it runs as a daemon. - - **"-p 5000"** the web app is going to listen on this port, so it - must be mapped from the container to the host system. - - **/usr/local/bin/runapp** is the command which starts the web app. - -## View the container logs - -View the logs for the new `web_worker` container and -if everything worked as planned you should see the line -`Running on http://0.0.0.0:5000/` in the log output. - -To exit the view without stopping the container, hit Ctrl-C, or open -another terminal and continue with the example while watching the result -in the logs. - - $ sudo docker logs -f web_worker - * Running on http://0.0.0.0:5000/ - -## See the webapp output - -Look up the public-facing port which is NAT-ed. Find the private port -used by the container and store it inside of the `WEB_PORT` -variable. - -Access the web app using the `curl` binary. If -everything worked as planned you should see the line -`Hello world!` inside of your console. - - $ WEB_PORT=$(sudo docker port web_worker 5000 | awk -F: '{ print $2 }') - - # install curl if necessary, then ... - $ curl http://127.0.0.1:$WEB_PORT - Hello world! - -## Clean up example containers and images - - $ sudo docker ps --all - -List `--all` the Docker containers. If this -container had already finished running, it will still be listed here -with a status of `Exit 0`. - - $ sudo docker stop web_worker - $ sudo docker rm web_worker pybuilder_run - $ sudo docker rmi /builds/github.com/shykes/helloflask/master shykes/pybuilder:latest - -And now stop the running web worker, and delete the containers, so that -we can then delete the images that we used. diff --git a/docs/sources/examples/running_redis_service.md b/docs/sources/examples/running_redis_service.md index ca67048625..0eeef0625d 100644 --- a/docs/sources/examples/running_redis_service.md +++ b/docs/sources/examples/running_redis_service.md @@ -1,16 +1,8 @@ -page_title: Running a Redis service +page_title: Dockerizing a Redis service page_description: Installing and running a Redis service page_keywords: docker, example, package installation, networking, redis -# Redis Service - -> **Note**: -> -> - This example assumes you have Docker running in daemon mode. For -> more information please see [*Check your Docker -> install*](../hello_world/#running-examples).
-> - **If you don't like sudo** then see [*Giving non-root -> access*](/installation/binaries/#dockergroup) +# Dockerizing a Redis Service Very simple, no frills, Redis service attached to a web application using a link. diff --git a/docs/sources/examples/running_riak_service.md b/docs/sources/examples/running_riak_service.md index 852035f9a4..098cc9094b 100644 --- a/docs/sources/examples/running_riak_service.md +++ b/docs/sources/examples/running_riak_service.md @@ -1,30 +1,21 @@ -page_title: Running a Riak service +page_title: Dockerizing a Riak service page_description: Build a Docker image with Riak pre-installed page_keywords: docker, example, package installation, networking, riak -# Riak Service - -> **Note**: -> -> - This example assumes you have Docker running in daemon mode. For -> more information please see [*Check your Docker -> install*](../hello_world/#running-examples). -> - **If you don't like sudo** then see [*Giving non-root -> access*](/installation/binaries/#dockergroup) +# Dockerizing a Riak Service The goal of this example is to show you how to build a Docker image with Riak pre-installed. ## Creating a Dockerfile -Create an empty file called Dockerfile: +Create an empty file called `Dockerfile`: $ touch Dockerfile Next, define the parent image you want to use to build your image on top of. We'll use [Ubuntu](https://index.docker.io/_/ubuntu/) (tag: -`latest`), which is available on the [docker -index](http://index.docker.io): +`latest`), which is available on [Docker Hub](http://index.docker.io): # Riak # @@ -101,7 +92,7 @@ are started: ## Create a supervisord configuration file Create an empty file called `supervisord.conf`. Make -sure it's at the same directory level as your Dockerfile: +sure it's at the same directory level as your `Dockerfile`: touch supervisord.conf diff --git a/docs/sources/examples/running_ssh_service.Dockerfile b/docs/sources/examples/running_ssh_service.Dockerfile index dd2acb7a4b..978e610422 100644 --- a/docs/sources/examples/running_ssh_service.Dockerfile +++ b/docs/sources/examples/running_ssh_service.Dockerfile @@ -2,16 +2,15 @@ # # VERSION 0.0.1 -FROM ubuntu +FROM debian MAINTAINER Thatcher R. Peskens "thatcher@dotcloud.com" # make sure the package repository is up to date -RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list RUN apt-get update RUN apt-get install -y openssh-server -RUN mkdir /var/run/sshd +RUN mkdir /var/run/sshd RUN echo 'root:screencast' |chpasswd EXPOSE 22 -CMD /usr/sbin/sshd -D +CMD ["/usr/sbin/sshd", "-D"] diff --git a/docs/sources/examples/running_ssh_service.md b/docs/sources/examples/running_ssh_service.md index 864d10c726..27439f998f 100644 --- a/docs/sources/examples/running_ssh_service.md +++ b/docs/sources/examples/running_ssh_service.md @@ -1,17 +1,10 @@ -page_title: Running an SSH service -page_description: Installing and running an sshd service +page_title: Dockerizing an SSH service +page_description: Installing and running an SSHd service on Docker page_keywords: docker, example, package installation, networking -# SSH Daemon Service +# Dockerizing an SSH Daemon Service -> **Note:** -> - This example assumes you have Docker running in daemon mode. For -> more information please see [*Check your Docker -> install*](../hello_world/#running-examples). 
-> - **If you don't like sudo** then see [*Giving non-root -> access*](/installation/binaries/#dockergroup) - -The following `Dockerfile` sets up an SSHd service in a container that you can use to connect to and inspect other containers' volumes, or to get quick access to a test container. @@ -19,34 +12,32 @@ quick access to a test container. # # VERSION 0.0.1 - FROM ubuntu + FROM debian MAINTAINER Thatcher R. Peskens "thatcher@dotcloud.com" # make sure the package repository is up to date - RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list RUN apt-get update RUN apt-get install -y openssh-server - RUN mkdir /var/run/sshd + RUN mkdir /var/run/sshd RUN echo 'root:screencast' |chpasswd EXPOSE 22 - CMD /usr/sbin/sshd -D + CMD ["/usr/sbin/sshd", "-D"] Build the image using: - $ sudo docker build -rm -t eg_sshd . + $ sudo docker build --rm -t eg_sshd . -Then run it. You can then use `docker port` to find -out what host port the container's port 22 is mapped to: +Then run it. You can then use `docker port` to find out what host port +the container's port 22 is mapped to: - $ sudo docker run -d -P -name test_sshd eg_sshd + $ sudo docker run -d -P --name test_sshd eg_sshd $ sudo docker port test_sshd 22 0.0.0.0:49154 -And now you can ssh to port `49154` on the Docker -daemon's host IP address (`ip address` or -`ifconfig` can tell you that): +And now you can ssh to port `49154` on the Docker daemon's host IP +address (`ip address` or `ifconfig` can tell you that): $ ssh root@192.168.1.2 -p 49154 # The password is ``screencast``. @@ -58,3 +49,4 @@ container, and then removing the image. $ sudo docker stop test_sshd $ sudo docker rm test_sshd $ sudo docker rmi eg_sshd + diff --git a/docs/sources/faq.md b/docs/sources/faq.md index 2494f33e9c..8dbdfd184f 100644 --- a/docs/sources/faq.md +++ b/docs/sources/faq.md @@ -142,12 +142,11 @@ running in parallel. ### How do I connect Docker containers? Currently the recommended way to link containers is via the link -primitive. You can see details of how to [work with links here]( -http://docs.docker.io/use/working_with_links_names/). +primitive. You can see details of how to [work with links +here](/userguide/dockerlinks). Also useful when enabling more flexible service portability is the -[Ambassador linking pattern]( -http://docs.docker.io/use/ambassador_pattern_linking/). +[Ambassador linking pattern](/articles/ambassador_pattern_linking/). ### How do I run more than one process in a Docker container? @@ -156,8 +155,7 @@ http://supervisord.org/), runit, s6, or daemontools can do the trick. Docker will start up the process management daemon which will then fork to run additional processes. As long as the process manager daemon continues to run, the container will continue to run as well. You can see a more substantial -example [that uses supervisord here]( -http://docs.docker.io/examples/using_supervisord/). +example [that uses supervisord here](/articles/using_supervisord/). ### What platforms does Docker run on? @@ -207,5 +205,5 @@ You can find more answers on: - [Ask questions on Stackoverflow](http://stackoverflow.com/search?q=docker) - [Join the conversation on Twitter](http://twitter.com/docker) -Looking for something else to read? Checkout the [*Hello World*]( -../examples/hello_world/#hello-world) example. +Looking for something else to read? Check out the [User +Guide](/userguide/).
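To make the linking answer above concrete, here is a minimal sketch (the image and alias names are hypothetical, not from this diff; the environment-variable naming follows the same pattern as the `PG_PORT_5432_TCP_ADDR` variables in the PostgreSQL example earlier):

    # Start a named server container, then link a client container to it.
    $ sudo docker run -d --name redis_server myrepo/redis
    $ sudo docker run -i -t --link redis_server:db ubuntu /bin/bash
    # Inside the client, the link shows up as environment variables,
    # e.g. DB_PORT_6379_TCP_ADDR and DB_PORT_6379_TCP_PORT.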
diff --git a/docs/sources/index.md b/docs/sources/index.md index d582321563..dc18fcdad8 100644 --- a/docs/sources/index.md +++ b/docs/sources/index.md @@ -1,82 +1,93 @@ page_title: About Docker -page_description: Docker introduction home page +page_description: Introduction to Docker. page_keywords: docker, introduction, documentation, about, technology, understanding, Dockerfile # About Docker -*Secure And Portable Containers Made Easy* +**Develop, Ship and Run Any Application, Anywhere** -## Introduction +[**Docker**](https://www.docker.io) is a platform for developers and sysadmins +to develop, ship, and run applications. Docker lets you quickly assemble +applications from components and eliminates the friction that can come when +shipping code. Docker lets you get your code tested and deployed into production +as fast as possible. -[**Docker**](https://www.docker.io) is a container based virtualization -framework. Unlike traditional virtualization Docker is fast, lightweight -and easy to use. Docker allows you to create containers holding -all the dependencies for an application. Each container is kept isolated -from any other, and nothing gets shared. +Docker consists of: -## Docker highlights +* The Docker Engine - our lightweight and powerful open source container + virtualization technology combined with a work flow for building + and containerizing your applications. +* [Docker.io](https://index.docker.io) - our SaaS service for + sharing and managing your application stacks. - - **Containers provide sand-boxing:** - Applications run securely without outside access. - - **Docker allows simple portability:** - Containers are directories, they can be zipped and transported. - - **It all works fast:** - Starting a container is a very fast single process. - - **Docker is easy on the system resources (unlike VMs):** - No more than what each application needs. - - **Agnostic in its _essence_:** - Free of framework, language or platform dependencies. +## Why Docker? -And most importantly: +- **Faster delivery of your applications** + * We want your environment to work better. Docker containers, + and the work flow that comes with them, help your developers, + sysadmins, QA folks, and release engineers work together to get your code + into production and make it useful. We've created a standard + container format that lets developers care about their applications + inside containers while sysadmins and operators can work on running the + container in your deployment. This separation of duties streamlines and + simplifies the management and deployment of code. + * We make it easy to build new containers, enable rapid iteration of + your applications, and increase the visibility of changes. This + helps everyone in your organization understand how an application works + and how it is built. + * Docker containers are lightweight and fast! Containers have + sub-second launch times, reducing the cycle + time of development, testing, and deployment. - - **Docker reduces complexity:** - Docker accepts commands *in plain English*, e.g. `docker run [..]`. +- **Deploy and scale more easily** + * Docker containers run (almost) everywhere. You can deploy + containers on desktops, physical servers, virtual machines, into + data centers, and up to public and private clouds. + * Since Docker runs on so many platforms, it's easy to move your + applications around. You can easily move an application from a + testing environment into the cloud and back whenever you need.
+ * Docker's lightweight containers also make scaling up and + down fast and easy. You can quickly launch more containers when + needed and then shut them down easily when they're no longer needed. + +- **Get higher density and run more workloads** + * Docker containers don't need a hypervisor, so you can pack more of + them onto your hosts. This means you get more value out of every + server and can potentially reduce what you spend on equipment and + licenses. + +- **Faster deployment makes for easier management** + * As Docker speeds up your work flow, it gets easier to make lots + of small changes instead of huge, big bang updates. Smaller + changes mean reduced risk and more uptime. ## About this guide -In this introduction we will take you on a tour and show you what -makes Docker tick. +First, the [Understanding Docker +section](introduction/understanding-docker.md) will help you: -On the [**first page**](introduction/understanding-docker.md), which is -**_informative_**: + - See how Docker works at a high level + - Understand the architecture of Docker + - Discover Docker's features + - See how Docker compares to virtual machines + - See some common use cases. - - You will find information on Docker; - - And discover Docker's features. - - We will also compare Docker to virtual machines; - - And see some common use cases. +> [Click here to go to the Understanding +> Docker section](introduction/understanding-docker.md). -> [Click here to go to Understanding Docker](introduction/understanding-docker.md). +### Installation Guides -The [**second page**](introduction/technology.md) has **_technical_** information on: +Next, we'll show you how to install Docker on a variety of platforms in the +[installation](/installation/#installation) section. - - The architecture of Docker; - - The underlying technology, and; - - *How* Docker works. +> [Click here to go to the Installation +> section](/installation/#installation). -> [Click here to go to Understanding the Technology](introduction/technology.md). +### Docker User Guide -On the [**third page**](introduction/working-with-docker.md) we get **_practical_**. -There you can: +Once you've gotten Docker installed we recommend you work through the +[Docker User Guide](/userguide/) to learn about Docker in more detail and +answer questions about usage and implementation. - - Learn about Docker's components (i.e. Containers, Images and the - Dockerfile); - - And get started working with them straight away. +> [Click here to go to the Docker User Guide](/userguide/). -> [Click here to go to Working with Docker](introduction/working-with-docker.md). - -Finally, on the [**fourth**](introduction/get-docker.md) page, we go **_hands on_** -and see: - - - The installation instructions, and; - - How Docker makes some hard problems much, much easier. - -> [Click here to go to Get Docker](introduction/get-docker.md). - -> **Note**: -> We know how valuable your time is. Therefore, the documentation is prepared -> in a way to allow anyone to start from any section need. Although we strongly -> recommend that you visit [Understanding Docker]( -> introduction/understanding-docker.md) to see how Docker is different, if you -> already have some knowledge and want to quickly get started with Docker, -> don't hesitate to jump to [Working with Docker]( -> introduction/working-with-docker.md).
diff --git a/docs/sources/installation.md b/docs/sources/installation.md index 66b28b2b3c..1c3c726594 100644 --- a/docs/sources/installation.md +++ b/docs/sources/installation.md @@ -12,6 +12,7 @@ techniques for installing Docker all the time. - [Ubuntu](ubuntulinux/) - [Red Hat Enterprise Linux](rhel/) - [Fedora](fedora/) + - [Debian](debian/) - [Arch Linux](archlinux/) - [CRUX Linux](cruxlinux/) - [Gentoo](gentoolinux/) @@ -22,4 +23,4 @@ techniques for installing Docker all the time. - [Amazon EC2](amazon/) - [Rackspace Cloud](rackspace/) - [Google Cloud Platform](google/) - - [Binaries](binaries/) \ No newline at end of file + - [Binaries](binaries/) diff --git a/docs/sources/installation/amazon.md b/docs/sources/installation/amazon.md index 61a12d6b43..86443147e7 100644 --- a/docs/sources/installation/amazon.md +++ b/docs/sources/installation/amazon.md @@ -1,15 +1,9 @@ page_title: Installation on Amazon EC2 -page_description: Please note this project is currently under heavy development. It should not be used in production. +page_description: Installation instructions for Docker on Amazon EC2. page_keywords: amazon ec2, virtualization, cloud, docker, documentation, installation # Amazon EC2 -> **Note**: -> Docker is still under heavy development! We don't recommend using it in -> production yet, but we're getting closer with each release. Please see -> our blog post, [Getting to Docker 1.0]( -> http://blog.docker.io/2013/08/getting-to-docker-1-0/) - There are several ways to install Docker on AWS EC2: - [*Amazon QuickStart (Release Candidate - March 2014)*]( @@ -59,8 +53,7 @@ add the *ubuntu* user to it so that you don't have to use `sudo` for every Docker command. Once you've got Docker installed, you're ready to try it out – head on -over to the [*First steps with Docker*](/use/basics/) or -[*Examples*](/examples/) section. +over to the [User Guide](/userguide). ## Amazon QuickStart (Release Candidate - March 2014) @@ -100,4 +93,4 @@ QuickStart*](#amazon-quickstart) to pick an image (or use one of your own) and skip the step with the *User Data*. Then continue with the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) instructions. -Continue with the [*Hello World*](/examples/hello_world/#hello-world) example. +Continue with the [User Guide](/userguide/). diff --git a/docs/sources/installation/archlinux.md b/docs/sources/installation/archlinux.md index c6d4f73fb8..81cc21fb02 100644 --- a/docs/sources/installation/archlinux.md +++ b/docs/sources/installation/archlinux.md @@ -1,21 +1,9 @@ page_title: Installation on Arch Linux -page_description: Please note this project is currently under heavy development. It should not be used in production. +page_description: Installation instructions for Docker on ArchLinux. page_keywords: arch linux, virtualization, docker, documentation, installation # Arch Linux -> **Note**: -> Docker is still under heavy development! We don't recommend using it in -> production yet, but we're getting closer with each release. Please see -> our blog post, [Getting to Docker 1.0]( -> http://blog.docker.io/2013/08/getting-to-docker-1-0/) - -> **Note**: -> This is a community contributed installation path. The only `official` -> installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) -> installation path.
This version may be out of date because it depends on -> some binaries to be updated and published - Installing on Arch Linux can be handled via the package in community: - [docker](https://www.archlinux.org/packages/community/x86_64/docker/) diff --git a/docs/sources/installation/binaries.md b/docs/sources/installation/binaries.md index 36aa0ae249..97e2f93c4e 100644 --- a/docs/sources/installation/binaries.md +++ b/docs/sources/installation/binaries.md @@ -1,15 +1,9 @@ page_title: Installation from Binaries -page_description: This instruction set is meant for hackers who want to try out Docker on a variety of environments. +page_description: Instructions for installing Docker as a binary. Mostly meant for hackers who want to try out Docker on a variety of environments. page_keywords: binaries, installation, docker, documentation, linux # Binaries -> **Note**: -> Docker is still under heavy development! We don't recommend using it in -> production yet, but we're getting closer with each release. Please see -> our blog post, [Getting to Docker 1.0]( -> http://blog.docker.io/2013/08/getting-to-docker-1-0/) - **This instruction set is meant for hackers who want to try out Docker on a variety of environments.** @@ -62,20 +56,17 @@ Linux kernel (it even builds on OSX!). ## Giving non-root access -The `docker` daemon always runs as the root user, -and since Docker version 0.5.2, the `docker` daemon -binds to a Unix socket instead of a TCP port. By default that Unix -socket is owned by the user *root*, and so, by default, you can access -it with `sudo`. +The `docker` daemon always runs as the root user, and the `docker` +daemon binds to a Unix socket instead of a TCP port. By default that +Unix socket is owned by the user *root*, and so, by default, you can +access it with `sudo`. -Starting in version 0.5.3, if you (or your Docker installer) create a -Unix group called *docker* and add users to it, then the -`docker` daemon will make the ownership of the Unix -socket read/writable by the *docker* group when the daemon starts. The -`docker` daemon must always run as the root user, -but if you run the `docker` client as a user in the -*docker* group then you don't need to add `sudo` to -all the client commands. +If you (or your Docker installer) create a Unix group called *docker* +and add users to it, then the `docker` daemon will make the ownership of +the Unix socket read/writable by the *docker* group when the daemon +starts. The `docker` daemon must always run as the root user, but if you +run the `docker` client as a user in the *docker* group then you don't +need to add `sudo` to all the client commands. > **Warning**: > The *docker* group (or the group specified with `-G`) is root-equivalent; @@ -99,4 +90,4 @@ Then follow the regular installation steps. # run a container and open an interactive shell in the container $ sudo ./docker run -i -t ubuntu /bin/bash -Continue with the [*Hello World*](/examples/hello_world/#hello-world) example. +Continue with the [User Guide](/userguide/). diff --git a/docs/sources/installation/centos.md b/docs/sources/installation/centos.md new file mode 100644 index 0000000000..3966d0f092 --- /dev/null +++ b/docs/sources/installation/centos.md @@ -0,0 +1,78 @@ +page_title: Installation on CentOS +page_description: Instructions for installing Docker on CentOS +page_keywords: Docker, Docker documentation, requirements, linux, centos, epel, docker.io, docker-io + +# CentOS + +The Docker package is available via the EPEL repository. 
These +instructions work for CentOS 6 and later. They will likely work for +other binary compatible EL6 distributions such as Scientific Linux, but +they haven't been tested. + +Please note that this package is part of [Extra Packages for Enterprise +Linux (EPEL)](https://fedoraproject.org/wiki/EPEL), a community effort +to create and maintain additional packages for the RHEL distribution. + +Also note that due to the current Docker limitations, Docker is able to +run only on the **64 bit** architecture. + +To run Docker, you will need [CentOS 6](http://www.centos.org) or higher, +with a kernel version 2.6.32-431 or higher, as this has specific kernel +fixes to allow Docker to run. + +## Installation + +Firstly, you need to ensure you have the EPEL repository enabled. Please +follow the [EPEL installation instructions]( +https://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F). + +The `docker-io` package provides Docker on EPEL. + +If you already have the (unrelated) `docker` package +installed, it will conflict with `docker-io`. +There's a [bug report]( +https://bugzilla.redhat.com/show_bug.cgi?id=1043676) filed for it. +To proceed with `docker-io` installation, please remove `docker` first. + +Next, let's install the `docker-io` package which +will install Docker on our host. + + $ sudo yum install docker-io + +Now that it's installed, let's start the Docker daemon. + + $ sudo service docker start + +If we want Docker to start at boot, we should also: + + $ sudo chkconfig docker on + +Now let's verify that Docker is working. First we'll need to get the latest +`centos` image. + + $ sudo docker pull centos:latest + +Next we'll make sure that we can see the image by running: + + $ sudo docker images centos + +This should generate some output similar to: + + $ sudo docker images centos + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + centos latest 0b443ba03958 2 hours ago 297.6 MB + +Run a simple bash shell to test the image: + + $ sudo docker run -i -t centos /bin/bash + +If everything is working properly, you'll get a simple bash prompt. Type +`exit` to continue. + +**Done!** You can either continue with the [Docker User +Guide](/userguide/) or explore and build on the images yourself. + +## Issues? + +If you have any issues, please report them directly in the +[CentOS bug tracker](http://bugs.centos.org). diff --git a/docs/sources/installation/cruxlinux.md b/docs/sources/installation/cruxlinux.md index d1a4de7367..28efde376a 100644 --- a/docs/sources/installation/cruxlinux.md +++ b/docs/sources/installation/cruxlinux.md @@ -4,63 +4,25 @@ page_keywords: crux linux, virtualization, Docker, documentation, installation # CRUX Linux -> **Note**: -> Docker is still under heavy development! We don't recommend using it in -> production yet, but we're getting closer with each release. Please see -> our blog post, [Getting to Docker 1.0]( -> http://blog.docker.io/2013/08/getting-to-docker-1-0/) - -> **Note**: -> This is a community contributed installation path. The only `official` -> installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) -> installation path. This version may be out of date because it depends on -> some binaries to be updated and published.
- Installing on CRUX Linux can be handled via the ports from [James -Mills](http://prologic.shortcircuit.net.au/): +Mills](http://prologic.shortcircuit.net.au/), which are included in the +official [contrib](http://crux.nu/portdb/?a=repo&q=contrib) ports: -- [docker](https://bitbucket.org/prologic/ports/src/tip/docker/) -- [docker-bin](https://bitbucket.org/prologic/ports/src/tip/docker-bin/) -- [docker-git](https://bitbucket.org/prologic/ports/src/tip/docker-git/) +- docker +- docker-bin The `docker` port will install the latest tagged version of Docker. The `docker-bin` port will -install the latest tagged versin of Docker from upstream built binaries. -The `docker-git` package will build from the current -master branch. +install the latest tagged version of Docker from upstream built binaries. ## Installation -For the time being (*until the CRUX Docker port(s) get into the official -contrib repository*) you will need to install [James -Mills`](https://bitbucket.org/prologic/ports) ports repository. You can -do so via: +Assuming you have contrib enabled, update your ports tree and install docker (*as root*): -Download the `httpup` file to -`/etc/ports/`: + # prt-get depinst docker - $ curl -q -o - http://crux.nu/portdb/?a=getup&q=prologic > /etc/ports/prologic.httpup +You can install `docker-bin` instead if you wish to avoid compilation time. -Add `prtdir /usr/ports/prologic` to -`/etc/prt-get.conf`: - - $ vim /etc/prt-get.conf - - # or: - $ echo "prtdir /usr/ports/prologic" >> /etc/prt-get.conf - -Update ports and prt-get cache: - - $ ports -u - $ prt-get cache - -To install (*and its dependencies*): - - $ prt-get depinst docker - -Use `docker-bin` for the upstream binary or -`docker-git` to build and install from the master -branch from git. ## Kernel Requirements @@ -68,24 +30,34 @@ To have a working **CRUX+Docker** Host you must ensure your Kernel has the necessary modules enabled for LXC containers to function correctly and Docker Daemon to work properly. -Please read the `README.rst`: +Please read the `README`: $ prt-get readme docker -There is a `test_kernel_config.sh` script in the -above ports which you can use to test your Kernel configuration: +The `docker` and `docker-bin` ports install the `contrib/check-config.sh` +script provided by the Docker contributors for checking your kernel +configuration as a suitable Docker Host. - $ cd /usr/ports/prologic/docker - $ ./test_kernel_config.sh /usr/src/linux/.config + $ /usr/share/docker/check-config.sh ## Starting Docker -There is a rc script created for Docker. To start the Docker service: +There is an rc script created for Docker. To start the Docker service (*as root*): - $ sudo su - - $ /etc/rc.d/docker start + # /etc/rc.d/docker start To start on system boot: - Edit `/etc/rc.conf` - Put `docker` into the `SERVICES=(...)` array after `net`. + +## Issues + +If you have any issues, please file a bug with the +[CRUX Bug Tracker](http://crux.nu/bugs/). + +## Support + +For support contact the [CRUX Mailing List](http://crux.nu/Main/MailingLists) +or join CRUX's [IRC Channels](http://crux.nu/Main/IrcChannels) on the +[FreeNode](http://freenode.net/) IRC Network. diff --git a/docs/sources/installation/debian.md b/docs/sources/installation/debian.md new file mode 100644 index 0000000000..0ad54b4328 --- /dev/null +++ b/docs/sources/installation/debian.md @@ -0,0 +1,76 @@ +page_title: Installation on Debian +page_description: Instructions for installing Docker on Debian.
+page_keywords: Docker, Docker documentation, installation, debian + +# Debian + +Docker is supported on the following versions of Debian: + + - [*Debian 8.0 Jessie (64-bit)*](#debian-jessie-8-64-bit) + +## Debian Jessie 8.0 (64-bit) + +Debian 8 comes with a 3.14.0 Linux kernel, and a `docker.io` package which +installs all its prerequisites from Debian's repository. + +> **Note**: +> Debian contains a much older KDE3/GNOME2 package called ``docker``, so the +> package and the executable are called ``docker.io``. + +### Installation + +To install the latest Debian package (may not be the latest Docker release): + + $ sudo apt-get update + $ sudo apt-get install docker.io + $ sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker + $ sudo sed -i '$acomplete -F _docker docker' /etc/bash_completion.d/docker.io + +To verify that everything has worked as expected: + + $ sudo docker run -i -t ubuntu /bin/bash + +Which should download the `ubuntu` image, and then start `bash` in a container. + +> **Note**: +> If you want to enable memory and swap accounting, see +> [this](/installation/ubuntulinux/#memory-and-swap-accounting). + +### Giving non-root access + +The `docker` daemon always runs as the `root` user and the `docker` +daemon binds to a Unix socket instead of a TCP port. By default that +Unix socket is owned by the user `root`, and so, by default, you can +access it with `sudo`. + +If you (or your Docker installer) create a Unix group called `docker` +and add users to it, then the `docker` daemon will make the ownership of +the Unix socket read/writable by the `docker` group when the daemon +starts. The `docker` daemon must always run as the root user, but if you +run the `docker` client as a user in the `docker` group then you don't +need to add `sudo` to all the client commands. From Docker 0.9.0 you can +use the `-G` flag to specify an alternative group. + +> **Warning**: +> The `docker` group (or the group specified with the `-G` flag) is +> `root`-equivalent; see [*Docker Daemon Attack Surface*]( +> /articles/security/#dockersecurity-daemon) for details. + +**Example:** + + # Add the docker group if it doesn't already exist. + $ sudo groupadd docker + + # Add the connected user "${USER}" to the docker group. + # Change the user name to match your preferred user. + # You may have to logout and log back in again for + # this to take effect. + $ sudo gpasswd -a ${USER} docker + + # Restart the Docker daemon. + $ sudo service docker restart + +## What next? + +Continue with the [User Guide](/userguide/). + diff --git a/docs/sources/installation/fedora.md b/docs/sources/installation/fedora.md index 93b5b05b13..bcd54e6bd6 100644 --- a/docs/sources/installation/fedora.md +++ b/docs/sources/installation/fedora.md @@ -1,21 +1,9 @@ page_title: Installation on Fedora -page_description: Please note this project is currently under heavy development. It should not be used in production. +page_description: Installation instructions for Docker on Fedora. page_keywords: Docker, Docker documentation, Fedora, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux # Fedora -> **Note**: -> Docker is still under heavy development! We don't recommend using it in -> production yet, but we're getting closer with each release. Please see -> our blog post, [Getting to Docker 1.0]( -> http://blog.docker.io/2013/08/getting-to-docker-1-0/) - -> **Note**: -> This is a community contributed installation path.
The only `official` -> installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) -> installation path. This version may be out of date because it depends on -> some binaries to be updated and published. - Docker is available in **Fedora 19 and later**. Please note that due to the current Docker limitations Docker is able to run only on the **64 bit** architecture. @@ -32,7 +20,7 @@ it. To proceed with `docker-io` installation on Fedora 19, please remove $ sudo yum -y remove docker -For Fedora 20 and later, the `wmdocker` package will +For Fedora 21 and later, the `wmdocker` package will provide the same functionality as `docker` and will also not conflict with `docker-io`. @@ -60,5 +48,7 @@ Now let's verify that Docker is working. $ sudo docker run -i -t fedora /bin/bash -**Done!**, now continue with the [*Hello -World*](/examples/hello_world/#hello-world) example. +## What next? + +Continue with the [User Guide](/userguide/). + diff --git a/docs/sources/installation/frugalware.md b/docs/sources/installation/frugalware.md index eb409d8d39..2c2f922613 100644 --- a/docs/sources/installation/frugalware.md +++ b/docs/sources/installation/frugalware.md @@ -1,21 +1,9 @@ page_title: Installation on FrugalWare -page_description: Please note this project is currently under heavy development. It should not be used in production. +page_description: Installation instructions for Docker on FrugalWare. page_keywords: frugalware linux, virtualization, docker, documentation, installation # FrugalWare -> **Note**: -> Docker is still under heavy development! We don't recommend using it in -> production yet, but we're getting closer with each release. Please see -> our blog post, [Getting to Docker 1.0]( -> http://blog.docker.io/2013/08/getting-to-docker-1-0/) - -> **Note**: -> This is a community contributed installation path. The only `official` -> installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) -> installation path. This version may be out of date because it depends on -> some binaries to be updated and published - Installing on FrugalWare is handled via the official packages: - [lxc-docker i686](http://www.frugalware.org/packages/200141) diff --git a/docs/sources/installation/gentoolinux.md b/docs/sources/installation/gentoolinux.md index 92329dca90..62fdc9f00e 100644 --- a/docs/sources/installation/gentoolinux.md +++ b/docs/sources/installation/gentoolinux.md @@ -1,33 +1,21 @@ page_title: Installation on Gentoo -page_description: Please note this project is currently under heavy development. It should not be used in production. +page_description: Installation instructions for Docker on Gentoo. page_keywords: gentoo linux, virtualization, docker, documentation, installation # Gentoo -> **Note**: -> Docker is still under heavy development! We don't recommend using it in -> production yet, but we're getting closer with each release. Please see -> our blog post, [Getting to Docker 1.0]( -> http://blog.docker.io/2013/08/getting-to-docker-1-0/) - -> **Note**: -> This is a community contributed installation path. The only `official` -> installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) -> installation path. This version may be out of date because it depends on -> some binaries to be updated and published - Installing Docker on Gentoo Linux can be accomplished using one of two methods. The first and best way if you're looking for a stable experience is to use the official app-emulation/docker package directly in the portage tree. 
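For the first (stable) method mentioned above, a typical invocation would be something along these lines (a sketch assuming standard Portage usage; the exact command is not spelled out in this diff):

    # Install the stable package from the portage tree
    $ sudo emerge -av app-emulation/docker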
-If you're looking for a `-bin` ebuild, a live -ebuild, or bleeding edge ebuild changes/fixes, the second installation -method is to use the overlay provided at +If you're looking for a `-bin` ebuild, a live ebuild, or bleeding edge +ebuild changes/fixes, the second installation method is to use the +overlay provided at [https://github.com/tianon/docker-overlay](https://github.com/tianon/docker-overlay) -which can be added using `app-portage/layman`. The -most accurate and up-to-date documentation for properly installing and -using the overlay can be found in [the overlay +which can be added using `app-portage/layman`. The most accurate and +up-to-date documentation for properly installing and using the overlay +can be found in [the overlay README](https://github.com/tianon/docker-overlay/blob/master/README.md#using-this-overlay). Note that sometimes there is a disparity between the latest version and diff --git a/docs/sources/installation/google.md b/docs/sources/installation/google.md index 4c22808dcb..fe3d0619ce 100644 --- a/docs/sources/installation/google.md +++ b/docs/sources/installation/google.md @@ -1,63 +1,48 @@ page_title: Installation on Google Cloud Platform -page_description: Please note this project is currently under heavy development. It should not be used in production. +page_description: Installation instructions for Docker on the Google Cloud Platform. page_keywords: Docker, Docker documentation, installation, google, Google Compute Engine, Google Cloud Platform # Google Cloud Platform -> **Note**: -> Docker is still under heavy development! We don't recommend using it in -> production yet, but we're getting closer with each release. Please see -> our blog post, [Getting to Docker 1.0]( -> http://blog.docker.io/2013/08/getting-to-docker-1-0/) +## QuickStart with Container-optimized Google Compute Engine images -## Compute Engine QuickStart for Debian +1. Go to [Google Cloud Console][1] and create a new Cloud Project with + [Compute Engine enabled][2] -1. Go to [Google Cloud Console](https://cloud.google.com/console) and - create a new Cloud Project with [Compute Engine - enabled](https://developers.google.com/compute/docs/signup). -2. Download and configure the [Google Cloud SDK]( - https://developers.google.com/cloud/sdk/) to use your project - with the following commands: - - +2. Download and configure the [Google Cloud SDK][3] to use your + project with the following commands: + ``` $ curl https://dl.google.com/dl/cloudsdk/release/install_google_cloud_sdk.bash | bash $ gcloud auth login Enter a cloud project id (or leave blank to not set): - -3. Start a new instance, select a zone close to you and the desired - instance size: - - - - $ gcutil addinstance docker-playground --image=backports-debian-7 - 1: europe-west1-a ... - 4: us-central1-b - >>> - 1: machineTypes/n1-standard-1 - ... - 12: machineTypes/g1-small - >>> + ``` -4. Connect to the instance using SSH: +3. Start a new instance using the latest [Container-optimized image][4]: + (select a zone close to you and the desired instance size) - + ``` + $ gcloud compute instances create docker-playground \ + --image projects/google-containers/global/images/container-vm-v20140522 \ + --zone us-central1-a \ + --machine-type f1-micro + ``` - $ gcutil ssh docker-playground - $ docker-playground:~$ +4. Connect to the instance using SSH: -5. 
Install the latest Docker release and configure it to start when the - instance boots: + ``` + $ gcloud compute ssh --zone us-central1-a docker-playground + ``` + ``` + docker-playground:~$ sudo docker run busybox echo 'docker on GCE \o/' + docker on GCE \o/ + ``` - +Read more about [deploying Containers on Google Cloud Platform][5]. - $ docker-playground:~$ curl get.docker.io | bash - $ docker-playground:~$ sudo update-rc.d docker defaults - -6. Start a new container: - - - - $ docker-playground:~$ sudo docker run busybox echo 'docker on GCE \o/' - $ docker on GCE \o/ +[1]: https://cloud.google.com/console +[2]: https://developers.google.com/compute/docs/signup +[3]: https://developers.google.com/cloud/sdk +[4]: https://developers.google.com/compute/docs/containers#container-optimized_google_compute_engine_images +[5]: https://developers.google.com/compute/docs/containers diff --git a/docs/sources/installation/images/osx-installer.png b/docs/sources/installation/images/osx-installer.png new file mode 100644 index 0000000000..635ac354ed Binary files /dev/null and b/docs/sources/installation/images/osx-installer.png differ diff --git a/docs/sources/installation/images/windows-boot2docker-start.png b/docs/sources/installation/images/windows-boot2docker-start.png new file mode 100644 index 0000000000..189289638d Binary files /dev/null and b/docs/sources/installation/images/windows-boot2docker-start.png differ diff --git a/docs/sources/installation/images/windows-installer.png b/docs/sources/installation/images/windows-installer.png new file mode 100644 index 0000000000..6305dcc6d9 Binary files /dev/null and b/docs/sources/installation/images/windows-installer.png differ diff --git a/docs/sources/installation/install_header.inc b/docs/sources/installation/install_header.inc deleted file mode 100644 index c9b9e4c494..0000000000 --- a/docs/sources/installation/install_header.inc +++ /dev/null @@ -1,7 +0,0 @@ - -.. note:: - - Docker is still under heavy development! We don't recommend using - it in production yet, but we're getting closer with each - release. Please see our blog post, `"Getting to Docker 1.0" - `_ diff --git a/docs/sources/installation/install_unofficial.inc b/docs/sources/installation/install_unofficial.inc deleted file mode 100644 index 8d121918b5..0000000000 --- a/docs/sources/installation/install_unofficial.inc +++ /dev/null @@ -1,7 +0,0 @@ - -.. note:: - - This is a community contributed installation path. The only - 'official' installation is using the :ref:`ubuntu_linux` - installation path. This version may be out of date because it - depends on some binaries to be updated and published diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md index c30e0b6440..c450cd12d2 100644 --- a/docs/sources/installation/mac.md +++ b/docs/sources/installation/mac.md @@ -13,174 +13,96 @@ page_keywords: Docker, Docker documentation, requirements, boot2docker, VirtualB > **Note:** > Docker is supported on Mac OS X 10.6 "Snow Leopard" or newer. -Docker has two key components: the Docker daemon and the `docker` binary -which acts as a client. The client passes instructions to the daemon -which builds, runs and manages your Docker containers. As Docker uses -some Linux-specific kernel features you can't use it directly on OS X. -Instead we run the Docker daemon inside a lightweight virtual machine on your local -OS X host. We can then use a native client `docker` binary to communicate -with the Docker daemon inside our virtual machine. 
To make this process -easier we've designed a helper application called -[boot2docker](https://github.com/boot2docker/boot2docker) to install -that virtual machine and run our Docker daemon. +The Docker Engine uses Linux-specific kernel features, so we run it on OS X +using a lightweight virtual machine. You can use the OS X Docker client to +control the virtualized engine to build, run and manage Docker containers. -[boot2docker](https://github.com/boot2docker/boot2docker) uses -VirtualBox to create the virtual machine so we'll need to install that -first. +To make this process easier we designed a helper application called +[boot2docker](https://github.com/boot2docker/boot2docker) to install the +virtual machine and run the Docker daemon. -## Installing VirtualBox +## Installation -Docker on OS X needs VirtualBox to run. To begin with, head over to -[VirtualBox Download Page](https://www.virtualbox.org/wiki/Downloads) -and get the tool for `OS X hosts x86/amd64`. +1. Download the latest release of the [Docker for OSX Installer]( + https://github.com/boot2docker/osx-installer/releases) +2. Run the installer, which will install VirtualBox and the Boot2Docker management + tool. + ![](/installation/images/osx-installer.png) +3. Open a terminal and run: -Once the download is complete, open the disk image, run `VirtualBox.pkg` -and install VirtualBox. +``` + boot2docker init + boot2docker start + export DOCKER_HOST=tcp://localhost:2375 +``` -> **Note**: -> Do not simply copy the package without running the -> installer. +`boot2docker init` will ask you to enter an SSH key passphrase; the simplest +(but least secure) choice is to just hit [Enter]. This passphrase is used by the +`boot2docker ssh` command. -## Installing boot2docker +Once you have an initialized virtual machine, you can `boot2docker stop` +and `boot2docker start` it. -### Installing manually +## Upgrading -[boot2docker](https://github.com/boot2docker/boot2docker) provides a -handy script to manage the VM running the Docker daemon. It also takes -care of the installation of that VM. +To upgrade: -Open up a new terminal window and run the following commands to get -boot2docker: +1. Download the latest release of the [Docker for OSX Installer]( + https://github.com/boot2docker/osx-installer/releases) +2. Run the installer, which will update VirtualBox and the Boot2Docker management + tool. +3. To upgrade your existing virtual machine, open a terminal and run: - # Enter the installation directory - $ mkdir -p ~/bin - $ cd ~/bin +``` + boot2docker stop + boot2docker download + boot2docker start +``` - # Get the file - $ curl https://raw.githubusercontent.com/boot2docker/boot2docker/master/boot2docker > boot2docker +## Running Docker - # Mark it executable - $ chmod +x boot2docker +From your terminal, you can try the "hello world" example. Run: -### Installing the Docker OS X Client + $ docker run ubuntu echo hello world -The Docker daemon is accessed using the `docker` binary. +This will download the `ubuntu` image and print `hello world`.
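As a rough sketch of what to expect from that first run (the pull output here is illustrative and varies by Docker version; only the final line is produced by the `echo`):

    $ docker run ubuntu echo hello world
    Unable to find image 'ubuntu' locally
    Pulling repository ubuntu
    ...
    hello world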
-Run the following commands to get it downloaded and set up: +## Container port redirection - # Get the docker binary - $ DIR=$(mktemp -d ${TMPDIR:-/tmp}/dockerdl.XXXXXXX) && \ - curl -f -o $DIR/ld.tgz https://get.docker.io/builds/Darwin/x86_64/docker-latest.tgz && \ - gunzip $DIR/ld.tgz && \ - tar xvf $DIR/ld.tar -C $DIR/ && \ - cp $DIR/usr/local/bin/docker ./docker +The latest version of `boot2docker` sets up two network adapters: one using NAT +to allow the VM to download images and files from the Internet, and one host-only +network adapter on which the container's ports will be exposed. - # Copy the executable file - $ sudo mkdir -p /usr/local/bin - $ sudo cp docker /usr/local/bin/ +If you run a container with an exposed port: -### Configure the Docker OS X Client +``` + docker run --rm -i -t -p 80:80 apache +``` -The Docker client, `docker`, uses an environment variable `DOCKER_HOST` -to specify the location of the Docker daemon to connect to. Specify your -local boot2docker virtual machine as the value of that variable. +Then you should be able to access that Apache server using the IP address reported +by: - $ export DOCKER_HOST=tcp://127.0.0.1:4243 +``` + boot2docker ssh ip addr show dev eth1 +``` -## Installing boot2docker with Homebrew +Typically, it is 192.168.59.103, but it may vary. -If you are using Homebrew on your machine, simply run the following -command to install `boot2docker`: +If you want to share container ports with other computers on your LAN, you will +need to set up [NAT adapter based port forwarding]( +https://github.com/boot2docker/boot2docker/blob/master/doc/WORKAROUNDS.md). - $ brew install boot2docker +# Further details -Run the following command to install the Docker client: +The Boot2Docker management tool provides some commands: - $ brew install docker +``` +$ ./boot2docker +Usage: ./boot2docker [<options>] +{help|init|up|ssh|save|down|poweroff|reset|restart|config|status|info|delete|download|version} +[<args>] +``` -And that's it! Let's check out how to use it. - -# How To Use Docker On Mac OS X - -## Running the Docker daemon via boot2docker - -Firstly we need to initialize our boot2docker virtual machine. Run the -`boot2docker` command. - - $ boot2docker init - -This will setup our initial virtual machine. - -Next we need to start the Docker daemon. - - $ boot2docker up - -There are a variety of others commands available using the `boot2docker` -script. You can see these like so: - - $ boot2docker - Usage ./boot2docker {init|start|up|pause|stop|restart|status|info|delete|ssh|download} - -## The Docker client - -Once the virtual machine with the Docker daemon is up, you can use the `docker` -binary just like any other application. - - $ docker version - Client version: 0.10.0 - Client API version: 1.10 - Server version: 0.10.0 - Server API version: 1.10 - Last stable version: 0.10.0 - -## Using Docker port forwarding with boot2docker - -In order to forward network ports from Docker with boot2docker we need to -manually forward the port range Docker uses inside VirtualBox. To do -this we take the port range that Docker uses by default with the `-P` -option, ports 49000-49900, and run the following command. - -> **Note:** -> The boot2docker virtual machine must be powered off for this -> to work.
- - for i in {49000..49900}; do - VBoxManage modifyvm "boot2docker-vm" --natpf1 "tcp-port$i,tcp,,$i,,$i"; - VBoxManage modifyvm "boot2docker-vm" --natpf1 "udp-port$i,udp,,$i,,$i"; - done - -## Connecting to the VM via SSH - -If you feel the need to connect to the VM, you can simply run: - - $ ./boot2docker ssh - - # User: docker - # Pwd: tcuser - -If SSH complains about keys then run: - - $ ssh-keygen -R '[localhost]:2022' - -## Upgrading to a newer release of boot2docker - -To upgrade an initialized boot2docker virtual machine, you can use the -following 3 commands. Your virtual machine's disk will not be changed, -so you won't lose your images and containers: - - $ boot2docker stop - $ boot2docker download - $ boot2docker start - -# Learn More - -## boot2docker - -See the GitHub page for -[boot2docker](https://github.com/boot2docker/boot2docker). - -# Next steps - -You can now continue with the [*Hello -World*](/examples/hello_world/#hello-world) example. +Continue with the [User Guide](/userguide/). +For further information or to report issues, please see the [Boot2Docker site](http://boot2docker.io). diff --git a/docs/sources/installation/openSUSE.md b/docs/sources/installation/openSUSE.md index 07f2ca43d2..ce79de2699 100644 --- a/docs/sources/installation/openSUSE.md +++ b/docs/sources/installation/openSUSE.md @@ -1,21 +1,9 @@ page_title: Installation on openSUSE -page_description: Please note this project is currently under heavy development. It should not be used in production. +page_description: Installation instructions for Docker on openSUSE. page_keywords: openSUSE, virtualbox, docker, documentation, installation # openSUSE -> **Note**: -> Docker is still under heavy development! We don't recommend using it in -> production yet, but we're getting closer with each release. Please see -> our blog post, [Getting to Docker 1.0]( -> http://blog.docker.io/2013/08/getting-to-docker-1-0/) - -> **Note**: -> This is a community contributed installation path. The only `official` -> installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) -> installation path. This version may be out of date because it depends on -> some binaries to be updated and published - Docker is available in **openSUSE 12.3 and later**. Please note that due to the current Docker limitations, Docker is able to run only on the **64 bit** architecture. @@ -60,5 +48,6 @@ Docker daemon. $ sudo usermod -G docker <username> **Done!** -Now continue with the [*Hello World*]( -/examples/hello_world/#hello-world) example. + +Continue with the [User Guide](/userguide/). + diff --git a/docs/sources/installation/rackspace.md b/docs/sources/installation/rackspace.md index c93af388ed..1aa969d1e5 100644 --- a/docs/sources/installation/rackspace.md +++ b/docs/sources/installation/rackspace.md @@ -1,15 +1,9 @@ page_title: Installation on Rackspace Cloud -page_description: Please note this project is currently under heavy development. It should not be used in production. +page_description: Installation instructions for Docker on Rackspace Cloud. page_keywords: Rackspace Cloud, installation, docker, linux, ubuntu # Rackspace Cloud -> **Note**: -> This is a community contributed installation path. The only `official` -> installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) -> installation path.
This version may be out of date because it depends on -> some binaries to be updated and published - Installing Docker on Ubuntu provided by Rackspace is pretty straightforward, and you should mostly be able to follow the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) installation guide. diff --git a/docs/sources/installation/rhel.md b/docs/sources/installation/rhel.md index 632743a2b9..c144573687 100644 --- a/docs/sources/installation/rhel.md +++ b/docs/sources/installation/rhel.md @@ -1,21 +1,9 @@ page_title: Installation on Red Hat Enterprise Linux -page_description: Please note this project is currently under heavy development. It should not be used in production. +page_description: Installation instructions for Docker on Red Hat Enterprise Linux. page_keywords: Docker, Docker documentation, requirements, linux, rhel, centos # Red Hat Enterprise Linux -> **Note**: -> Docker is still under heavy development! We don't recommend using it in -> production yet, but we're getting closer with each release. Please see -> our blog post, [Getting to Docker 1.0]( -> http://blog.docker.io/2013/08/getting-to-docker-1-0/) - -> **Note**: -> This is a community contributed installation path. The only `official` -> installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) -> installation path. This version may be out of date because it depends on -> some binaries to be updated and published - Docker is available for **RHEL** on EPEL. These instructions should work for both RHEL and CentOS. They will likely work for other binary compatible EL6 distributions as well, but they haven't been tested. @@ -68,7 +56,8 @@ Now let's verify that Docker is working. $ sudo docker run -i -t fedora /bin/bash **Done!** -Now continue with the [*Hello World*](/examples/hello_world/#hello-world) example. + +Continue with the [User Guide](/userguide/). ## Issues? diff --git a/docs/sources/installation/softlayer.md b/docs/sources/installation/softlayer.md index 11a192c61a..d01866720c 100644 --- a/docs/sources/installation/softlayer.md +++ b/docs/sources/installation/softlayer.md @@ -1,17 +1,9 @@ page_title: Installation on IBM SoftLayer -page_description: Please note this project is currently under heavy development. It should not be used in production. +page_description: Installation instructions for Docker on IBM Softlayer. page_keywords: IBM SoftLayer, virtualization, cloud, docker, documentation, installation # IBM SoftLayer -> **Note**: -> Docker is still under heavy development! We don't recommend using it in -> production yet, but we're getting closer with each release. Please see -> our blog post, [Getting to Docker 1.0]( -> http://blog.docker.io/2013/08/getting-to-docker-1-0/) - -## IBM SoftLayer QuickStart - 1. Create an [IBM SoftLayer account]( https://www.softlayer.com/cloud-servers/). 2. Log in to the [SoftLayer Console]( @@ -32,5 +24,7 @@ page_keywords: IBM SoftLayer, virtualization, cloud, docker, documentation, inst 7. Then continue with the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) instructions. -Continue with the [*Hello World*]( -/examples/hello_world/#hello-world) example. +## What next? + +Continue with the [User Guide](/userguide/). + diff --git a/docs/sources/installation/ubuntulinux.md b/docs/sources/installation/ubuntulinux.md index d40e17b646..78a0679e55 100644 --- a/docs/sources/installation/ubuntulinux.md +++ b/docs/sources/installation/ubuntulinux.md @@ -1,15 +1,9 @@ page_title: Installation on Ubuntu -page_description: Please note this project is currently under heavy development. 
It should not be used in production.
-page_keywords: Docker, Docker documentation, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux
+page_description: Instructions for installing Docker on Ubuntu.
+page_keywords: Docker, Docker documentation, requirements, virtualbox, installation, ubuntu

 # Ubuntu

-> **Note**:
-> Docker is still under heavy development! We don't recommend using it in
-> production yet, but we're getting closer with each release. Please see
-> our blog post, [Getting to Docker 1.0](
-> http://blog.docker.io/2013/08/getting-to-docker-1-0/)
-
 Docker is supported on the following versions of Ubuntu:

 - [*Ubuntu Trusty 14.04 (LTS) (64-bit)*](#ubuntu-trusty-1404-lts-64-bit)
@@ -36,6 +30,7 @@ To install the latest Ubuntu package (may not be the latest Docker release):
     $ sudo apt-get update
     $ sudo apt-get install docker.io
     $ sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker
+    $ sudo sed -i '$acomplete -F _docker docker' /etc/bash_completion.d/docker.io

 To verify that everything has worked as expected:

@@ -116,8 +111,7 @@ Now verify that the installation has worked by downloading the

 Type `exit` to exit

-**Done!**, now continue with the [*Hello
-World*](/examples/hello_world/#hello-world) example.
+**Done!** Continue with the [User Guide](/userguide/).

 ## Ubuntu Raring 13.04 and Saucy 13.10 (64 bit)

@@ -164,31 +158,27 @@ Now verify that the installation has worked by downloading the

 Type `exit` to exit

-**Done!**, now continue with the [*Hello
-World*](/examples/hello_world/#hello-world) example.
+**Done!** Now continue with the [User Guide](/userguide/).

 ### Giving non-root access

-The `docker` daemon always runs as the root user,
-and since Docker version 0.5.2, the `docker` daemon
-binds to a Unix socket instead of a TCP port. By default that Unix
-socket is owned by the user *root*, and so, by default, you can access
-it with `sudo`.
+The `docker` daemon always runs as the `root` user, and since Docker
+version 0.5.2, the `docker` daemon binds to a Unix socket instead of a
+TCP port. By default that Unix socket is owned by the user `root`, and
+so, by default, you can access it with `sudo`.

 Starting in version 0.5.3, if you (or your Docker installer) create a
-Unix group called *docker* and add users to it, then the
-`docker` daemon will make the ownership of the Unix
-socket read/writable by the *docker* group when the daemon starts. The
-`docker` daemon must always run as the root user,
-but if you run the `docker` client as a user in the
-*docker* group then you don't need to add `sudo` to
-all the client commands. As of 0.9.0, you can specify that a group other
-than `docker` should own the Unix socket with the
-`-G` option.
+Unix group called `docker` and add users to it, then the `docker` daemon
+will make the Unix socket read/writable by the `docker` group when the
+daemon starts. The `docker` daemon must always run as the `root` user,
+but if you run the `docker` client as a user in the `docker` group then
+you don't need to add `sudo` to all the client commands. From Docker
+0.9.0 you can use the `-G` flag to specify an alternative group.

 > **Warning**:
-> The *docker* group (or the group specified with `-G`) is
-> root-equivalent; see [*Docker Daemon Attack Surface*](
+> The `docker` group (or the group specified with the `-G` flag) is
+> `root`-equivalent; see [*Docker Daemon Attack Surface*](
> /articles/security/#dockersecurity-daemon) for details.
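
For the `-G` flag mentioned above, a minimal sketch of pointing the daemon at an
alternative socket group (illustrative only: it assumes Docker 0.9.0 or later,
and the group name `dockergroup` is a placeholder for a group that already
exists on the host):

    # Start the Docker daemon with a non-default group owning the Unix socket
    $ sudo docker -d -G dockergroup
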
**Example:** @@ -203,6 +193,7 @@ than `docker` should own the Unix socket with the $ sudo gpasswd -a ${USER} docker # Restart the Docker daemon. + # If you are in Ubuntu 14.04, use docker.io instead of docker $ sudo service docker restart ### Upgrade @@ -268,9 +259,9 @@ Then reload UFW: UFW's default set of rules denies all incoming traffic. If you want to be able to reach your containers from another host then you should allow -incoming connections on the Docker port (default 4243): +incoming connections on the Docker port (default 2375): - $ sudo ufw allow 4243/tcp + $ sudo ufw allow 2375/tcp ## Docker and local DNS server warnings diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md index ec633508c4..a736114296 100644 --- a/docs/sources/installation/windows.md +++ b/docs/sources/installation/windows.md @@ -1,56 +1,57 @@ page_title: Installation on Windows -page_description: Please note this project is currently under heavy development. It should not be used in production. +page_description: Docker installation on Microsoft Windows page_keywords: Docker, Docker documentation, Windows, requirements, virtualbox, boot2docker # Windows -Docker can run on Windows using a virtualization platform like -VirtualBox. A Linux distribution is run inside a virtual machine and -that's where Docker will run. - -## Installation - > **Note**: > Docker is still under heavy development! We don't recommend using it in > production yet, but we're getting closer with each release. Please see > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) -1. Install virtualbox from [https://www.virtualbox.org]( - https://www.virtualbox.org) - or follow this [tutorial]( - http://www.slideshare.net/julienbarbier42/install-virtualbox-on-windows-7). -2. Download the latest boot2docker.iso from - [https://github.com/boot2docker/boot2docker/releases]( - https://github.com/boot2docker/boot2docker/releases). -3. Start VirtualBox. -4. Create a new Virtual machine with the following settings: +Docker Engine runs on Windows using a lightweight virtual machine. There +is no native Windows Docker client yet, so everything is done inside the virtual +machine. - - Name: boot2docker - - Type: Linux - - Version: Linux 2.6 (64 bit) - - Memory size: 1024 MB - - Hard drive: Do not add a virtual hard drive +To make this process easier we designed a helper application called +[boot2docker](https://github.com/boot2docker/boot2docker) to install the +virtual machine and run the Docker daemon. -5. Open the settings of the virtual machine: - 5.1. go to Storage - 5.2. click the empty slot below Controller: IDE - 5.3. click the disc icon on the right of IDE Secondary Master - 5.4. click Choose a virtual CD/DVD disk file +## Installation -6. Browse to the path where you`ve saved the boot2docker.iso, select - the boot2docker.iso and click open. +1. Download the latest release of the [Docker for Windows Installer](https://github.com/boot2docker/windows-installer/releases) +2. Run the installer, which will install VirtualBox, MSYS-git, the boot2docker Linux ISO and the + Boot2Docker management tool. + ![](/installation/images/windows-installer.png) +3. Run the `Boot2Docker Start` shell script from your Desktop or Program Files > Docker. + The Start script will ask you to enter an ssh key passphrase - the simplest + (but least secure) is to just hit [Enter]. + ![](/installation/images/windows-boot2docker-start.png) -7. 
Click OK on the Settings dialog to save the changes and close the
-    window.
+The `Boot2Docker Start` script will connect you to a shell session in the
+virtual machine. If needed, it will initialise a new VM and start it.

-8. Start the virtual machine by clicking the green start button.
+## Upgrading
+
+To upgrade:
+
+1. Download the latest release of the [Docker for Windows Installer](
+   https://github.com/boot2docker/windows-installer/releases)
+2. Run the installer, which will update the Boot2Docker management tool.
+3. To upgrade your existing virtual machine, open a terminal and run:
+
+```
+    boot2docker stop
+    boot2docker download
+    boot2docker start
+```

-9. The boot2docker virtual machine should boot now.
 ## Running Docker

-boot2docker will log you in automatically so you can start using Docker
+Boot2Docker will log you in automatically so you can start using Docker
 right away.

 Let's try the “hello world” example. Run
@@ -59,10 +60,40 @@ Let's try the “hello world” example. Run

 This will download the small busybox image and print hello world.

-## Observations
+# Further Details

-### Persistent storage
+The Boot2Docker management tool provides some commands:

-The virtual machine created above lacks any persistent data storage. All
-images and containers will be lost when shutting down or rebooting the
-VM.
+```
+$ ./boot2docker
+Usage: ./boot2docker [<options>] {help|init|up|ssh|save|down|poweroff|reset|restart|config|status|info|delete|download|version} [<args>]
+```
+
+## Container port redirection
+
+The latest version of `boot2docker` sets up two network adaptors: one using NAT
+to allow the VM to download images and files from the Internet, and one host-only
+network adaptor on which the container's ports will be exposed.
+
+If you run a container with an exposed port:
+
+```
+    docker run --rm -i -t -p 80:80 apache
+```
+
+Then you should be able to access that Apache server using the IP address
+reported by:
+
+```
+    boot2docker ssh ip addr show dev eth1
+```
+
+Typically, it is 192.168.59.103, but it could change.
+
+If you want to share container ports with other computers on your LAN, you will
+need to set up [NAT adaptor based port forwarding](
+https://github.com/boot2docker/boot2docker/blob/master/doc/WORKAROUNDS.md).
+
+
+
+For further information or to report issues, please see the [Boot2Docker site](http://boot2docker.io).
diff --git a/docs/sources/introduction/get-docker.md b/docs/sources/introduction/get-docker.md
deleted file mode 100644
index e0d6f16654..0000000000
--- a/docs/sources/introduction/get-docker.md
+++ /dev/null
@@ -1,77 +0,0 @@
-page_title: Getting Docker
-page_description: Getting Docker and installation tutorials
-page_keywords: docker, introduction, documentation, about, technology, understanding, Dockerfile
-
-# Getting Docker
-
-*How to install Docker?*
-
-## Introductions
-
-Once you are comfortable with your level of knowledge of Docker, and
-feel like actually trying the product, you can download and start using
-it by following the links listed below. There, you will find
-installation instructions, specifically tailored for your platform of choice.
- -## Installation Instructions - -### Linux (Native) - - - **Arch Linux:** - [Installation on Arch Linux](../installation/archlinux.md) - - **Fedora:** - [Installation on Fedora](../installation/fedora.md) - - **FrugalWare:** - [Installation on FrugalWare](../installation/frugalware.md) - - **Gentoo:** - [Installation on Gentoo](../installation/gentoolinux.md) - - **Red Hat Enterprise Linux:** - [Installation on Red Hat Enterprise Linux](../installation/rhel.md) - - **Ubuntu:** - [Installation on Ubuntu](../installation/ubuntulinux.md) - - **openSUSE:** - [Installation on openSUSE](../installation/openSUSE.md) - -### Mac OS X (Using Boot2Docker) - -In order to work, Docker makes use of some Linux Kernel features which -are not supported by Mac OS X. To run Docker on OS X we install and run -a lightweight virtual machine and run Docker on that. - - - **Mac OS X :** - [Installation on Mac OS X](../installation/mac.md) - -### Windows (Using Boot2Docker) - -Docker can also run on Windows using a virtual machine. You then run -Linux and Docker inside that virtual machine. - - - **Windows:** - [Installation on Windows](../installation/windows.md) - -### Infrastructure-as-a-Service - - - **Amazon EC2:** - [Installation on Amazon EC2](../installation/amazon.md) - - **Google Cloud Platform:** - [Installation on Google Cloud Platform](../installation/google.md) - - **Rackspace Cloud:** - [Installation on Rackspace Cloud](../installation/rackspace.md) - -## Where to go from here - -### Understanding Docker - -Visit [Understanding Docker](understanding-docker.md) in our Getting Started manual. - -### Learn about parts of Docker and the underlying technology - -Visit [Understanding the Technology](technology.md) in our Getting Started manual. - -### Get practical and learn how to use Docker straight away - -Visit [Working with Docker](working-with-docker.md) in our Getting Started manual. - -### Get the whole story - -[https://www.docker.io/the_whole_story/](https://www.docker.io/the_whole_story/) diff --git a/docs/sources/introduction/technology.md b/docs/sources/introduction/technology.md deleted file mode 100644 index a724e4aae6..0000000000 --- a/docs/sources/introduction/technology.md +++ /dev/null @@ -1,268 +0,0 @@ -page_title: Understanding the Technology -page_description: Technology of Docker explained in depth -page_keywords: docker, introduction, documentation, about, technology, understanding, Dockerfile - -# Understanding the Technology - -*What is the architecture of Docker? What is its underlying technology?* - -## Introduction - -When it comes to understanding Docker and its underlying technology -there is no *magic* involved. Everything is based on tried and tested -features of the *Linux kernel*. Docker either makes use of those -features directly or builds upon them to provide new functionality. - -Aside from the technology, one of the major factors that make Docker -great is the way it is built. The project's core is very lightweight and -as much of Docker as possible is designed to be pluggable. Docker is -also built with integration in mind and has a fully featured API that -allows you to access all of the power of Docker from inside your own -applications. - -## The Architecture of Docker - -Docker is designed for developers and sysadmins. It's built to help you -build applications and services and then deploy them quickly and -efficiently: from development to production. - -Let's take a look. - --- Docker is a client-server application. 
--- Both the Docker client and the daemon *can* run on the same system, or; --- You can connect a Docker client with a remote Docker daemon. --- They communicate via sockets or through a RESTful API. --- Users interact with the client to command the daemon, e.g. to create, run, and stop containers. --- The daemon, receiving those commands, does the job, e.g. run a container, stop a container. - -![Docker Architecture Diagram](/article-img/architecture.svg) - -## The components of Docker - -Docker's main components are: - - - Docker *daemon*; - - Docker *client*, and; - - [Docker.io](https://index.docker.io) registry. - -### The Docker daemon - -As shown on the diagram above, the Docker daemon runs on a host machine. -The user does not directly interact with the daemon, but instead through -an intermediary: the Docker client. - -### Docker client - -The Docker client is the primary user interface to Docker. It is tasked -with accepting commands from the user and communicating back and forth -with a Docker daemon to manage the container lifecycle on any host. - -### Docker.io registry - -[Docker.io](https://index.docker.io) is the global archive (and -directory) of user supplied Docker container images. It currently hosts -a large – in fact, rapidly growing – number of projects where you -can find almost any popular application or deployment stack readily -available to download and run with a single command. - -As a social community project, Docker tries to provide all necessary -tools for everyone to grow with other *Dockers*. By issuing a single -command through the Docker client you can start sharing your own -creations with the rest of the world. - -However, knowing that not everything can be shared the [Docker.io]( -https://index.docker.io) also offers private repositories. In order to see -the available plans, you can click [here](https://index.docker.io/plans). - -Using [*docker-registry*](https://github.com/dotcloud/docker-registry), it is -also possible to run your own private Docker image registry service on your own -servers. - -> **Note:** To learn more about the [*Docker.io*](http://index.docker.io) -> registry (for public *and* private repositories), check out the [Registry & -> Index Spec](http://docs.docker.io/api/registry_index_spec/). - -### Summary - - - **When you install Docker, you get all the components:** - The daemon, the client and access to the [Docker.io](http://index.docker.io) registry. - - **You can run these components together or distributed:** - Servers with the Docker daemon running, controlled by the Docker client. - - **You can benefit form the public registry:** - Download and build upon images created by the community. - - **You can start a private repository for proprietary use.** - Sign up for a [plan](https://index.docker.io/plans) or host your own [docker-registry]( -https://github.com/dotcloud/docker-registry). - -## Elements of Docker - -The basic elements of Docker are: - - - **Containers, which allow:** - The run portion of Docker. Your applications run inside of containers. - - **Images, which provide:** - The build portion of Docker. Your containers are built from images. - - **The Dockerfile, which automates:** - A file that contains simple instructions that build Docker images. - -To get practical and learn what they are, and **_how to work_** with -them, continue to [Working with Docker](working-with-docker.md). If you would like to -understand **_how they work_**, stay here and continue reading. 
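
As a quick sketch of how these three elements fit together (the image name
`my_image` is illustrative; it assumes Docker is installed and that the current
directory contains a `Dockerfile`):

    # Build an image from the Dockerfile in the current directory,
    # then run a container from the resulting image
    $ docker build -t my_image .
    $ docker run my_image echo "hello from a container"
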
- -## The underlying technology - -The power of Docker comes from the underlying technology it is built -from. A series of operating system features are carefully glued together -to provide Docker's features and provide an easy to use interface to -those features. In this section, we will see the main operating system -features that Docker uses to make easy containerization happen. - -### Namespaces - -Docker takes advantage of a technology called `namespaces` to provide -an isolated workspace we call a *container*. When you run a container, -Docker creates a set of *namespaces* for that container. - -This provides a layer of isolation: each process runs in its own -namespace and does not have access outside it. - -Some of the namespaces Docker uses are: - - - **The `pid` namespace:** - Used for process numbering (PID: Process ID) - - **The `net` namespace:** - Used for managing network interfaces (NET: Networking) - - **The `ipc` namespace:** - Used for managing access to IPC resources (IPC: InterProcess Communication) - - **The `mnt` namespace:** - Used for managing mount-points (MNT: Mount) - - **The `uts` namespace:** - Used for isolating kernel / version identifiers. (UTS: Unix Timesharing System) - -### Control groups - -Docker also makes use of another technology called `cgroups` or control -groups. A key need to run applications in isolation is to have them -contained, not just in terms of related filesystem and/or dependencies, -but also, resources. Control groups allow Docker to fairly -share available hardware resources to containers and if asked, set up to -limits and constraints, for example limiting the memory to a maximum of 128 -MBs. - -### UnionFS - -UnionFS or union filesystems are filesystems that operate by creating -layers, making them very lightweight and fast. Docker uses union -filesystems to provide the building blocks for containers. We'll see -more about this below. - -### Containers - -Docker combines these components to build a container format we call -`libcontainer`. Docker also supports traditional Linux containers like -[LXC](https://linuxcontainers.org/) which also make use of these -components. - -## How does everything work - -A lot happens when Docker creates a container. - -Let's see how it works! - -### How does a container work? - -A container consists of an operating system, user added files and -meta-data. Each container is built from an image. That image tells -Docker what the container holds, what process to run when the container -is launched and a variety of other configuration data. The Docker image -is read-only. When Docker runs a container from an image it adds a -read-write layer on top of the image (using the UnionFS technology we -saw earlier) to run inside the container. - -### What happens when you run a container? - -The Docker client (or the API!) tells the Docker daemon to run a -container. Let's take a look at a simple `Hello world` example. - - $ docker run -i -t ubuntu /bin/bash - -Let's break down this command. The Docker client is launched using the -`docker` binary. The bare minimum the Docker client needs to tell the -Docker daemon is: - -* What Docker image to build the container from; -* The command you want to run inside the container when it is launched. - -So what happens under the covers when we run this command? 
- -Docker begins with: - - - **Pulling the `ubuntu` image:** - Docker checks for the presence of the `ubuntu` image and if it doesn't - exist locally on the host, then Docker downloads it from [Docker.io](https://index.docker.io) - - **Creates a new container:** - Once Docker has the image it creates a container from it. - - **Allocates a filesystem and mounts a read-write _layer_:** - The container is created in the filesystem and a read-write layer is added to the image. - - **Allocates a network / bridge interface:** - Creates a network interface that allows the Docker container to talk to the local host. - - **Sets up an IP address:** - Intelligently finds and attaches an available IP address from a pool. - - **Executes _a_ process that you specify:** - Runs your application, and; - - **Captures and provides application output:** - Connects and logs standard input, outputs and errors for you to see how your application is running. - -### How does a Docker Image work? - -We've already seen that Docker images are read-only templates that -Docker containers are launched from. When you launch that container it -creates a read-write layer on top of that image that your application is -run in. - -Docker images are built using a simple descriptive set of steps we -call *instructions*. Instructions are stored in a file called a -`Dockerfile`. Each instruction writes a new layer to an image using the -UnionFS technology we saw earlier. - -Every image starts from a base image, for example `ubuntu` a base Ubuntu -image or `fedora` a base Fedora image. Docker builds and provides these -base images via [Docker.io](http://index.docker.io). - -### How does a Docker registry work? - -The Docker registry is a store for your Docker images. Once you build a -Docker image you can *push* it to a public or private repository on [Docker.io]( -http://index.docker.io) or to your own registry running behind your firewall. - -Using the Docker client, you can search for already published images and -then pull them down to your Docker host to build containers from them -(or even build on these images). - -[Docker.io](http://index.docker.io) provides both public and -private storage for images. Public storage is searchable and can be -downloaded by anyone. Private repositories are excluded from search -results and only you and your users can pull them down and use them to -build containers. You can [sign up for a plan here](https://index.docker.io/plans). - -To learn more, check out the [Working with Repositories]( -http://docs.docker.io/use/workingwithrepository) section from the -[Docker documentation](http://docs.docker.io). - -## Where to go from here - -### Understanding Docker - -Visit [Understanding Docker](understanding-docker.md) in our Getting Started manual. - -### Get practical and learn how to use Docker straight away - -Visit [Working with Docker](working-with-docker.md) in our Getting Started manual. - -### Get the product and go hands-on - -Visit [Get Docker](get-docker.md) in our Getting Started manual. 
- -### Get the whole story - -[https://www.docker.io/the_whole_story/](https://www.docker.io/the_whole_story/) diff --git a/docs/sources/introduction/understanding-docker.md b/docs/sources/introduction/understanding-docker.md index 53f5e43179..e9041420af 100644 --- a/docs/sources/introduction/understanding-docker.md +++ b/docs/sources/introduction/understanding-docker.md @@ -1,38 +1,129 @@ page_title: Understanding Docker page_description: Docker explained in depth -page_keywords: docker, introduction, documentation, about, technology, understanding, Dockerfile +page_keywords: docker, introduction, documentation, about, technology, understanding # Understanding Docker -*What is Docker? What makes it great?* +**What is Docker?** -Building development lifecycles, pipelines and deployment tooling is -hard. It's not easy to create portable applications and services. -There's often high friction getting code from your development -environment to production. It's also hard to ensure those applications -and services are consistent, up-to-date and managed. +Docker is a platform for developing, shipping, and running applications. +Docker is designed to deliver your applications faster. With Docker you +can separate your applications from your infrastructure AND treat your +infrastructure like a managed application. We want to help you ship code +faster, test faster, deploy faster and shorten the cycle between writing +code and running code. -Docker is designed to solve these problem for both developers and -sysadmins. It is a lightweight framework (with a powerful API) that -provides a lifecycle for building and deploying applications into -containers. +Docker does this by combining a lightweight container virtualization +platform with workflow and tooling that helps you manage and deploy your +applications. -Docker provides a way to run almost any application securely isolated -into a container. The isolation and security allows you to run many -containers simultaneously on your host. The lightweight nature of +At its core Docker provides a way to run almost any application securely +isolated into a container. The isolation and security allows you to run +many containers simultaneously on your host. The lightweight nature of containers, which run without the extra overload of a hypervisor, means you can get more out of your hardware. -**Note:** Docker itself is *shipped* with the Apache 2.0 license and it -is completely open-source — *the pun? very much intended*. +Surrounding the container virtualization, we provide tooling and a +platform to help you get your applications (and its supporting +components) into Docker containers, to distribute and ship those +containers to your teams to develop and test on them and then to deploy +those applications to your production environment whether it be in a +local data center or the Cloud. -### What are the Docker basics I need to know? +## What can I use Docker for? -Docker has three major components: +* Faster delivery of your applications + +Docker is perfect for helping you with the development lifecycle. Docker +can allow your developers to develop on local containers that contain +your applications and services. It can integrate into a continuous +integration and deployment workflow. + +Your developers write code locally and share their development stack via +Docker with their colleagues. When they are ready they can push their +code and the stack they are developing on to a test environment and +execute any required tests. 
From the testing environment you can then +push your Docker images into production and deploy your code. + +* Deploy and scale more easily + +Docker's container platform allows you to have highly portable +workloads. Docker containers can run on a developer's local host, on +physical or virtual machines in a data center or in the Cloud. + +Docker's portability and lightweight nature also makes managing +workloads dynamically easy. You can use Docker to build and scale out +applications and services. Docker's speed means that scaling can be near +real time. + +* Get higher density and run more workloads + +Docker is lightweight and fast. It provides a viable (and +cost-effective!) alternative to hypervisor-based virtual machines. This +is especially useful in high density environments, for example building +your own Cloud or Platform-as-a-Service. But it is also useful +for small and medium deployments where you want to get more out of the +resources you have. + +## What are the major Docker components? + +Docker has two major components: + +* Docker: the open source container virtualization platform. +* [Docker.io](https://index.docker.io): our Software-as-a-Service + platform for sharing and managing Docker containers. + +**Note:** Docker is licensed with the open source Apache 2.0 license. + +## What is the architecture of Docker? + +Docker has a client-server architecture. The Docker *client* talks to +the Docker *daemon* which does the heavy lifting of building, running +and distributing your Docker containers. Both the Docker client and the +daemon *can* run on the same system, or you can connect a Docker client +with a remote Docker daemon. The Docker client and service can +communicate via sockets or through a RESTful API. + +![Docker Architecture Diagram](/article-img/architecture.svg) + +### The Docker daemon + +As shown on the diagram above, the Docker daemon runs on a host machine. +The user does not directly interact with the daemon, but instead through +the Docker client. + +### The Docker client + +The Docker client, in the form of the `docker` binary, is the primary user +interface to Docker. It is tasked with accepting commands from the user +and communicating back and forth with a Docker daemon. + +### Inside Docker + +Inside Docker there are three concepts we’ll need to understand: -* Docker containers. * Docker images. * Docker registries. +* Docker containers. + +#### Docker images + +The Docker image is a read-only template, for example an Ubuntu operating system +with Apache and your web application installed. Docker containers are +created from images. You can download Docker images that other people +have created or Docker provides a simple way to build new images or +update existing images. You can consider Docker images to be the **build** +portion of Docker. + +#### Docker Registries + +Docker registries hold images. These are public (or private!) stores +that you can upload or download images to and from. The public Docker +registry is called [Docker.io](http://index.docker.io). It provides a +huge collection of existing images that you can use. These images can be +images you create yourself or you can make use of images that others +have previously created. You can consider Docker registries the +**distribution** portion of Docker. #### Docker containers @@ -40,233 +131,198 @@ Docker containers are like a directory. A Docker container holds everything that is needed for an application to run. Each container is created from a Docker image. 
Docker containers can be run, started, stopped, moved and deleted. Each container is an isolated and secure -application platform. You can consider Docker containers the *run* -portion of the Docker framework. +application platform. You can consider Docker containers the **run** +portion of Docker. -#### Docker images +## So how does Docker work? -The Docker image is a template, for example an Ubuntu -operating system with Apache and your web application installed. Docker -containers are launched from images. Docker provides a simple way to -build new images or update existing images. You can consider Docker -images to be the *build* portion of the Docker framework. +We've learned so far that: -#### Docker Registries +1. You can build Docker images that hold your applications. +2. You can create Docker containers from those Docker images to run your + applications. +3. You can share those Docker images via + [Docker.io](https://index.docker.io) or your own registry. + +Let's look at how these elements combine together to make Docker work. + +### How does a Docker Image work? + +We've already seen that Docker images are read-only templates that +Docker containers are launched from. Each image consists of a series of +layers. Docker makes use of [union file +systems](http://en.wikipedia.org/wiki/UnionFS) to combine these layers +into a single image. Union file systems allow files and directories of +separate file systems, known as branches, to be transparently overlaid, +forming a single coherent file system. + +One of the reasons Docker is so lightweight is because of these layers. +When you change a Docker image, for example update an application to a +new version, this builds a new layer. Hence, rather than replacing the whole +image or entirely rebuilding, as you may do with a virtual machine, only +that layer is added or updated. Now you don't need to distribute a whole new image, +just the update, making distributing Docker images fast and simple. + +Every image starts from a base image, for example `ubuntu`, a base Ubuntu +image, or `fedora`, a base Fedora image. You can also use images of your +own as the basis for a new image, for example if you have a base Apache +image you could use this as the base of all your web application images. + +> **Note:** +> Docker usually gets these base images from [Docker.io](https://index.docker.io). + +Docker images are then built from these base images using a simple +descriptive set of steps we call *instructions*. Each instruction +creates a new layer in our image. Instructions include steps like: + +* Run a command. +* Add a file or directory. +* Create an environment variable. +* What process to run when launching a container from this image. + +These instructions are stored in a file called a `Dockerfile`. Docker +reads this `Dockerfile` when you request an image be built, executes the +instructions and returns a final image. + +### How does a Docker registry work? + +The Docker registry is the store for your Docker images. Once you build +a Docker image you can *push* it to a public registry [Docker.io]( +https://index.docker.io) or to your own registry running behind your +firewall. + +Using the Docker client, you can search for already published images and +then pull them down to your Docker host to build containers from them. + +[Docker.io](https://index.docker.io) provides both public and +private storage for images. Public storage is searchable and can be +downloaded by anyone. 
Private storage is excluded from search
+results and only you and your users can pull them down and use them to
+build containers. You can [sign up for a plan
+here](https://index.docker.io/plans).
+
+### How does a container work?
+
+A container consists of an operating system, user-added files and
+meta-data. As we've discovered, each container is built from an image. That image tells
+Docker what the container holds, what process to run when the container
+is launched and a variety of other configuration data. The Docker image
+is read-only. When Docker runs a container from an image it adds a
+read-write layer on top of the image (using a union file system as we
+saw earlier) in which your application is then run.
+
+### What happens when you run a container?
+
+The Docker client, using the `docker` binary or via the API, tells the
+Docker daemon to run a container. Let's take a look at what happens
+next.
+
+    $ docker run -i -t ubuntu /bin/bash
+
+Let's break down this command. The Docker client is launched using the
+`docker` binary with the `run` option telling it to launch a new
+container. The bare minimum the Docker client needs to tell the
+Docker daemon to run the container is:
+
+* What Docker image to build the container from, here `ubuntu`, a base
+  Ubuntu image;
+* The command you want to run inside the container when it is launched,
+  here `/bin/bash` to start a Bash shell inside the new container.
+
+So what happens under the covers when we run this command?
+
+Docker begins with:
+
+- **Pulling the `ubuntu` image:**
+  Docker checks for the presence of the `ubuntu` image and if it doesn't
+  exist locally on the host, then Docker downloads it from
+  [Docker.io](https://index.docker.io). If the image already exists then
+  Docker uses it for the new container.
+- **Creating a new container:**
+  Once Docker has the image it creates a container from it:
+  * **Allocating a filesystem and mounting a read-write _layer_:**
+    The container is created in the file system and a read-write layer is
+    added to the image.
+  * **Allocating a network / bridge interface:**
+    Creates a network interface that allows the Docker container to talk to
+    the local host.
+  * **Setting up an IP address:**
+    Finds and attaches an available IP address from a pool.
+- **Executing a process that you specify:**
+  Runs your application, and;
+- **Capturing and providing application output:**
+  Connects and logs standard input, outputs and errors for you to see how
+  your application is running.
+
+Now you have a running container! From here you can manage your running
+container, interact with your application and then, when finished, stop
+and remove your container.
+
+## The underlying technology
+
+Docker is written in Go and makes use of several Linux kernel features to
+deliver the functionality we've seen.
+
+### Namespaces
+
+Docker takes advantage of a technology called `namespaces` to provide an
+isolated workspace we call a *container*. When you run a container,
+Docker creates a set of *namespaces* for that container.
+
+This provides a layer of isolation: each aspect of a container runs in
+its own namespace and does not have access outside it.
+
+Some of the namespaces that Docker uses are:
+
+ - **The `pid` namespace:**
+ Used for process isolation (PID: Process ID).
+ - **The `net` namespace:**
+ Used for managing network interfaces (NET: Networking).
+ - **The `ipc` namespace:**
+ Used for managing access to IPC resources (IPC: InterProcess
+Communication).
+ - **The `mnt` namespace:**
+ Used for managing mount-points (MNT: Mount).
+ - **The `uts` namespace:**
+ Used for isolating kernel and version identifiers (UTS: Unix Timesharing
+System).
+
+### Control groups
+
+Docker also makes use of another technology called `cgroups` or control
+groups. A key need to run applications in isolation is to have them only
+use the resources you want. This ensures containers are good
+multi-tenant citizens on a host. Control groups allow Docker to
+share available hardware resources between containers and, if required,
+set up limits and constraints, for example limiting the memory available
+to a specific container.
+
+### Union file systems
+
+Union file systems or UnionFS are file systems that operate by creating
+layers, making them very lightweight and fast. Docker uses union file
+systems to provide the building blocks for containers. We learned about
+union file systems earlier in this document. Docker can make use of
+several union file system variants including: AUFS, btrfs, vfs, and
+DeviceMapper.
+
+### Container format

-Docker registries hold images. These are public (or private!) stores
-that you can upload or download images to and from. These images can be
-images you create yourself or you can make use of images that others
-have previously created. Docker registries allow you to build simple and
-powerful development and deployment work flows. You can consider Docker
-registries the *share* portion of the Docker framework.
+Docker combines these components into a wrapper we call a container
+format. The default container format is called `libcontainer`. Docker
+also supports traditional Linux containers using
+[LXC](https://linuxcontainers.org/). In future, Docker may support other
+container formats, for example integration with BSD Jails or Solaris
+Zones.
+
+## Next steps

-### How does Docker work?
+### Installing Docker
+
+Visit the [installation](/installation/#installation) section.
+
+### The Docker User Guide
+
+[Learn how to use Docker](/userguide/).

-Docker is a client-server framework. The Docker *client* commands the Docker
-*daemon*, which in turn creates, builds and manages containers.
-The Docker daemon takes advantage of some neat Linux kernel and
-operating system features, like `namespaces` and `cgroups`, to build
-isolated container. Docker provides a simple abstraction layer to these
-technologies.
-
-> **Note:** If you would like to learn more about the underlying technology,
-> why not jump to [Understanding the Technology](technology.md) where we talk about them? You can
-> always come back here to continue learning about features of Docker and what
-> makes it different.
-
-## Features of Docker
-
-In order to get a good grasp of the capabilities of Docker you should
-read the [User's Manual](http://docs.docker.io). Let's look at a summary
-of Docker's features to give you an idea of how Docker might be useful
-to you.
-
-### User centric and simple to use
-
-*Docker is made for humans.*
-
-It's easy to get started and easy to build and deploy applications with
-Docker: or as we say "*dockerize*" them! As much of Docker as possible
-uses plain English for commands and tries to be as lightweight and
-transparent as possible. We want to get out of the way so you can build
-and deploy your applications.
-
-### Docker is Portable
-
-*Dockerize And Go!*
-
-Docker containers are highly portable.
Docker provides a standard -container format to hold your applications: - -* You take care of your applications inside the container, and; -* Docker takes care of managing the container. - -Any machine, be it bare-metal or virtualized, can run any Docker -container. The sole requirement is to have Docker installed. - -**This translates to:** - - - Reliability; - - Freeing your applications out of the dependency-hell; - - A natural guarantee that things will work, anywhere. - -### Lightweight - -*No more resources waste.* - -Containers are lightweight, in fact, they are extremely lightweight. -Unlike traditional virtual machines, which have the overhead of a -hypervisor, Docker relies on operating system level features to provide -isolation and security. A Docker container does not need anything more -than what your application needs to run. - -This translates to: - - - Ability to deploy a large number of applications on a single system; - - Lightning fast start up times and reduced overhead. - -### Docker can run anything - -*An amazing host! (again, pun intended.)* - -Docker isn't prescriptive about what applications or services you can run -inside containers. We provide use cases and examples for running web -services, databases, applications - just about anything you can imagine -can run in a Docker container. - -**This translates to:** - - - Ability to run a wide range of applications; - - Ability to deploy reliably without repeating yourself. - -### Plays well with others - -*A wonderful guest.* - -Today, it is possible to install and use Docker almost anywhere. Even on -non-Linux systems such as Windows or Mac OS X thanks to a project called -[Boot2Docker](http://boot2docker.io). - -**This translates to running Docker (and Docker containers!) _anywhere_:** - - - **Linux:** - Ubuntu, CentOS / RHEL, Fedora, Gentoo, openSUSE and more. - - **Infrastructure-as-a-Service:** - Amazon AWS, Google GCE, Rackspace Cloud and probably, your favorite IaaS. - - **Microsoft Windows** - - **OS X** - -### Docker is Responsible - -*A tool that you can trust.* - -Docker does not just bring you a set of tools to isolate and run -applications. It also allows you to specify constraints and controls on -those resources. - -**This translates to:** - - - Fine tuning available resources for each application; - - Allocating memory or CPU intelligently to make most of your environment; - -Without dealing with complicated commands or third party applications. - -### Docker is Social - -*Docker knows that No One Is an Island.* - -Docker allows you to share the images you've built with the world. And -lots of people have already shared their own images. - -To facilitate this sharing Docker comes with a public registry called -[Docker.io](http://index.docker.io). If you don't want your images to be -public you can also use private images on [Docker.io](https://index.docker.io) -or even run your own registry behind your firewall. - -**This translates to:** - - - No more wasting time building everything from scratch; - - Easily and quickly save your application stack; - - Share and benefit from the depth of the Docker community. - -## Docker versus Virtual Machines - -> I suppose it is tempting, if the *only* tool you have is a hammer, to -> treat *everything* as if it were a nail. 
-> — **_Abraham Maslow_** - -**Docker containers are:** - - - Easy on the resources; - - Extremely light to deal with; - - Do not come with substantial overhead; - - Very easy to work with; - - Agnostic; - - Can work *on* virtual machines; - - Secure and isolated; - - *Artful*, *social*, *fun*, and; - - Powerful sand-boxes. - -**Docker containers are not:** - - - Hardware or OS emulators; - - Resource heavy; - - Platform, software or language dependent. - -## Docker Use Cases - -Docker is a framework. As a result it's flexible and powerful enough to -be used in a lot of different use cases. - -### For developers - - - **Developed with developers in mind:** - Build, test and ship applications with nothing but Docker and lean - containers. - - **Re-usable building blocks to create more:** - Docker images are easily updated building blocks. - - **Automatically build-able:** - It has never been this easy to build - *anything*. - - **Easy to integrate:** - A powerful, fully featured API allows you to integrate Docker into your tooling. - -### For sysadmins - - - **Efficient (and DevOps friendly!) lifecycle:** - Operations and developments are consistent, repeatable and reliable. - - **Balanced environments:** - Processes between development, testing and production are leveled. - - **Improvements on speed and integration:** - Containers are almost nothing more than isolated, secure processes. - - **Lowered costs of infrastructure:** - Containers are lightweight and heavy on resources compared to virtual machines. - - **Portable configurations:** - Issues and overheads with dealing with configurations and systems are eliminated. - -### For everyone - - - **Increased security without performance loss:** - Replacing VMs with containers provide security without additional - hardware (or software). - - **Portable:** - You can easily move applications and workloads from different operating - systems and platforms. - -## Where to go from here - -### Learn about Parts of Docker and the underlying technology - -Visit [Understanding the Technology](technology.md) in our Getting Started manual. - -### Get practical and learn how to use Docker straight away - -Visit [Working with Docker](working-with-docker.md) in our Getting Started manual. - -### Get the product and go hands-on - -Visit [Get Docker](get-docker.md) in our Getting Started manual. - -### Get the whole story - -[https://www.docker.io/the_whole_story/](https://www.docker.io/the_whole_story/) diff --git a/docs/sources/introduction/working-with-docker.md b/docs/sources/introduction/working-with-docker.md deleted file mode 100644 index 8d946e6846..0000000000 --- a/docs/sources/introduction/working-with-docker.md +++ /dev/null @@ -1,408 +0,0 @@ -page_title: Working with Docker and the Dockerfile -page_description: Working with Docker and The Dockerfile explained in depth -page_keywords: docker, introduction, documentation, about, technology, understanding, Dockerfile - -# Working with Docker and the Dockerfile - -*How to use and work with Docker?* - -> **Warning! Don't let this long page bore you.** -> If you prefer a summary and would like to see how a specific command -> works, check out the glossary of all available client -> commands on our [User's Manual: Commands Reference]( -> http://docs.docker.io/reference/commandline/cli). - -## Introduction - -On the last page, [Understanding the Technology](technology.md), we covered the -components that make up Docker and learnt about the -underlying technology and *how* everything works. 
- -Now, it is time to get practical and see *how to work with* the Docker client, -Docker containers and images and the `Dockerfile`. - -> **Note:** You are encouraged to take a good look at the container, -> image and `Dockerfile` explanations here to have a better understanding -> on what exactly they are and to get an overall idea on how to work with -> them. On the next page (i.e., [Get Docker](get-docker.md)), you will be -> able to find links for platform-centric installation instructions. - -## Elements of Docker - -As we mentioned on the, [Understanding the Technology](technology.md) page, the main -elements of Docker are: - - - Containers; - - Images, and; - - The `Dockerfile`. - -> **Note:** This page is more *practical* than *technical*. If you are -> interested in understanding how these tools work behind the scenes -> and do their job, you can always read more on -> [Understanding the Technology](technology.md). - -## Working with the Docker client - -In order to work with the Docker client, you need to have a host with -the Docker daemon installed and running. - -### How to use the client - -The client provides you a command-line interface to Docker. It is -accessed by running the `docker` binary. - -> **Tip:** The below instructions can be considered a summary of our -> *interactive tutorial*. If you prefer a more hands-on approach without -> installing anything, why not give that a shot and check out the -> [Docker Interactive Tutorial](https://www.docker.io/gettingstarted). - -The `docker` client usage consists of passing a chain of arguments: - - # Usage: [sudo] docker [option] [command] [arguments] .. - # Example: - $ docker run -i -t ubuntu /bin/bash - -### Our first Docker command - -Let's get started with our first Docker command by checking the -version of the currently installed Docker client using the `docker -version` command. - - # Usage: [sudo] docker version - # Example: - $ docker version - -This command will not only provide you the version of Docker client you -are using, but also the version of Go (the programming language powering -Docker). - - Client version: 0.8.0 - Go version (client): go1.2 - - Git commit (client): cc3a8c8 - Server version: 0.8.0 - - Git commit (server): cc3a8c8 - Go version (server): go1.2 - - Last stable version: 0.8.0 - -### Finding out all available commands - -The user-centric nature of Docker means providing you a constant stream -of helpful instructions. This begins with the client itself. - -In order to get a full list of available commands run the `docker` -binary: - - # Usage: [sudo] docker - # Example: - $ docker - -You will get an output with all currently available commands. - - Commands: - attach Attach to a running container - build Build a container from a Dockerfile - commit Create a new image from a container's changes - . . . - -### Command usage instructions - -The same way used to learn all available commands can be repeated to find -out usage instructions for a specific command. - -Try typing Docker followed with a `[command]` to see the instructions: - - # Usage: [sudo] docker [command] [--help] - # Example: - $ docker attach - Help outputs . . . - -Or you can pass the `--help` flag to the `docker` binary. 
- - $ docker images --help - -You will get an output with all available options: - - Usage: docker attach [OPTIONS] CONTAINER - - Attach to a running container - - --no-stdin=false: Do not attach stdin - --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) - -## Working with images - -### Docker Images - -As we've discovered a Docker image is a read-only template that we build -containers from. Every Docker container is launched from an image and -you can use both images provided by others, for example we've discovered -the base `ubuntu` image provided by Docker, as well as images built by -others. For example we can build an image that runs Apache and our own -web application as a starting point to launch containers. - -### Searching for images - -To search for Docker image we use the `docker search` command. The -`docker search` command returns a list of all images that match your -search criteria together with additional, useful information about that -image. This includes information such as social metrics like how many -other people like the image - we call these "likes" *stars*. We also -tell you if an image is *trusted*. A *trusted* image is built from a -known source and allows you to introspect in greater detail how the -image is constructed. - - # Usage: [sudo] docker search [image name] - # Example: - $ docker search nginx - - NAME DESCRIPTION STARS OFFICIAL TRUSTED - $ dockerfile/nginx Trusted Nginx (http://nginx.org/) Build 6 [OK] - paintedfox/nginx-php5 A docker image for running Nginx with PHP5. 3 [OK] - $ dockerfiles/django-uwsgi-nginx dockerfile and configuration files to buil... 2 [OK] - . . . - -> **Note:** To learn more about trusted builds, check out [this]( -http://blog.docker.io/2013/11/introducing-trusted-builds) blog post. - -### Downloading an image - -Downloading a Docker image is called *pulling*. To do this we hence use the -`docker pull` command. - - # Usage: [sudo] docker pull [image name] - # Example: - $ docker pull dockerfile/nginx - - Pulling repository dockerfile/nginx - 0ade68db1d05: Pulling dependent layers - 27cf78414709: Download complete - b750fe79269d: Download complete - . . . - -As you can see, Docker will download, one by one, all the layers forming -the final image. This demonstrates the *building block* philosophy of -Docker. - -### Listing available images - -In order to get a full list of available images, you can use the -`docker images` command. - - # Usage: [sudo] docker images - # Example: - $ docker images - - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE - myUserName/nginx latest a0d6c70867d2 41 seconds ago 578.8 MB - nginx latest 173c2dd28ab2 3 minutes ago 578.8 MB - $ dockerfile/nginx latest 0ade68db1d05 3 weeks ago 578.8 MB - -## Working with containers - -### Docker Containers - -Docker containers are directories on your Docker host that are built -from Docker images. In order to create or start a container, you need an -image. This could be the base `ubuntu` image or an image built and -shared with you or an image you've built yourself. - -### Running a new container from an image - -The easiest way to create a new container is to *run* one from an image. - - # Usage: [sudo] docker run [arguments] .. - # Example: - $ docker run -d --name nginx_web nginx /usr/sbin/nginx - -This will create a new container from an image called `nginx` which will -launch the command `/usr/sbin/nginx` when the container is run. We've -also given our container a name, `nginx_web`. 
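
Because a daemonized container like `nginx_web` runs in the background, its
output is not attached to your terminal. A minimal sketch of how you could
still inspect it (assuming the `nginx_web` container created above):

    # Print the accumulated stdout/stderr of the named container
    $ docker logs nginx_web
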
- -Containers can be run in two modes: - -* Interactive; -* Daemonized; - -An interactive container runs in the foreground and you can connect to -it and interact with it. A daemonized container runs in the background. - -A container will run as long as the process you have launched inside it -is running, for example if the `/usr/bin/nginx` process stops running -the container will also stop. - -### Listing containers - -We can see a list of all the containers on our host using the `docker -ps` command. By default the `docker ps` command only shows running -containers. But we can also add the `-a` flag to show *all* containers - -both running and stopped. - - # Usage: [sudo] docker ps [-a] - # Example: - $ docker ps - - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 842a50a13032 $ dockerfile/nginx:latest nginx 35 minutes ago Up 30 minutes 0.0.0.0:80->80/tcp nginx_web - -### Stopping a container - -You can use the `docker stop` command to stop an active container. This will gracefully -end the active process. - - # Usage: [sudo] docker stop [container ID] - # Example: - $ docker stop nginx_web - nginx_web - -If the `docker stop` command succeeds it will return the name of -the container it has stopped. - -### Starting a Container - -Stopped containers can be started again. - - # Usage: [sudo] docker start [container ID] - # Example: - $ docker start nginx_web - nginx_web - -If the `docker start` command succeeds it will return the name of the -freshly started container. - -## Working with the Dockerfile - -The `Dockerfile` holds the set of instructions Docker uses to build a Docker image. - -> **Tip:** Below is a short summary of our full Dockerfile tutorial. In -> order to get a better-grasp of how to work with these automation -> scripts, check out the [Dockerfile step-by-step -> tutorial](https://www.docker.io/learn/dockerfile). - -A `Dockerfile` contains instructions written in the following format: - - # Usage: Instruction [arguments / command] .. - # Example: - FROM ubuntu - -A `#` sign is used to provide a comment: - - # Comments .. - -> **Tip:** The `Dockerfile` is very flexible and provides a powerful set -> of instructions for building applications. To learn more about the -> `Dockerfile` and its instructions see the [Dockerfile -> Reference](http://docs.docker.io/reference/builder/). - -### First steps with the Dockerfile - -It's a good idea to add some comments to the start of your `Dockerfile` -to provide explanation and exposition to any future consumers, for -example: - - # - # Dockerfile to install Nginx - # VERSION 2 - EDITION 1 - -The first instruction in any `Dockerfile` must be the `FROM` instruction. The `FROM` instruction specifies the image name that this new image is built from, it is often a base image like `ubuntu`. - - # Base image used is Ubuntu: - FROM ubuntu - -Next, we recommend you use the `MAINTAINER` instruction to tell people who manages this image. - - # Maintainer: O.S. Tezer (@ostezer) - MAINTAINER O.S. Tezer, ostezer@gmail.com - -After this we can add additional instructions that represent the steps -to build our actual image. - -### Our Dockerfile so far - -So far our `Dockerfile` will look like. - - # Dockerfile to install Nginx - # VERSION 2 - EDITION 1 - FROM ubuntu - MAINTAINER O.S. Tezer, ostezer@gmail.com - -Let's install a package and configure an application inside our image. To do this we use a new -instruction: `RUN`. The `RUN` instruction executes commands inside our -image, for example. 
Let's install a package and configure an application inside our image. To do
this we use a new instruction: `RUN`. The `RUN` instruction executes commands
inside our image; it is just like running a command on the command line inside
a container.

    RUN echo "deb http://archive.ubuntu.com/ubuntu/ raring main universe" >> /etc/apt/sources.list
    RUN apt-get update
    RUN apt-get install -y nginx
    RUN echo "\ndaemon off;" >> /etc/nginx/nginx.conf

We can see here that we've *run* four instructions. Each time we run an
instruction a new layer is added to our image. Here we've added an
Ubuntu package repository, updated the package lists, installed the `nginx`
package, and then echoed some configuration into the default
`/etc/nginx/nginx.conf` configuration file.

Let's specify another instruction, `CMD`, that tells Docker what command
to run when a container is created from this image.

    CMD /usr/sbin/nginx

We can now save this file and use it to build an image.

### Using a Dockerfile

Docker uses the `Dockerfile` to build images. The build process is initiated
by the `docker build` command.

    # Use the Dockerfile at the current location
    # Usage: [sudo] docker build .
    # Example:
    $ docker build -t="my_nginx_image" .

    Uploading context 25.09 kB
    Uploading context
    Step 0 : FROM ubuntu
     ---> 9cd978db300e
    Step 1 : MAINTAINER O.S. Tezer, ostezer@gmail.com
     ---> Using cache
     ---> 467542d0cdd3
    Step 2 : RUN echo "deb http://archive.ubuntu.com/ubuntu/ raring main universe" >> /etc/apt/sources.list
     ---> Using cache
     ---> 0a688bd2a48c
    Step 3 : RUN apt-get update
     ---> Running in de2937e8915a
    . . .
    Step 10 : CMD /usr/sbin/nginx
     ---> Running in b4908b9b9868
     ---> 626e92c5fab1
    Successfully built 626e92c5fab1

Here we can see that Docker has executed each instruction in turn, with each
instruction creating a new layer identified by a new ID. The `-t` flag allows
us to specify a name for our new image, here `my_nginx_image`.

We can see our new image using the `docker images` command.

    $ docker images
    REPOSITORY        TAG       IMAGE ID        CREATED          VIRTUAL SIZE
    my_nginx_image    latest    626e92c5fab1    57 seconds ago   337.6 MB
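We can now launch a container from the freshly built image; a quick sketch
(the port mapping and the local `curl` check are assumptions, based on nginx
listening on port 80 inside the container):

    $ docker run -d -p 80:80 --name my_nginx my_nginx_image
    $ curl localhost:80

Because the image's `CMD` already names `/usr/sbin/nginx`, no command needs
to be given on the `docker run` line.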
## Where to go from here

### Understanding Docker

Visit [Understanding Docker](understanding-docker.md) in our Getting Started manual.

### Learn about parts of Docker and the underlying technology

Visit [Understanding the Technology](technology.md) in our Getting Started manual.

### Get the product and go hands-on

Visit [Get Docker](get-docker.md) in our Getting Started manual.

### Get the whole story

[https://www.docker.io/the_whole_story/](https://www.docker.io/the_whole_story/)

diff --git a/docs/sources/reference/api/README.md b/docs/sources/reference/api/README.md
index a7b8ae1b44..ec1cbcb2c3 100644
--- a/docs/sources/reference/api/README.md
+++ b/docs/sources/reference/api/README.md
@@ -2,7 +2,7 @@ This directory holds the authoritative specifications of APIs defined and implem
 * The remote API by which a docker node can be queried over HTTP
 * The registry API by which a docker node can download and upload
-  container images for storage and sharing
+  images for storage and sharing
 * The index search API by which a docker node can search the public
   index for images to download
 * The docker.io OAuth and accounts API which 3rd party services can
diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md
index 47f4724b1a..9d4069f70a 100644
--- a/docs/sources/reference/api/docker_remote_api.md
+++ b/docs/sources/reference/api/docker_remote_api.md
@@ -20,13 +20,38 @@ page_keywords: API, Docker, rcli, REST, documentation
-The current version of the API is v1.11
+The current version of the API is v1.12

 Calling /images/(name)/insert is the same as calling
-/v1.11/images/(name)/insert
+/v1.12/images/(name)/insert

 You can still call an old version of the api using
-/v1.11/images/(name)/insert
+/v1.12/images/(name)/insert
+
+## v1.12
+
+### Full Documentation
+
+[*Docker Remote API v1.12*](/reference/api/docker_remote_api_v1.12/)
+
+### What's new
+
+`POST /build`
+
+**New!**
+Build now has support for the `forcerm` parameter to always remove containers
+
+`GET /containers/(name)/json`
+`GET /images/(name)/json`
+
+**New!**
+All the JSON keys are now in CamelCase
+
+**New!**
+Trusted builds are now Automated Builds - `is_trusted` is now `is_automated`.
+
+**Removed Insert Endpoint**
+The insert endpoint has been removed.

 ## v1.11
diff --git a/docs/sources/reference/api/docker_remote_api_v1.0.md b/docs/sources/reference/api/docker_remote_api_v1.0.md
index d719ca27e8..ba5338c5f9 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.0.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.0.md
@@ -7,7 +7,7 @@ page_keywords: API, Docker, rcli, REST, documentation
 # 1. Brief introduction

 - The Remote API is replacing rcli
-- Default port in the docker daemon is 4243
+- Default port in the docker daemon is 2375
 - The API tends to be REST, but for some complex commands, like attach
   or pull, the HTTP connection is hijacked to transport stdout stdin
   and stderr
diff --git a/docs/sources/reference/api/docker_remote_api_v1.1.md b/docs/sources/reference/api/docker_remote_api_v1.1.md
index 21997e5488..b884ee69dc 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.1.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.1.md
@@ -7,7 +7,7 @@ page_keywords: API, Docker, rcli, REST, documentation
 # 1. Brief introduction

 - The Remote API is replacing rcli
-- Default port in the docker daemon is 4243
+- Default port in the docker daemon is 2375
 - The API tends to be REST, but for some complex commands, like attach
   or pull, the HTTP connection is hijacked to transport stdout stdin
   and stderr
diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md
index 721244b49e..9c39611b34 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.10.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.10.md
@@ -7,9 +7,8 @@ page_keywords: API, Docker, rcli, REST, documentation
 ## 1.
Brief introduction - The Remote API has replaced rcli - - The daemon listens on `unix:///var/run/docker.sock` but you can - [*Bind Docker to another host/port or a Unix socket*]( - /use/basics/#bind-docker). + - The daemon listens on `unix:///var/run/docker.sock` but you can bind + Docker to another host/port or a Unix socket. - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` and `stderr` @@ -372,7 +371,7 @@ Start the container `id` "PublishAllPorts":false, "Privileged":false "Dns": ["8.8.8.8"], - "VolumesFrom: ["parent", "other:ro"] + "VolumesFrom": ["parent", "other:ro"] } **Example response**: @@ -1023,6 +1022,7 @@ Build an image from Dockerfile via stdin the resulting image in case of success - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image + - **rm** - remove intermediate containers after a successful build Request Headers: @@ -1297,4 +1297,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. - $ docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index 53e07b380c..f639557e13 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -7,9 +7,8 @@ page_keywords: API, Docker, rcli, REST, documentation ## 1. Brief introduction - The Remote API has replaced rcli - - The daemon listens on `unix:///var/run/docker.sock` but you can - [*Bind Docker to another host/port or a Unix socket*]( - /use/basics/#bind-docker). + - The daemon listens on `unix:///var/run/docker.sock` but you can bind + Docker to another host/port or a Unix socket. - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` and `stderr` @@ -124,7 +123,6 @@ Create a container "Cmd":[ "date" ], - "Dns":null, "Image":"base", "Volumes":{ "/tmp": {} @@ -411,7 +409,9 @@ Start the container `id` "LxcConf":{"lxc.utsname":"docker"}, "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts":false, - "Privileged":false + "Privileged":false, + "Dns": ["8.8.8.8"], + "VolumesFrom": ["parent", "other:ro"] } **Example response**: @@ -757,31 +757,6 @@ Create an image, either by pull it from the registry or by importing it - **200** – no error - **500** – server error -### Insert a file in an image - -`POST /images/(name)/insert` - -Insert a file from `url` in the image `name` at `path` - - **Example request**: - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Inserting..."} - {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}} - {"error":"Invalid..."} - ... 
- - Status Codes: - - - **200** – no error - - **500** – server error - ### Inspect an image `GET /images/(name)/json` @@ -1063,6 +1038,7 @@ Build an image from Dockerfile via stdin the resulting image in case of success - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image + - **rm** - remove intermediate containers after a successful build Request Headers: @@ -1358,4 +1334,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. - $ docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.12.md b/docs/sources/reference/api/docker_remote_api_v1.12.md new file mode 100644 index 0000000000..08a06a45d4 --- /dev/null +++ b/docs/sources/reference/api/docker_remote_api_v1.12.md @@ -0,0 +1,1373 @@ +page_title: Remote API v1.12 +page_description: API Documentation for Docker +page_keywords: API, Docker, rcli, REST, documentation + +# Docker Remote API v1.12 + +## 1. Brief introduction + + - The Remote API has replaced rcli + - The daemon listens on `unix:///var/run/docker.sock` but you can + [*Bind Docker to another host/port or a Unix socket*]( + /use/basics/#bind-docker). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` + and `stderr` + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + + **Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "base:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "9cd87474be90", + "Image": "base:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "3176a2479c92", + "Image": "base:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Image": "base:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + } + ] + + Query Parameters: + +   + + - **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default + - **limit** – Show `limit` last created + containers, include non-running ones. + - **since** – Show only containers created since Id, include + non-running ones. + - **before** – Show only containers created before Id, include + non-running ones. 
+ - **size** – 1/True/true or 0/False/false, Show the containers + sizes + + Status Codes: + + - **200** – no error + - **400** – bad parameter + - **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + + **Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Image":"base", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "DisableNetwork": false, + "ExposedPorts":{ + "22/tcp": {} + } + } + + **Example response**: + + HTTP/1.1 201 OK + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + + Json Parameters: + +   + + - **config** – the container's configuration + + Query Parameters: + +   + + - **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + + Status Codes: + + - **201** – no error + - **404** – no such container + - **406** – impossible to attach (container not running) + - **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + + **Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "base", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir":"" + + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": null, + "PublishAllPorts": false + } + } + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + + **Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles":[ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes":[ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + + Query Parameters: + +   + + - **ps_args** – ps arguments to use (eg. 
aux)

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Get container logs

`GET /containers/(id)/logs`

Get stdout and stderr logs from the container `id`

**Example request**:

    GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1 HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/vnd.docker.raw-stream

    {{ STREAM }}

Query Parameters:

- **follow** – 1/True/true or 0/False/false, return stream. Default false
- **stdout** – 1/True/true or 0/False/false, if logs=true, return stdout log. Default false
- **stderr** – 1/True/true or 0/False/false, if logs=true, return stderr log. Default false
- **timestamps** – 1/True/true or 0/False/false, if logs=true, print timestamps for every log line. Default false

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Inspect changes on a container's filesystem

`GET /containers/(id)/changes`

Inspect changes on container `id`'s filesystem

**Example request**:

    GET /containers/4fa6e0f0c678/changes HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    [
        {
            "Path":"/dev",
            "Kind":0
        },
        {
            "Path":"/dev/kmsg",
            "Kind":1
        },
        {
            "Path":"/test",
            "Kind":1
        }
    ]

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Export a container

`GET /containers/(id)/export`

Export the contents of container `id`

**Example request**:

    GET /containers/4fa6e0f0c678/export HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/octet-stream

    {{ STREAM }}

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Start a container

`POST /containers/(id)/start`

Start the container `id`

**Example request**:

    POST /containers/(id)/start HTTP/1.1
    Content-Type: application/json

    {
        "Binds":["/tmp:/tmp"],
        "LxcConf":{"lxc.utsname":"docker"},
        "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
        "PublishAllPorts":false,
        "Privileged":false,
        "Dns": ["8.8.8.8"],
        "VolumesFrom": ["parent", "other:ro"]
    }

**Example response**:

    HTTP/1.1 204 No Content
    Content-Type: text/plain

Json Parameters:

- **hostConfig** – the container's host configuration (optional)

Status Codes:

- **204** – no error
- **404** – no such container
- **500** – server error

### Stop a container

`POST /containers/(id)/stop`

Stop the container `id`

**Example request**:

    POST /containers/e90e34656806/stop?t=5 HTTP/1.1

**Example response**:

    HTTP/1.1 204 OK

Query Parameters:

- **t** – number of seconds to wait before killing the container

Status Codes:

- **204** – no error
- **404** – no such container
- **500** – server error

### Restart a container

`POST /containers/(id)/restart`

Restart the container `id`

**Example request**:

    POST /containers/e90e34656806/restart?t=5 HTTP/1.1

**Example response**:

    HTTP/1.1 204 OK

Query Parameters:

- **t** – number of seconds to wait before killing the container

Status Codes:

- **204** – no error
- **404** – no such container
- **500** – server error

### Kill a container

`POST /containers/(id)/kill`

Kill the container `id`

**Example request**:

    POST /containers/e90e34656806/kill HTTP/1.1

**Example response**:

    HTTP/1.1 204 OK

Query Parameters:

- **signal** – Signal to send to the container: integer or string like "SIGINT". When not set, SIGKILL is assumed and the call waits for the container to exit.

Status Codes:

- **204** – no error
- **404** – no such container
- **500** – server error
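As an aside (not part of the specification): with the daemon bound to TCP, as
in the CORS example at the end of this document, these lifecycle endpoints can
be exercised directly with `curl`; the address and container id below reuse
the examples from this page:

    $ curl -X POST "http://192.168.1.9:2375/containers/e90e34656806/stop?t=5"
    $ curl -X POST "http://192.168.1.9:2375/containers/e90e34656806/restart?t=5"
    $ curl -X POST "http://192.168.1.9:2375/containers/e90e34656806/kill"

Each call returns `204` with an empty body on success, as documented above.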
### Attach to a container

`POST /containers/(id)/attach`

Attach to the container `id`

**Example request**:

    POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/vnd.docker.raw-stream

    {{ STREAM }}

Query Parameters:

- **logs** – 1/True/true or 0/False/false, return logs. Default false
- **stream** – 1/True/true or 0/False/false, return stream. Default false
- **stdin** – 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false
- **stdout** – 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false
- **stderr** – 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false

Status Codes:

- **200** – no error
- **400** – bad parameter
- **404** – no such container
- **500** – server error

**Stream details**:

When the TTY setting is enabled in
[`POST /containers/create`](../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"),
the stream is the raw data from the process PTY and the client's stdin.
When the TTY is disabled, the stream is multiplexed to separate
stdout and stderr.

The format is a **Header** and a **Payload** (frame).

**HEADER**

The header contains the information on which stream the frame will be
written to (stdout or stderr). It also contains the size of the
associated frame, encoded in the last 4 bytes (uint32).

It is encoded on the first 8 bytes like this:

    header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}

`STREAM_TYPE` can be:

- 0: stdin (will be written on stdout)
- 1: stdout
- 2: stderr

`SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of
the uint32 size encoded as big endian.

**PAYLOAD**

The payload is the raw stream.

**IMPLEMENTATION**

The simplest way to implement the Attach protocol is the following:

1. Read 8 bytes
2. Choose stdout or stderr depending on the first byte
3. Extract the frame size from the last 4 bytes
4. Read the extracted size and output it on the correct output
5. Goto 1)

### Wait a container

`POST /containers/(id)/wait`

Block until container `id` stops, then returns the exit code

**Example request**:

    POST /containers/16253994b7c4/wait HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {"StatusCode":0}

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Remove a container

`DELETE /containers/(id)`

Remove the container `id` from the filesystem

**Example request**:

    DELETE /containers/16253994b7c4?v=1 HTTP/1.1

**Example response**:

    HTTP/1.1 204 OK

Query Parameters:

- **v** – 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false
- **force** – 1/True/true or 0/False/false, Removes the container even if it was running.
Default false + + Status Codes: + + - **204** – no error + - **400** – bad parameter + - **404** – no such container + - **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + + **Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource":"test.txt" + } + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ STREAM }} + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + + + Query Parameters: + +   + + - **all** – 1/True/true or 0/False/false, default false + - **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. + + + +### Create an image + +`POST /images/create` + +Create an image, either by pull it from the registry or by importing it + + **Example request**: + + POST /images/create?fromImage=base HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} + {"error":"Invalid..."} + ... + + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. + + Query Parameters: + +   + + - **fromImage** – name of the image to pull + - **fromSrc** – source to import, - means stdin + - **repo** – repository + - **tag** – tag + - **registry** – the registry to pull from + + Request Headers: + +   + + - **X-Registry-Auth** – base64-encoded AuthConfig object + + Status Codes: + + - **200** – no error + - **500** – server error + +### Insert a file in an image + +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` + + **Example request**: + + POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Inserting..."} + {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}} + {"error":"Invalid..."} + ... 
+ + Status Codes: + + - **200** – no error + - **500** – server error + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + + **Example request**: + + GET /images/base/json HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created":"2013-03-23T22:24:18.818426-07:00", + "Container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "PortSpecs":null, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"] + ,"Dns":null, + "Image":"base", + "Volumes":null, + "VolumesFrom":"", + "WorkingDir":"" + }, + "Id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent":"27cf784147099545", + "Size": 6824592 + } + + Status Codes: + + - **200** – no error + - **404** – no such image + - **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + + **Example request**: + + GET /images/base/history HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id":"b750fe79269d", + "Created":1364102658, + "CreatedBy":"/bin/bash" + }, + { + "Id":"27cf78414709", + "Created":1364068391, + "CreatedBy":"" + } + ] + + Status Codes: + + - **200** – no error + - **404** – no such image + - **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + + **Example request**: + + POST /images/test/push HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pushing..."} + {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}} + {"error":"Invalid..."} + ... + + Query Parameters: + +   + + - **registry** – the registry you wan to push, optional + + Request Headers: + +   + + - **X-Registry-Auth** – include a base64-encoded AuthConfig + object. + + Status Codes: + + - **200** – no error + - **404** – no such image + - **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + + **Example request**: + + POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 + + **Example response**: + + HTTP/1.1 201 OK + + Query Parameters: + +   + + - **repo** – The repository to tag in + - **force** – 1/True/true or 0/False/false, default false + + Status Codes: + + - **201** – no error + - **400** – bad parameter + - **404** – no such image + - **409** – conflict + - **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + + **Example request**: + + DELETE /images/test HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged":"3e2f21a89f"}, + {"Deleted":"3e2f21a89f"}, + {"Deleted":"53b4f83ac9"} + ] + + Query Parameters: + +   + + - **force** – 1/True/true or 0/False/false, default false + - **noprune** – 1/True/true or 0/False/false, default false + + Status Codes: + + - **200** – no error + - **404** – no such image + - **409** – conflict + - **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker.io](https://index.docker.io). 
> **Note**:
> The response keys have changed from API v1.6 to reflect the JSON
> sent by the registry server to the docker daemon's request.

**Example request**:

    GET /images/search?term=sshd HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    [
        {
            "description": "",
            "is_official": false,
            "is_automated": false,
            "name": "wma55/u1210sshd",
            "star_count": 0
        },
        {
            "description": "",
            "is_official": false,
            "is_automated": false,
            "name": "jdswinbank/sshd",
            "star_count": 0
        },
        {
            "description": "",
            "is_official": false,
            "is_automated": false,
            "name": "vgauthier/sshd",
            "star_count": 0
        }
        ...
    ]

Query Parameters:

- **term** – term to search

Status Codes:

- **200** – no error
- **500** – server error

## 2.3 Misc

### Build an image from Dockerfile via stdin

`POST /build`

Build an image from Dockerfile via stdin

**Example request**:

    POST /build HTTP/1.1

    {{ STREAM }}

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {"stream":"Step 1..."}
    {"stream":"..."}
    {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}}

The stream must be a tar archive compressed with one of the
following algorithms: identity (no compression), gzip, bzip2, xz.

The archive must include a file called `Dockerfile`
at its root. It may include any number of other files,
which will be accessible in the build context (See the [*ADD build
command*](/reference/builder/#dockerbuilder)).

Query Parameters:

- **t** – repository name (and optionally a tag) to be applied to the resulting image in case of success
- **q** – suppress verbose build output
- **nocache** – do not use the cache when building the image
- **rm** – remove intermediate containers after a successful build (default behavior)
- **forcerm** – always remove intermediate containers (includes rm)

Request Headers:

- **Content-type** – should be set to `"application/tar"`.
- **X-Registry-Config** – base64-encoded ConfigFile object

Status Codes:

- **200** – no error
- **500** – server error
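To make the request shape concrete, here is a sketch that streams a tar of the
current directory (assumed to contain a `Dockerfile` at its root) to a
TCP-bound daemon with `curl`; the address and image name are illustrative:

    # Tar up the build context and tag the resulting image
    $ tar -cf - . | curl -X POST \
        -H "Content-Type: application/tar" \
        --data-binary @- \
        "http://192.168.1.9:2375/build?t=my_nginx_image&rm=1"

The response is then the line-by-line JSON stream shown above.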
### Check auth configuration

`POST /auth`

Get the default username and email

**Example request**:

    POST /auth HTTP/1.1
    Content-Type: application/json

    {
        "username":"hannibal",
        "password":"xxxx",
        "email":"hannibal@a-team.com",
        "serveraddress":"https://index.docker.io/v1/"
    }

**Example response**:

    HTTP/1.1 200 OK

Status Codes:

- **200** – no error
- **204** – no error
- **500** – server error

### Display system-wide information

`GET /info`

Display system-wide information

**Example request**:

    GET /info HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "Containers":11,
        "Images":16,
        "Debug":false,
        "NFd": 11,
        "NGoroutines":21,
        "MemoryLimit":true,
        "SwapLimit":false,
        "IPv4Forwarding":true
    }

Status Codes:

- **200** – no error
- **500** – server error

### Show the docker version information

`GET /version`

Show the docker version information

**Example request**:

    GET /version HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "Version":"0.2.2",
        "GitCommit":"5a2a5cc+CHANGES",
        "GoVersion":"go1.0.3"
    }

Status Codes:

- **200** – no error
- **500** – server error

### Ping the docker server

`GET /_ping`

Ping the docker server

**Example request**:

    GET /_ping HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK

    OK

Status Codes:

- **200** - no error
- **500** - server error

### Create a new image from a container's changes

`POST /commit`

Create a new image from a container's changes

**Example request**:

    POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
    Content-Type: application/json

    {
        "Hostname":"",
        "User":"",
        "Memory":0,
        "MemorySwap":0,
        "AttachStdin":false,
        "AttachStdout":true,
        "AttachStderr":true,
        "PortSpecs":null,
        "Tty":false,
        "OpenStdin":false,
        "StdinOnce":false,
        "Env":null,
        "Cmd":[
            "date"
        ],
        "Volumes":{
            "/tmp": {}
        },
        "WorkingDir":"",
        "DisableNetwork": false,
        "ExposedPorts":{
            "22/tcp": {}
        }
    }

**Example response**:

    HTTP/1.1 201 OK
    Content-Type: application/vnd.docker.raw-stream

    {"Id":"596069db4bf5"}

Json Parameters:

- **config** – the container's configuration

Query Parameters:

- **container** – source container
- **repo** – repository
- **tag** – tag
- **m** – commit message
- **author** – author (eg.
"John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + + Status Codes: + + - **201** – no error + - **404** – no such container + - **500** – server error + +### Monitor Docker's events + +`GET /events` + +Get events from docker, either in real time via streaming, or +via polling (using since) + + **Example request**: + + GET /events?since=1374067924 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} + {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} + {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} + {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} + + Query Parameters: + +   + + - **since** – timestamp used for polling + - **until** – timestamp used for polling + + Status Codes: + + - **200** – no error + - **500** – server error + +### Get a tarball containing all images and tags in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository +specified by `name`. + + **Example request** + + GET /images/ubuntu/get + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + + Status Codes: + + - **200** – no error + - **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into the docker repository. + + **Example request** + + POST /images/load + + Tarball in body + + **Example response**: + + HTTP/1.1 200 OK + + Status Codes: + + - **200** – no error + - **500** – server error + +# 3. Going further + +## 3.1 Inside `docker run` + +Here are the steps of `docker run`: + +- Create the container + +- If the status code is 404, it means the image doesn't exists: + - Try to pull it + - Then retry to create the container + +- Start the container + +- If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + +- If in detached mode or only stdin is attached: + - Display the container's id + +## 3.2 Hijacking + +In this version of the API, /attach, uses hijacking to transport stdin, +stdout and stderr on the same socket. This might change in the future. + +## 3.3 CORS Requests + +To enable cross origin requests to the remote api add the flag +"–api-enable-cors" when running docker in daemon mode. + + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.2.md b/docs/sources/reference/api/docker_remote_api_v1.2.md index 17967eab3d..e231cff02f 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.2.md +++ b/docs/sources/reference/api/docker_remote_api_v1.2.md @@ -7,7 +7,7 @@ page_keywords: API, Docker, rcli, REST, documentation # 1. Brief introduction - The Remote API is replacing rcli -- Default port in the docker daemon is 4243 +- Default port in the docker daemon is 2375 - The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport stdout stdin and stderr @@ -999,5 +999,5 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. 
-> docker -d -H="[tcp://192.168.1.9:4243](tcp://192.168.1.9:4243)" +> docker -d -H="[tcp://192.168.1.9:2375](tcp://192.168.1.9:2375)" > –api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.3.md b/docs/sources/reference/api/docker_remote_api_v1.3.md index 9f7bd22e32..71c70273fd 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.3.md +++ b/docs/sources/reference/api/docker_remote_api_v1.3.md @@ -7,7 +7,7 @@ page_keywords: API, Docker, rcli, REST, documentation # 1. Brief introduction - The Remote API is replacing rcli -- Default port in the docker daemon is 4243 +- Default port in the docker daemon is 2375 - The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport stdout stdin and stderr @@ -1081,4 +1081,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. -> docker -d -H="192.168.1.9:4243" –api-enable-cors +> docker -d -H="192.168.1.9:2375" –api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.4.md b/docs/sources/reference/api/docker_remote_api_v1.4.md index 2e7e94f7d4..253944fd9a 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.4.md +++ b/docs/sources/reference/api/docker_remote_api_v1.4.md @@ -7,7 +7,7 @@ page_keywords: API, Docker, rcli, REST, documentation # 1. Brief introduction - The Remote API is replacing rcli -- Default port in the docker daemon is 4243 +- Default port in the docker daemon is 2375 - The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport stdout stdin and stderr @@ -1127,4 +1127,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. - $ docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.5.md b/docs/sources/reference/api/docker_remote_api_v1.5.md index 08457bfd94..7dc5334f45 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.5.md +++ b/docs/sources/reference/api/docker_remote_api_v1.5.md @@ -7,7 +7,7 @@ page_keywords: API, Docker, rcli, REST, documentation # 1. Brief introduction - The Remote API is replacing rcli -- Default port in the docker daemon is 4243 +- Default port in the docker daemon is 2375 - The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport stdout stdin and stderr @@ -1134,4 +1134,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. - $ docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.6.md b/docs/sources/reference/api/docker_remote_api_v1.6.md index bca09a3a0e..021a357b79 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.6.md +++ b/docs/sources/reference/api/docker_remote_api_v1.6.md @@ -7,9 +7,8 @@ page_keywords: API, Docker, rcli, REST, documentation # 1. 
Brief introduction - The Remote API has replaced rcli - - The daemon listens on `unix:///var/run/docker.sock` but you can - [*Bind Docker to another host/port or a Unix socket*]( - /use/basics/#bind-docker). + - The daemon listens on `unix:///var/run/docker.sock` but you can bind + Docker to another host/port or a Unix socket. - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` and `stderr` @@ -1236,4 +1235,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. - $ docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.7.md b/docs/sources/reference/api/docker_remote_api_v1.7.md index 818fbba11c..02073e9b60 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.7.md +++ b/docs/sources/reference/api/docker_remote_api_v1.7.md @@ -7,9 +7,8 @@ page_keywords: API, Docker, rcli, REST, documentation # 1. Brief introduction - The Remote API has replaced rcli - - The daemon listens on `unix:///var/run/docker.sock` but you can - [*Bind Docker to another host/port or a Unix socket*]( - /use/basics/#bind-docker). + - The daemon listens on `unix:///var/run/docker.sock` but you can bind + Docker to another host/port or a Unix socket. - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` and `stderr` @@ -1230,4 +1229,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. - $ docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.md b/docs/sources/reference/api/docker_remote_api_v1.8.md index 0d2997693c..a691930c6b 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.8.md +++ b/docs/sources/reference/api/docker_remote_api_v1.8.md @@ -7,9 +7,8 @@ page_keywords: API, Docker, rcli, REST, documentation # 1. Brief introduction - The Remote API has replaced rcli - - The daemon listens on `unix:///var/run/docker.sock` but you can - [*Bind Docker to another host/port or a Unix socket*]( - /use/basics/#bind-docker). + - The daemon listens on `unix:///var/run/docker.sock` but you can bind + Docker to another host/port or a Unix socket. - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` and `stderr` @@ -1276,4 +1275,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. - $ docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md index d8be62a7a7..d67443fd26 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.md +++ b/docs/sources/reference/api/docker_remote_api_v1.9.md @@ -7,9 +7,8 @@ page_keywords: API, Docker, rcli, REST, documentation # 1. 
Brief introduction - The Remote API has replaced rcli - - The daemon listens on `unix:///var/run/docker.sock` but you can - [*Bind Docker to another host/port or a Unix socket*]( - /use/basics/#bind-docker). + - The daemon listens on `unix:///var/run/docker.sock` but you can bind + Docker to another host/port or a Unix socket. - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` and `stderr` @@ -1313,4 +1312,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. - $ docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 98e9e0f544..c8af26c5db 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -23,7 +23,7 @@ Then call `docker build` with the path of you source repository as argument The path to the source repository defines where to find the *context* of the build. The build is run by the Docker daemon, not by the CLI, so the whole context must be transferred to the daemon. The Docker CLI reports -"Uploading context" when the context is sent to the daemon. +"Sending build context to Docker daemon" when the context is sent to the daemon. You can specify a repository and tag at which to save the new image if the build succeeds: @@ -57,7 +57,7 @@ accelerating `docker build` significantly (indicated by `Using cache`): When you're done with your build, you're ready to look into [*Pushing a repository to its registry*]( -/use/workingwithrepository/#image-push). +/userguide/dockerrepos/#image-push). ## Format @@ -95,7 +95,7 @@ The `FROM` instruction sets the [*Base Image*](/terms/image/#base-image-def) for subsequent instructions. As such, a valid Dockerfile must have `FROM` as its first instruction. The image can be any valid image – it is especially easy to start by **pulling an image** from the [*Public Repositories*]( -/use/workingwithrepository/#using-public-repositories). +/userguide/dockerrepos/#using-public-repositories). `FROM` must be the first non-comment instruction in the Dockerfile. @@ -131,6 +131,16 @@ any point in an image's history, much like source control. The *exec* form makes it possible to avoid shell string munging, and to `RUN` commands using a base image that does not contain `/bin/sh`. +The cache for `RUN` instructions isn't invalidated automatically during the +next build. The cache for an instruction like `RUN apt-get dist-upgrade -y` +will be reused during the next build. +The cache for `RUN` instructions can be invalidated by using the `--no-cache` +flag, for example `docker build --no-cache`. + +The first encountered `ADD` instruction will invalidate the cache for all +following instructions from the 'Dockerfile' if the contents of the context +have changed. This will also invalidate the cache for `RUN` instructions. + ### Known Issues (RUN) - [Issue 783](https://github.com/dotcloud/docker/issues/783) is about file @@ -144,7 +154,7 @@ commands using a base image that does not contain `/bin/sh`. 
CMD has three forms: -- `CMD ["executable","param1","param2"]` (like an *exec*, preferred form) +- `CMD ["executable","param1","param2"]` (like an *exec*, this is the preferred form) - `CMD ["param1","param2"]` (as *default parameters to ENTRYPOINT*) - `CMD command param1 param2` (as a *shell*) @@ -190,10 +200,8 @@ default specified in CMD. The `EXPOSE` instructions informs Docker that the container will listen on the specified network ports at runtime. Docker uses this information to interconnect -containers using links (see -[*links*](/use/working_with_links_names/#working-with-links-names)), -and to setup port redirection on the host system (see [*Redirect Ports*]( -/use/port_redirection/#port-redirection)). +containers using links (see the [Docker User +Guide](/userguide/dockerlinks)). ## ENV @@ -225,7 +233,9 @@ being built (also called the *context* of the build) or a remote file URL. `` is the absolute path to which the source will be copied inside the destination container. -All new files and directories are created with mode 0755, uid and gid 0. +All new files and directories are created with a uid and gid of 0. + +In the case where `` is a remote file URL, the destination will have permissions 600. > **Note**: > If you build using STDIN (`docker build - < somefile`), there is no @@ -278,6 +288,46 @@ The copy obeys the following rules: - If `` doesn't exist, it is created along with all missing directories in its path. +## COPY + + COPY + +The `COPY` instruction will copy new files from `` and add them to the +container's filesystem at path ``. + +`` must be the path to a file or directory relative to the source directory +being built (also called the *context* of the build). + +`` is the absolute path to which the source will be copied inside the +destination container. + +All new files and directories are created with a uid and gid of 0. + +> **Note**: +> If you build using STDIN (`docker build - < somefile`), there is no +> build context, so `COPY` can't be used. + +The copy obeys the following rules: + +- The `` path must be inside the *context* of the build; + you cannot `COPY ../something /something`, because the first step of a + `docker build` is to send the context directory (and subdirectories) to the + docker daemon. + +- If `` is a directory, the entire directory is copied, including + filesystem metadata. + +- If `` is any other kind of file, it is copied individually along with + its metadata. In this case, if `` ends with a trailing slash `/`, it + will be considered a directory and the contents of `` will be written + at `/base()`. + +- If `` does not end with a trailing slash, it will be considered a + regular file and the contents of `` will be written at ``. + +- If `` doesn't exist, it is created along with all missing directories + in its path. + ## ENTRYPOINT ENTRYPOINT has two forms: @@ -325,15 +375,17 @@ optional but default, you could use a CMD: The `VOLUME` instruction will create a mount point with the specified name and mark it as holding externally mounted volumes from native host or other -containers. For more information/examples and mounting instructions via docker -client, refer to [*Share Directories via Volumes*]( -/use/working_with_volumes/#volume-def) documentation. +containers. The value can be a JSON array, `VOLUME ["/var/log/"]`, or a plain +string, `VOLUME /var/log`. For more information/examples and mounting +instructions via the Docker client, refer to [*Share Directories via Volumes*]( +/userguide/dockervolumes/#volume-def) documentation. 
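To see several of these instructions working together, here is a small
illustrative `Dockerfile` written from the shell; the `nginx` package, the
`index.html` file in the build context, and the chosen paths are assumptions
for this sketch, not part of the reference:

    $ cat > Dockerfile <<'EOF'
    FROM ubuntu
    # Install the web server
    RUN apt-get update && apt-get install -y nginx
    # Copy a file from the build context into the image
    ADD index.html /var/www/index.html
    # Mark the log directory as an externally mountable volume (JSON form)
    VOLUME ["/var/log/nginx"]
    # Document the port the container listens on
    EXPOSE 80
    # Default command, in the preferred exec form
    CMD ["/usr/sbin/nginx", "-g", "daemon off;"]
    EOF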
## USER USER daemon -The `USER` instruction sets the username or UID to use when running the image. +The `USER` instruction sets the username or UID to use when running the image +and for any following `RUN` directives. ## WORKDIR diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 8e0507cbf8..add664abe5 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -50,35 +50,39 @@ expect an integer, and they can only be specified once. ## daemon Usage of docker: + --api-enable-cors=false Enable CORS headers in the remote API + -b, --bridge="" Attach containers to a pre-existing network bridge + use 'none' to disable container networking + --bip="" Use this CIDR notation address for the network bridge's IP, not compatible with -b + -d, --daemon=false Enable daemon mode + -D, --debug=false Enable debug mode + --dns=[] Force docker to use specific DNS servers + --dns-search=[] Force Docker to use specific DNS search domains + -e, --exec-driver="native" Force the docker runtime to use a specific exec driver + -G, --group="docker" Group to assign the unix socket specified by -H when running in daemon mode + use '' (the empty string) to disable setting of a group + -g, --graph="/var/lib/docker" Path to use as the root of the docker runtime + -H, --host=[] The socket(s) to bind to in daemon mode + specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. + --icc=true Enable inter-container communication + --ip="0.0.0.0" Default IP address to use when binding container ports + --ip-forward=true Enable net.ipv4.ip_forward + --iptables=true Enable Docker's addition of iptables rules + --mtu=0 Set the containers network MTU + if no value is provided: default to the default route MTU or 1500 if no default route is available + -p, --pidfile="/var/run/docker.pid" Path to use for daemon PID file + -r, --restart=true Restart previously running containers + -s, --storage-driver="" Force the docker runtime to use a specific storage driver + --storage-opt=[] Set storage driver options + --selinux-enabled=false Enable selinux support + --tls=false Use TLS; implied by tls-verify flags + --tlscacert="/home/sven/.docker/ca.pem" Trust only remotes providing a certificate signed by the CA given here + --tlscert="/home/sven/.docker/cert.pem" Path to TLS certificate file + --tlskey="/home/sven/.docker/key.pem" Path to TLS key file + --tlsverify=false Use TLS and verify the remote (daemon: verify client, client: verify daemon) + -v, --version=false Print version information and quit - -D, --debug=false: Enable debug mode - -H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. 
- -G, --group="docker": Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group - --api-enable-cors=false: Enable CORS headers in the remote API - -b, --bridge="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking - -bip="": Use this CIDR notation address for the network bridge᾿s IP, not compatible with -b - -d, --daemon=false: Enable daemon mode - --dns=[]: Force docker to use specific DNS servers - --dns-search=[]: Force Docker to use specific DNS search domains - --enable-selinux=false: Enable selinux support for running containers - -g, --graph="/var/lib/docker": Path to use as the root of the docker runtime - --icc=true: Enable inter-container communication - --ip="0.0.0.0": Default IP address to use when binding container ports - --ip-forward=true: Enable net.ipv4.ip_forward - --iptables=true: Enable Docker᾿s addition of iptables rules - -p, --pidfile="/var/run/docker.pid": Path to use for daemon PID file - -r, --restart=true: Restart previously running containers - -s, --storage-driver="": Force the docker runtime to use a specific storage driver - -e, --exec-driver="native": Force the docker runtime to use a specific exec driver - -v, --version=false: Print version information and quit - --tls=false: Use TLS; implied by tls-verify flags - --tlscacert="~/.docker/ca.pem": Trust only remotes providing a certificate signed by the CA given here - --tlscert="~/.docker/cert.pem": Path to TLS certificate file - --tlskey="~/.docker/key.pem": Path to TLS key file - --tlsverify=false: Use TLS and verify the remote (daemon: verify client, client: verify daemon) - --mtu=0: Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available - - Options with [] may be specified multiple times. +Options with [] may be specified multiple times. The Docker daemon is the persistent process that manages containers. Docker uses the same binary for both the daemon and client. To run the @@ -100,9 +104,9 @@ To use lxc as the execution driver, use `docker -d -e lxc`. The docker client will also honor the `DOCKER_HOST` environment variable to set the `-H` flag for the client. - $ docker -H tcp://0.0.0.0:4243 ps + $ docker -H tcp://0.0.0.0:2375 ps # or - $ export DOCKER_HOST="tcp://0.0.0.0:4243" + $ export DOCKER_HOST="tcp://0.0.0.0:2375" $ docker ps # both are equal @@ -126,12 +130,12 @@ like this: ## attach -Attach to a running container. + Usage: docker attach [OPTIONS] CONTAINER - Usage: docker attach CONTAINER + Attach to a running container - --no-stdin=false: Do not attach stdin - --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) + --no-stdin=false Do not attach stdin + --sig-proxy=true Proxify all received signal to the process (even in non-tty mode) The `attach` command will allow you to view or interact with any running container, detached (`-d`) @@ -185,15 +189,15 @@ To kill the container, use `docker kill`. ## build -Build a new container image from the source code at PATH - Usage: docker build [OPTIONS] PATH | URL | - - -t, --tag="": Repository name (and optionally a tag) to be applied - to the resulting image in case of success. - -q, --quiet=false: Suppress the verbose output generated by the containers. - --no-cache: Do not use the cache when building the image. 
- --rm=true: Remove intermediate containers after a successful build + Build a new image from the source code at PATH + + --force-rm=false Always remove intermediate containers, even after unsuccessful builds + --no-cache=false Do not use cache when building the image + -q, --quiet=false Suppress the verbose output generated by the containers + --rm=true Remove intermediate containers after a successful build + -t, --tag="" Repository name (and optionally a tag) to be applied to the resulting image in case of success Use this command to build Docker images from a Dockerfile and a "context". @@ -201,14 +205,15 @@ and a "context". The files at `PATH` or `URL` are called the "context" of the build. The build process may refer to any of the files in the context, for example when using an [*ADD*](/reference/builder/#dockerfile-add) instruction. When a single Dockerfile is -given as `URL`, then no context is set. +given as `URL` or is piped through STDIN (`docker build - < Dockerfile`), then +no context is set. When a Git repository is set as `URL`, then the repository is used as the context. The Git repository is cloned with its submodules (git clone –recursive). A fresh git clone occurs in a temporary directory on your local host, and then this is sent to the Docker daemon as the context. This way, your local user credentials and -vpn's etc can be used to access private repositories +vpn's etc can be used to access private repositories. See also: @@ -255,7 +260,7 @@ happens at the client side (where you're running The transfer of context from the local machine to the Docker daemon is what the `docker` client means when you see the -"Uploading context" message. +"Sending build context" message. If you wish to keep the intermediate containers after the build is complete, you must use `--rm=false`. This does not @@ -283,14 +288,21 @@ repository is used as Dockerfile. Note that you can specify an arbitrary Git repository by using the `git://` schema. -## commit +> **Note:** `docker build` will return a `no such file or directory` error +> if the file or directory does not exist in the uploaded context. This may +> happen if there is no context, or if you specify a file that is elsewhere +> on the Host system. The context is limited to the current directory (and its +> children) for security reasons, and to ensure repeatable builds on remote +> Docker hosts. This is also the reason why `ADD ../file` will not work. -Create a new image from a container᾿s changes +## commit Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] - -m, --message="": Commit message - -a, --author="": Author (eg. "John Hannibal Smith " + Create a new image from a container's changes + + -a, --author="" Author (eg. "John Hannibal Smith " + -m, --message="" Commit message It can be useful to commit a container's file changes or settings into a new image. This allows you debug a container by running an interactive @@ -317,8 +329,7 @@ path. Paths are relative to the root of the filesystem. Usage: docker cp CONTAINER:PATH HOSTPATH - $ sudo docker cp 7bb0e258aefe:/etc/debian_version . - $ sudo docker cp blue_frog:/etc/hosts . + Copy files/folders from the PATH to the HOSTPATH ## diff @@ -326,6 +337,8 @@ List the changed files and directories in a container᾿s filesystem Usage: docker diff CONTAINER + Inspect changes on a container's filesystem + There are 3 events that are listed in the `diff`: 1. 
`A` - Add @@ -350,14 +363,12 @@ For example: ## events -Get real time events from the server + Usage: docker events [OPTIONS] - Usage: docker events + Get real time events from the server - --since="": Show all events created since timestamp - (either seconds since epoch, or date string as below) - --until="": Show events created before timestamp - (either seconds since epoch, or date string as below) + --since="" Show all events created since timestamp + --until="" Stream events until this timestamp ### Examples @@ -395,22 +406,22 @@ You'll need two shells for this example. ## export -Export the contents of a filesystem as a tar archive to STDOUT - Usage: docker export CONTAINER + Export the contents of a filesystem as a tar archive to STDOUT + For example: $ sudo docker export red_panda > latest.tar ## history -Show the history of an image - Usage: docker history [OPTIONS] IMAGE - --no-trunc=false: Don᾿t truncate output - -q, --quiet=false: Only show numeric IDs + Show the history of an image + + --no-trunc=false Don't truncate output + -q, --quiet=false Only show numeric IDs To see how the `docker:latest` image was built: @@ -425,13 +436,14 @@ To see how the `docker:latest` image was built: ## images -List images - Usage: docker images [OPTIONS] [NAME] - -a, --all=false: Show all images (by default filter out the intermediate image layers) - --no-trunc=false: Don᾿t truncate output - -q, --quiet=false: Only show numeric IDs + List images + + -a, --all=false Show all images (by default filter out the intermediate image layers) + -f, --filter=[] Provide filter values (e.g. 'dangling=true') + --no-trunc=false Don't truncate output + -q, --quiet=false Only show numeric IDs The default `docker images` will show all top level images, their repository and tags, and their virtual size. @@ -469,12 +481,51 @@ by default. tryout latest 2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB 5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB +### Filtering + +The filtering flag (-f or --filter) format is "key=value". If there is more +than one filter, pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). + +Current filters: + * dangling (boolean - true or false) + +#### Untagged images + + $ sudo docker images --filter "dangling=true" + + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + 8abc22fbb042 4 weeks ago 0 B + 48e5f45168b9 4 weeks ago 2.489 MB + bf747efa0e2f 4 weeks ago 0 B + 980fe10e5736 12 weeks ago 101.4 MB + dea752e4e117 12 weeks ago 101.4 MB + 511136ea3c5a 8 months ago 0 B + +This will display untagged images that are the leaves of the image tree (not +intermediary layers). These images occur when a new build of an image takes the +repo:tag away from the IMAGE ID, leaving it untagged. A warning will be issued +if you try to remove an image while a container is presently using it. +This flag allows for batch cleanup. + +Ready for use by `docker rmi ...`, like: + + $ sudo docker rmi $(sudo docker images -f "dangling=true" -q) + + 8abc22fbb042 + 48e5f45168b9 + bf747efa0e2f + 980fe10e5736 + dea752e4e117 + 511136ea3c5a + +NOTE: Docker will warn you if any containers exist that are using these untagged images. + + ## import Usage: docker import URL|- [REPOSITORY[:TAG]] - Create an empty filesystem image and import the contents of the tarball - (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.
+ Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it. URLs must start with `http` and point to a single file archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) containing a @@ -507,10 +558,12 @@ tar, then the ownerships might not get preserved. ## info -Display system-wide information. - Usage: docker info + Display system-wide information + +For example: + $ sudo docker info Containers: 292 Images: 194 @@ -528,11 +581,11 @@ ensure we know how your setup is configured. ## inspect -Return low-level information on a container/image - Usage: docker inspect CONTAINER|IMAGE [CONTAINER|IMAGE...] - -f, --format="": Format the output using the given go template. + Return low-level information on a container/image + + -f, --format="" Format the output using the given go template. By default, this will render all results in a JSON array. If a format is specified, the given template will be executed for each result. @@ -582,31 +635,22 @@ contains complex json object, so to grab it as JSON, you use ## kill -Kill a running container (send SIGKILL, or specified signal) - Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] - -s, --signal="KILL": Signal to send to the container + Kill a running container (send SIGKILL, or specified signal) + + -s, --signal="KILL" Signal to send to the container The main process inside the container will be sent SIGKILL, or any signal specified with option `--signal`. -### Known Issues (kill) - -- [Issue 197](https://github.com/dotcloud/docker/issues/197) indicates - that `docker kill` may leave directories behind - and make it difficult to remove the container. -- [Issue 3844](https://github.com/dotcloud/docker/issues/3844) lxc - 1.0.0 beta3 removed `lcx-kill` which is used by - Docker versions before 0.8.0; see the issue for a workaround. - ## load -Load an image from a tar archive on STDIN - Usage: docker load - -i, --input="": Read from a tar archive file, instead of STDIN + Load an image from a tar archive on STDIN + + -i, --input="" Read from a tar archive file, instead of STDIN Loads a tarred repository from a file or the standard input stream. Restores both images and tags. @@ -628,15 +672,15 @@ Restores both images and tags. ## login -Register or Login to the docker registry server - Usage: docker login [OPTIONS] [SERVER] - -e, --email="": Email - -p, --password="": Password - -u, --username="": Username + Register or Login to a docker registry server, if no server is specified "https://index.docker.io/v1/" is the default. -If you want to login to a private registry you can + -e, --email="" Email + -p, --password="" Password + -u, --username="" Username + +If you want to log in to a self-hosted registry you can specify this by adding the server name. example: @@ -644,12 +688,12 @@ specify this by adding the server name. ## logs -Fetch the logs of a container + Usage: docker logs CONTAINER - Usage: docker logs [OPTIONS] CONTAINER + Fetch the logs of a container - -f, --follow=false: Follow log output - -t, --timestamps=false: Show timestamps + -f, --follow=false Follow log output + -t, --timestamps=false Show timestamps The `docker logs` command batch-retrieves all logs present at the time of execution. @@ -660,24 +704,24 @@ and stderr.
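For example, a brief sketch (the container name `web` is hypothetical): the `-f` and `-t` flags listed above can be combined to stream a container's output with timestamps:

    $ sudo docker logs -f -t web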
## port - Usage: docker port [OPTIONS] CONTAINER PRIVATE_PORT + Usage: docker port CONTAINER PRIVATE_PORT -Lookup the public-facing port which is NAT-ed to PRIVATE_PORT + Lookup the public-facing port which is NAT-ed to PRIVATE_PORT ## ps -List containers - Usage: docker ps [OPTIONS] - -a, --all=false: Show all containers. Only running containers are shown by default. - --before="": Show only container created before Id or Name, include non-running ones. - -l, --latest=false: Show only the latest created container, include non-running ones. - -n=-1: Show n last created containers, include non-running ones. - --no-trunc=false: Don᾿t truncate output - -q, --quiet=false: Only display numeric IDs - -s, --size=false: Display sizes, not to be used with -q - --since="": Show only containers created since Id or Name, include non-running ones. + List containers + + -a, --all=false Show all containers. Only running containers are shown by default. + --before="" Show only container created before Id or Name, include non-running ones. + -l, --latest=false Show only the latest created container, include non-running ones. + -n=-1 Show n last created containers, include non-running ones. + --no-trunc=false Don't truncate output + -q, --quiet=false Only display numeric IDs + -s, --size=false Display sizes + --since="" Show only containers created since Id or Name, include non-running ones. Running `docker ps` showing 2 linked containers. @@ -691,10 +735,10 @@ Running `docker ps` showing 2 linked containers. ## pull -Pull an image or a repository from the registry - Usage: docker pull NAME[:TAG] + Pull an image or a repository from the registry + Most of your images will be created on top of a base image from the [Docker.io](https://index.docker.io) registry. @@ -713,30 +757,30 @@ use `docker pull`: ## push -Push an image or a repository to the registry - Usage: docker push NAME[:TAG] -Use `docker push` to share your images on public or -private registries. + Push an image or a repository to the registry + +Use `docker push` to share your images on the [Docker.io](https://index.docker.io) +registry or on a self-hosted one. ## restart -Restart a running container + Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] - Usage: docker restart [OPTIONS] NAME + Restart a running container - -t, --time=10: Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10 + -t, --time=10 Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10 ## rm -Remove one or more containers + Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] - Usage: docker rm [OPTIONS] CONTAINER + Remove one or more containers - -l, --link="": Remove the link instead of the actual container - -f, --force=false: Force removal of running container - -v, --volumes=false: Remove the volumes associated to the container + -f, --force=false Force removal of running container + -l, --link=false Remove the specified link and not the underlying container + -v, --volumes=false Remove the volumes associated to the container ### Known Issues (rm) @@ -768,12 +812,12 @@ delete them. Any running containers will not be deleted. ## rmi -Remove one or more images - Usage: docker rmi IMAGE [IMAGE...]
- -f, --force=false: Force - --no-prune=false: Do not delete untagged parents + Remove one or more images + + -f, --force=false Force + --no-prune=false Do not delete untagged parents ### Removing tagged images @@ -805,43 +849,43 @@ removed before the image is removed. ## run -Run a command in a new container + Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] - Usage: docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] + Run a command in a new container - -a, --attach=[] Attach to stdin, stdout or stderr. - -c, --cpu-shares=0 CPU shares (relative weight) - --cidfile="" Write the container ID to the file - -d, --detach=false Detached mode: Run container in the background, print new container id - --dns=[] Set custom dns servers - --dns-search=[] Set custom dns search domains - -e, --env=[] Set environment variables - --entrypoint="" Overwrite the default entrypoint of the image - --env-file=[] Read in a line delimited file of ENV variables - --expose=[] Expose a port from the container without publishing it to your host - -h, --hostname="" Container host name - -i, --interactive=false Keep stdin open even if not attached - --link=[] Add link to another container (name:alias) - --lxc-conf=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" - -m, --memory="" Memory limit (format: , where unit = b, k, m or g) - --name="" Assign a name to the container - --net="bridge" Set the Network mode for the container - 'bridge': creates a new network stack for the container on the docker bridge - 'none': no networking for this container - 'container:': reuses another container network stack - 'host': use the host network stack inside the contaner - -p, --publish=[] Publish a container's port to the host - format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort - (use 'docker port' to see the actual mapping) - -P, --publish-all=false Publish all exposed ports to the host interfaces - --privileged=false Give extended privileges to this container - --rm=false Automatically remove the container when it exits (incompatible with -d) - --sig-proxy=true Proxify all received signal to the process (even in non-tty mode) - -t, --tty=false Allocate a pseudo-tty - -u, --user="" Username or UID - -v, --volume=[] Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container) - --volumes-from=[] Mount volumes from the specified container(s) - -w, --workdir="" Working directory inside the container + -a, --attach=[] Attach to stdin, stdout or stderr. 
+ -c, --cpu-shares=0 CPU shares (relative weight) + --cidfile="" Write the container ID to the file + -d, --detach=false Detached mode: Run container in the background, print new container id + --dns=[] Set custom dns servers + --dns-search=[] Set custom dns search domains + -e, --env=[] Set environment variables + --entrypoint="" Overwrite the default entrypoint of the image + --env-file=[] Read in a line delimited file of ENV variables + --expose=[] Expose a port from the container without publishing it to your host + -h, --hostname="" Container host name + -i, --interactive=false Keep stdin open even if not attached + --link=[] Add link to another container (name:alias) + --lxc-conf=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" + -m, --memory="" Memory limit (format: , where unit = b, k, m or g) + --name="" Assign a name to the container + --net="bridge" Set the Network mode for the container + 'bridge': creates a new network stack for the container on the docker bridge + 'none': no networking for this container + 'container:': reuses another container network stack + 'host': use the host network stack inside the container + -p, --publish=[] Publish a container's port to the host + format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort + (use 'docker port' to see the actual mapping) + -P, --publish-all=false Publish all exposed ports to the host interfaces + --privileged=false Give extended privileges to this container + --rm=false Automatically remove the container when it exits (incompatible with -d) + --sig-proxy=true Proxify all received signal to the process (even in non-tty mode) + -t, --tty=false Allocate a pseudo-tty + -u, --user="" Username or UID + -v, --volume=[] Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container) + --volumes-from=[] Mount volumes from the specified container(s) + -w, --workdir="" Working directory inside the container The `docker run` command first `creates` a writeable container layer over the specified image, and then `starts` it using the specified command. That is, @@ -853,11 +897,9 @@ of all containers. The `docker run` command can be used in combination with `docker commit` to [*change the command that a container runs*](#commit-an-existing-container). -See [*Redirect Ports*](/use/port_redirection/#port-redirection) -for more detailed information about the `--expose`, `-p`, `-P` and `--link` -parameters, and [*Link Containers*]( -/use/working_with_links_names/#working-with-links-names) for specific -examples using `--link`. +See the [Docker User Guide](/userguide/dockerlinks/) for more detailed +information about the `--expose`, `-p`, `-P` and `--link` parameters, +and linking containers. ### Known Issues (run –volumes-from) @@ -923,16 +965,16 @@ manipulate the host's docker daemon. $ sudo docker run -p 127.0.0.1:80:8080 ubuntu bash -This binds port `8080` of the container to port `80` on `127.0.0.1` of the host -machine. [*Redirect Ports*](/use/port_redirection/#port-redirection) +This binds port `8080` of the container to port `80` on `127.0.0.1` of +the host machine. The [Docker User Guide](/userguide/dockerlinks/) explains in detail how to manipulate ports in Docker. $ sudo docker run --expose 80 ubuntu bash -This exposes port `80` of the container for use within a link without publishing -the port to the host system's interfaces.
[*Redirect Ports*]( -/use/port_redirection/#port-redirection) explains in detail how to -manipulate ports in Docker. +This exposes port `80` of the container for use within a link without +publishing the port to the host system's interfaces. The [Docker User +Guide](/userguide/dockerlinks) explains in detail how to manipulate +ports in Docker. $ sudo docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash @@ -1052,11 +1094,11 @@ application change: ## save -Save an image to a tar archive (streamed to stdout by default) - Usage: docker save IMAGE - -o, --output="": Write to an file, instead of STDOUT + Save an image to a tar archive (streamed to stdout by default) + + -o, --output="" Write to a file, instead of STDOUT Produces a tarred repository to the standard output stream. Contains all parent layers, and all tags + versions, or specified repo:tag. @@ -1065,11 +1107,11 @@ It is used to create a backup that can then be used with `docker load` $ sudo docker save busybox > busybox.tar - $ ls -sh b.tar - 2.7M b.tar + $ ls -sh busybox.tar + 2.7M busybox.tar $ sudo docker save --output busybox.tar busybox - $ ls -sh b.tar - 2.7M b.tar + $ ls -sh busybox.tar + 2.7M busybox.tar $ sudo docker save -o fedora-all.tar fedora $ sudo docker save -o fedora-latest.tar fedora:latest @@ -1079,59 +1121,68 @@ Search [Docker.io](https://index.docker.io) for images Usage: docker search TERM - --no-trunc=false: Don᾿t truncate output - -s, --stars=0: Only displays with at least xxx stars - -t, --trusted=false: Only show trusted builds + Search the docker index for images + + --no-trunc=false Don't truncate output + -s, --stars=0 Only displays with at least xxx stars + --automated=false Only show automated builds See [*Find Public Images on Docker.io*]( -/use/workingwithrepository/#find-public-images-on-dockerio) for +/userguide/dockerrepos/#find-public-images-on-dockerio) for more details on finding shared images from the commandline. ## start -Start a stopped container + Usage: docker start CONTAINER [CONTAINER...] - Usage: docker start [OPTIONS] CONTAINER + Restart a stopped container - -a, --attach=false: Attach container᾿s stdout/stderr and forward all signals to the process - -i, --interactive=false: Attach container᾿s stdin + -a, --attach=false Attach container's stdout/stderr and forward all signals to the process + -i, --interactive=false Attach container's stdin + +When run on a container that has already been started, +it takes no action and succeeds unconditionally. ## stop -Stop a running container (Send SIGTERM, and then SIGKILL after grace period) - Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] - -t, --time=10: Number of seconds to wait for the container to stop before killing it. + Stop a running container (Send SIGTERM, and then SIGKILL after grace period) + + -t, --time=10 Number of seconds to wait for the container to stop before killing it. The main process inside the container will receive SIGTERM, and after a grace period, SIGKILL ## tag -Tag an image into a repository - Usage: docker tag [OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG] - -f, --force=false: Force + Tag an image into a repository + + -f, --force=false Force You can group your images together using names and tags, and then upload them to [*Share Images via Repositories*]( -/use/workingwithrepository/#working-with-the-repository). +/userguide/dockerrepos/#working-with-the-repository).
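For example, a minimal sketch (the image ID and repository name here are hypothetical) giving an existing image an additional name in the usage form above:

    $ sudo docker tag 0e5574283393 myregistryhost/fedora/httpd:version1.0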
## top Usage: docker top CONTAINER [ps OPTIONS] -Lookup the running processes of a container + Lookup the running processes of a container ## version -Show the version of the Docker client, daemon, and latest released -version. + Usage: docker version + + Show the docker version information. + +Show the Docker version, API version, Git commit, and Go version of +both Docker client and daemon. ## wait - Usage: docker wait [OPTIONS] NAME + Usage: docker wait CONTAINER [CONTAINER...] -Block until a container stops, then print its exit code. + Block until a container stops, then print its exit code. diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 09c2b642a1..7d5fcbc51f 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -11,21 +11,17 @@ The [*Image*](/terms/image/#image-def) which starts the process may define defaults related to the binary to run, the networking to expose, and more, but `docker run` gives final control to the operator who starts the container from the image. That's the main -reason [*run*](/commandline/cli/#cli-run) has more options than any +reason [*run*](/reference/commandline/cli/#cli-run) has more options than any other `docker` command. -Every one of the [*Examples*](/examples/#example-list) shows -running containers, and so here we try to give more in-depth guidance. - ## General Form -As you`ve seen in the [*Examples*](/examples/#example-list), the -basic run command takes this form: +The basic `docker run` command takes this form: $ docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] To learn how to interpret the types of `[OPTIONS]`, -see [*Option types*](/commandline/cli/#cli-options). +see [*Option types*](/reference/commandline/cli/#cli-options). The list of `[OPTIONS]` breaks down into two groups: @@ -54,10 +50,10 @@ following options. - [Detached (-d)](#detached-d) - [Foreground](#foreground) - [Container Identification](#container-identification) - - [Name (–name)](#name-name) + - [Name (--name)](#name-name) - [PID Equivalent](#pid-equivalent) - [Network Settings](#network-settings) - - [Clean Up (–rm)](#clean-up-rm) + - [Clean Up (--rm)](#clean-up-rm) - [Runtime Constraints on CPU and Memory](#runtime-constraints-on-cpu-and-memory) - [Runtime Privilege and LXC @@ -75,9 +71,9 @@ default foreground mode: In detached mode (`-d=true` or just `-d`), all I/O should be done through network connections or shared volumes because the container is -no longer listening to the commandline where you executed `docker run`. +no longer listening to the command line where you executed `docker run`. You can reattach to a detached container with `docker` -[*attach*](commandline/cli/#attach). If you choose to run a +[*attach*](/reference/commandline/cli/#attach). If you choose to run a container in the detached mode, then you cannot use the `--rm` option. ### Foreground @@ -85,7 +81,7 @@ container in the detached mode, then you cannot use the `--rm` option. In foreground mode (the default when `-d` is not specified), `docker run` can start the process in the container and attach the console to the process's standard input, output, and standard error. It can even pretend to be a TTY -(this is what most commandline executables expect) and pass along signals. All +(this is what most command line executables expect) and pass along signals. 
All of that is configurable: -a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr`` @@ -121,11 +117,11 @@ assign a name to the container with `--name` then the daemon will also generate a random string name too. The name can become a handy way to add meaning to a container since you can use this name when defining -[*links*](/use/working_with_links_names/#working-with-links-names) +[*links*](/userguide/dockerlinks/#working-with-links-names) (or any other place you need to identify a container). This works for both background and foreground Docker containers. -### PID Equivalent +### PID Equivalent And finally, to help with automation, you can have Docker write the container ID out to a file of your choosing. This is similar to how some @@ -136,9 +132,8 @@ PID files): ## Network Settings - --dns=[] : Set custom dns servers for the container - --net="bridge": Set the Network mode for the container ('bridge': creates a new network stack for the container on the docker bridge, 'none': no networking for this container, 'container:': reuses another container network stack), 'host': use the host network stack inside the container - --net="bridge" Set the Network mode for the container + --dns=[] : Set custom dns servers for the container + --net="bridge" : Set the Network mode for the container 'bridge': creates a new network stack for the container on the docker bridge 'none': no networking for this container 'container:': reuses another container network stack @@ -235,7 +230,7 @@ By default, Docker containers are "unprivileged" and cannot, for example, run a Docker daemon inside a Docker container. This is because by default a container is not allowed to access any devices, but a "privileged" container is given access to all devices (see [lxc-template.go]( -https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go) +https://github.com/dotcloud/docker/blob/master/daemon/execdriver/lxc/lxc_template.go) and documentation on [cgroups devices]( https://www.kernel.org/doc/Documentation/cgroups/devices.txt)). @@ -250,14 +245,14 @@ If the Docker daemon was started using the `lxc` exec-driver (`docker -d --exec-driver=lxc`) then the operator can also specify LXC options using one or more `--lxc-conf` parameters. These can be new parameters or override existing parameters from the [lxc-template.go]( -https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go). +https://github.com/dotcloud/docker/blob/master/daemon/execdriver/lxc/lxc_template.go). Note that in the future, a given host's docker daemon may not use LXC, so this is an implementation-specific configuration meant for operators already familiar with using LXC directly. ## Overriding Dockerfile Image Defaults -When a developer builds an image from a [*Dockerfile*](builder/#dockerbuilder) +When a developer builds an image from a [*Dockerfile*](/reference/builder/#dockerbuilder) or when she commits it, the developer can set a number of default parameters that take effect when the image starts up as a container. @@ -426,7 +421,7 @@ mechanism to communicate with a linked container by its alias: --volumes-from="": Mount all volumes from the given container(s) The volumes commands are complex enough to have their own documentation in -section [*Share Directories via Volumes*](/use/working_with_volumes/#volume-def). +section [*Share Directories via Volumes*](/userguide/dockervolumes/#volume-def). 
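As a minimal sketch (the host path `/host/logs` and the container name `data` are hypothetical), an operator can bind mount a host directory read-only, or mount all volumes from another container:

    # Bind mount a host directory into the container, read-only
    $ docker run -v /host/logs:/logs:ro -i -t ubuntu bash

    # Mount all volumes defined by the container named 'data'
    $ docker run --volumes-from data -i -t ubuntu bash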
A developer can define one or more `VOLUME's associated with an image, but only the operator can give access from one container to another (or from a container to a volume mounted on the host). @@ -440,6 +435,8 @@ but the operator can override it: -u="": Username or UID +> **Note:** if you pass a numeric uid, it must be in the range 0-2147483647. + ## WORKDIR The default working directory for running binaries within a container is the diff --git a/docs/sources/terms/container.md b/docs/sources/terms/container.md index 5bedc3160e..8b42868788 100644 --- a/docs/sources/terms/container.md +++ b/docs/sources/terms/container.md @@ -8,18 +8,19 @@ page_keywords: containers, lxc, concepts, explanation, image, container ![](/terms/images/docker-filesystems-busyboxrw.png) -Once you start a process in Docker from an [*Image*](image.md), Docker fetches -the image and its [*Parent Image*](image.md), and repeats the process until it -reaches the [*Base Image*](image.md/#base-image-def). Then the -[*Union File System*](layer.md) adds a read-write layer on top. That read-write -layer, plus the information about its [*Parent Image*](image.md) and some -additional information like its unique id, networking configuration, and -resource limits is called a **container**. +Once you start a process in Docker from an [*Image*](/terms/image), Docker +fetches the image and its [*Parent Image*](/terms/image), and repeats the +process until it reaches the [*Base Image*](/terms/image/#base-image-def). Then +the [*Union File System*](/terms/layer) adds a read-write layer on top. That +read-write layer, plus the information about its [*Parent +Image*](/terms/image) +and some additional information like its unique id, networking +configuration, and resource limits is called a **container**. ## Container State -Containers can change, and so they have state. A container may be **running** or -**exited**. +Containers can change, and so they have state. A container may be +**running** or **exited**. When a container is running, the idea of a "container" also includes a tree of processes running on the CPU, isolated from the other processes @@ -31,13 +32,13 @@ processes restart from scratch (their memory state is **not** preserved in a container), but the file system is just as it was when the container was stopped. -You can promote a container to an [*Image*](image.md) with `docker commit`. +You can promote a container to an [*Image*](/terms/image) with `docker commit`. Once a container is an image, you can use it as a parent for new containers. ## Container IDs All containers are identified by a 64 hexadecimal digit string (internally a 256bit value). To simplify their use, a short ID of the -first 12 characters can be used on the commandline. There is a small +first 12 characters can be used on the command line. There is a small possibility of short id collisions, so the docker server will always return the long ID. diff --git a/docs/sources/terms/image.md b/docs/sources/terms/image.md index b10debcc6a..40438be631 100644 --- a/docs/sources/terms/image.md +++ b/docs/sources/terms/image.md @@ -8,10 +8,10 @@ page_keywords: containers, lxc, concepts, explanation, image, container ![](/terms/images/docker-filesystems-debian.png) -In Docker terminology, a read-only [*Layer*](../layer/#layer-def) is +In Docker terminology, a read-only [*Layer*](/terms/layer/#layer-def) is called an **image**. An image never changes.
-Since Docker uses a [*Union File System*](../layer/#ufs-def), the +Since Docker uses a [*Union File System*](/terms/layer/#ufs-def), the processes think the whole file system is mounted read-write. But all the changes go to the top-most writeable layer, and underneath, the original file in the read-only image is unchanged. Since images don't change, diff --git a/docs/sources/terms/layer.md b/docs/sources/terms/layer.md index b4b2ea4b7a..561807fc44 100644 --- a/docs/sources/terms/layer.md +++ b/docs/sources/terms/layer.md @@ -7,7 +7,7 @@ page_keywords: containers, lxc, concepts, explanation, image, container ## Introduction In a traditional Linux boot, the kernel first mounts the root [*File -System*](../filesystem/#filesystem-def) as read-only, checks its +System*](/terms/filesystem/#filesystem-def) as read-only, checks its integrity, and then switches the whole rootfs volume to read-write mode. ## Layer diff --git a/docs/sources/terms/registry.md b/docs/sources/terms/registry.md index 2006710607..08f8e8f69d 100644 --- a/docs/sources/terms/registry.md +++ b/docs/sources/terms/registry.md @@ -6,9 +6,9 @@ page_keywords: containers, concepts, explanation, image, repository, container ## Introduction -A Registry is a hosted service containing [*repositories*]( -../repository/#repository-def) of [*images*](../image/#image-def) which -responds to the Registry API. +A Registry is a hosted service containing +[*repositories*](/terms/repository/#repository-def) of +[*images*](/terms/image/#image-def) which responds to the Registry API. The default registry can be accessed using a browser at [Docker.io](http://index.docker.io) or using the @@ -16,5 +16,5 @@ The default registry can be accessed using a browser at ## Further Reading -For more information see [*Working with Repositories*]( -../use/workingwithrepository/#working-with-the-repository) +For more information see [*Working with +Repositories*](/userguide/dockerrepos/#working-with-the-repository) diff --git a/docs/sources/terms/repository.md b/docs/sources/terms/repository.md index 1e035c95f4..52c83d45d8 100644 --- a/docs/sources/terms/repository.md +++ b/docs/sources/terms/repository.md @@ -7,7 +7,7 @@ page_keywords: containers, concepts, explanation, image, repository, container ## Introduction A repository is a set of images either on your local Docker server, or -shared, by pushing it to a [*Registry*](../registry/#registry-def) +shared, by pushing it to a [*Registry*](/terms/registry/#registry-def) server. Images can be associated with a repository (or multiple) by giving them @@ -31,5 +31,5 @@ If you create a new repository which you want to share, you will need to set at least the `user_name`, as the `default` blank `user_name` prefix is reserved for official Docker images. 
-For more information see [*Working with Repositories*]( -../use/workingwithrepository/#working-with-the-repository) +For more information see [*Working with +Repositories*](/userguide/dockerrepos/#working-with-the-repository) diff --git a/docs/sources/use.md b/docs/sources/use.md deleted file mode 100644 index 5b2524361e..0000000000 --- a/docs/sources/use.md +++ /dev/null @@ -1,13 +0,0 @@ -# Use - -## Contents: - - - [First steps with Docker](basics/) - - [Share Images via Repositories](workingwithrepository/) - - [Redirect Ports](port_redirection/) - - [Configure Networking](networking/) - - [Automatically Start Containers](host_integration/) - - [Share Directories via Volumes](working_with_volumes/) - - [Link Containers](working_with_links_names/) - - [Link via an Ambassador Container](ambassador_pattern_linking/) - - [Using Puppet](puppet/) \ No newline at end of file diff --git a/docs/sources/use/ambassador_pattern_linking.md b/docs/sources/use/ambassador_pattern_linking.md deleted file mode 100644 index 2bdd434f6e..0000000000 --- a/docs/sources/use/ambassador_pattern_linking.md +++ /dev/null @@ -1,155 +0,0 @@ -page_title: Link via an Ambassador Container -page_description: Using the Ambassador pattern to abstract (network) services -page_keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming - -# Link via an Ambassador Container - -## Introduction - -Rather than hardcoding network links between a service consumer and -provider, Docker encourages service portability. - -eg, instead of - - (consumer) --> (redis) - -requiring you to restart the `consumer` to attach it -to a different `redis` service, you can add -ambassadors - - (consumer) --> (redis-ambassador) --> (redis) - - or - - (consumer) --> (redis-ambassador) ---network---> (redis-ambassador) --> (redis) - -When you need to rewire your consumer to talk to a different redis -server, you can just restart the `redis-ambassador` -container that the consumer is connected to. - -This pattern also allows you to transparently move the redis server to a -different docker host from the consumer. - -Using the `svendowideit/ambassador` container, the -link wiring is controlled entirely from the `docker run` -parameters. - -## Two host Example - -Start actual redis server on one Docker host - - big-server $ docker run -d -name redis crosbymichael/redis - -Then add an ambassador linked to the redis server, mapping a port to the -outside world - - big-server $ docker run -d -link redis:redis -name redis_ambassador -p 6379:6379 svendowideit/ambassador - -On the other host, you can set up another ambassador setting environment -variables for each remote port we want to proxy to the -`big-server` - - client-server $ docker run -d -name redis_ambassador -expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador - -Then on the `client-server` host, you can use a -redis client container to talk to the remote redis server, just by -linking to the local redis ambassador. 
- - client-server $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli - redis 172.17.0.160:6379> ping - PONG - -## How it works - -The following example shows what the `svendowideit/ambassador` -container does automatically (with a tiny amount of `sed`) - -On the docker host (192.168.1.52) that redis will run on: - - # start actual redis server - $ docker run -d -name redis crosbymichael/redis - - # get a redis-cli container for connection testing - $ docker pull relateiq/redis-cli - - # test the redis server by talking to it directly - $ docker run -t -i -rm -link redis:redis relateiq/redis-cli - redis 172.17.0.136:6379> ping - PONG - ^D - - # add redis ambassador - $ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 busybox sh - -in the redis_ambassador container, you can see the linked redis -containers'senv - - $ env - REDIS_PORT=tcp://172.17.0.136:6379 - REDIS_PORT_6379_TCP_ADDR=172.17.0.136 - REDIS_NAME=/redis_ambassador/redis - HOSTNAME=19d7adf4705e - REDIS_PORT_6379_TCP_PORT=6379 - HOME=/ - REDIS_PORT_6379_TCP_PROTO=tcp - container=lxc - REDIS_PORT_6379_TCP=tcp://172.17.0.136:6379 - TERM=xterm - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PWD=/ - -This environment is used by the ambassador socat script to expose redis -to the world (via the -p 6379:6379 port mapping) - - $ docker rm redis_ambassador - $ sudo ./contrib/mkimage-unittest.sh - $ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 docker-ut sh - - $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379 - -then ping the redis server via the ambassador - -Now goto a different server - - $ sudo ./contrib/mkimage-unittest.sh - $ docker run -t -i -expose 6379 -name redis_ambassador docker-ut sh - - $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379 - -and get the redis-cli image so we can talk over the ambassador bridge - - $ docker pull relateiq/redis-cli - $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli - redis 172.17.0.160:6379> ping - PONG - -## The svendowideit/ambassador Dockerfile - -The `svendowideit/ambassador` image is a small -busybox image with `socat` built in. When you start -the container, it uses a small `sed` script to parse -out the (possibly multiple) link environment variables to set up the -port forwarding. On the remote host, you need to set the variable using -the `-e` command line option. - -`--expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379` -will forward the local `1234` port to the -remote IP and port - in this case `192.168.1.52:6379`. - - # - # - # first you need to build the docker-ut image - # using ./contrib/mkimage-unittest.sh - # then - # docker build -t SvenDowideit/ambassador . 
- # docker tag SvenDowideit/ambassador ambassador - # then to run it (on the host that has the real backend on it) - # docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 ambassador - # on the remote host, you can set up another ambassador - # docker run -t -i -name redis_ambassador -expose 6379 sh - - FROM docker-ut - MAINTAINER SvenDowideit@home.org.au - - - CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh && top diff --git a/docs/sources/use/networking.md b/docs/sources/use/networking.md deleted file mode 100644 index 00d0684256..0000000000 --- a/docs/sources/use/networking.md +++ /dev/null @@ -1,138 +0,0 @@ -page_title: Configure Networking -page_description: Docker networking -page_keywords: network, networking, bridge, docker, documentation - -# Configure Networking - -## Introduction - -Docker uses Linux bridge capabilities to provide network connectivity to -containers. The `docker0` bridge interface is -managed by Docker for this purpose. When the Docker daemon starts it : - - - creates the `docker0` bridge if not present - - searches for an IP address range which doesn't overlap with an existing route - - picks an IP in the selected range - - assigns this IP to the `docker0` bridge - - - - # List host bridges - $ sudo brctl show - bridge name bridge id STP enabled interfaces - docker0 8000.000000000000 no - - # Show docker0 IP address - $ sudo ifconfig docker0 - docker0 Link encap:Ethernet HWaddr xx:xx:xx:xx:xx:xx - inet addr:172.17.42.1 Bcast:0.0.0.0 Mask:255.255.0.0 - -At runtime, a [*specific kind of virtual interface*](#vethxxxx-device) -is given to each container which is then bonded to the `docker0` bridge. -Each container also receives a dedicated IP address from the same range -as `docker0`. The `docker0` IP address is used as the default gateway -for the container. - - # Run a container - $ sudo docker run -t -i -d base /bin/bash - 52f811c5d3d69edddefc75aff5a4525fc8ba8bcfa1818132f9dc7d4f7c7e78b4 - - $ sudo brctl show - bridge name bridge id STP enabled interfaces - docker0 8000.fef213db5a66 no vethQCDY1N - -Above, `docker0` acts as a bridge for the `vethQCDY1N` interface which -is dedicated to the 52f811c5d3d6 container. - -## How to use a specific IP address range - -Docker will try hard to find an IP range that is not used by the host. -Even though it works for most cases, it's not bullet-proof and sometimes -you need to have more control over the IP addressing scheme. - -For this purpose, Docker allows you to manage the `docker0` -bridge or your own one using the `-b=` -parameter. 
- -In this scenario: - - - ensure Docker is stopped - - create your own bridge (`bridge0` for example) - - assign a specific IP to this bridge - - start Docker with the `-b=bridge0` parameter - - - - # Stop Docker - $ sudo service docker stop - - # Clean docker0 bridge and - # add your very own bridge0 - $ sudo ifconfig docker0 down - $ sudo brctl addbr bridge0 - $ sudo ifconfig bridge0 192.168.227.1 netmask 255.255.255.0 - - # Edit your Docker startup file - $ echo "DOCKER_OPTS=\"-b=bridge0\"" >> /etc/default/docker - - # Start Docker - $ sudo service docker start - - # Ensure bridge0 IP is not changed by Docker - $ sudo ifconfig bridge0 - bridge0 Link encap:Ethernet HWaddr xx:xx:xx:xx:xx:xx - inet addr:192.168.227.1 Bcast:192.168.227.255 Mask:255.255.255.0 - - # Run a container - docker run -i -t base /bin/bash - - # Container IP in the 192.168.227/24 range - root@261c272cd7d5:/# ifconfig eth0 - eth0 Link encap:Ethernet HWaddr xx:xx:xx:xx:xx:xx - inet addr:192.168.227.5 Bcast:192.168.227.255 Mask:255.255.255.0 - - # bridge0 IP as the default gateway - root@261c272cd7d5:/# route -n - Kernel IP routing table - Destination Gateway Genmask Flags Metric Ref Use Iface - 0.0.0.0 192.168.227.1 0.0.0.0 UG 0 0 0 eth0 - 192.168.227.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0 - - # hits CTRL+P then CTRL+Q to detach - - # Display bridge info - $ sudo brctl show - bridge name bridge id STP enabled interfaces - bridge0 8000.fe7c2e0faebd no vethAQI2QT - -## Container intercommunication - -The value of the Docker daemon's `icc` parameter -determines whether containers can communicate with each other over the -bridge network. - - - The default, `-icc=true` allows containers to communicate with each other. - - `-icc=false` means containers are isolated from each other. - -Docker uses `iptables` under the hood to either -accept or drop communication between containers. - -## What is the vethXXXX device? - -Well. Things get complicated here. - -The `vethXXXX` interface is the host side of a -point-to-point link between the host and the corresponding container; -the other side of the link is the container's `eth0` -interface. This pair (host `vethXXX` and container -`eth0`) are connected like a tube. Everything that -comes in one side will come out the other side. - -All the plumbing is delegated to Linux network capabilities (check the -ip link command) and the namespaces infrastructure. - -## I want more - -Jérôme Petazzoni has create `pipework` to connect together containers in -arbitrarily complex scenarios: -[https://github.com/jpetazzo/pipework](https://github.com/jpetazzo/pipework) diff --git a/docs/sources/use/port_redirection.md b/docs/sources/use/port_redirection.md deleted file mode 100644 index 9f2ce98eae..0000000000 --- a/docs/sources/use/port_redirection.md +++ /dev/null @@ -1,124 +0,0 @@ -page_title: Redirect Ports -page_description: usage about port redirection -page_keywords: Usage, basic port, docker, documentation, examples - -# Redirect Ports - -## Introduction - -Interacting with a service is commonly done through a connection to a -port. When this service runs inside a container, one can connect to the -port after finding the IP address of the container as follows: - - # Find IP address of container with ID - $ docker inspect | grep IPAddress | cut -d '"' -f 4 - -However, this IP address is local to the host system and the container -port is not reachable by the outside world. Furthermore, even if the -port is used locally, e.g. 
by another container, this method is tedious -as the IP address of the container changes every time it starts. - -Docker addresses these two problems and give a simple and robust way to -access services running inside containers. - -To allow non-local clients to reach the service running inside the -container, Docker provide ways to bind the container port to an -interface of the host system. To simplify communication between -containers, Docker provides the linking mechanism. - -## Auto map all exposed ports on the host - -To bind all the exposed container ports to the host automatically, use -`docker run -P `. The mapped host ports -will be auto-selected from a pool of unused ports (49000..49900), and -you will need to use `docker ps`, `docker inspect ` or -`docker port ` to determine what they are. - -## Binding a port to a host interface - -To bind a port of the container to a specific interface of the host -system, use the `-p` parameter of the `docker run` command: - - # General syntax - $ docker run -p [([:[host_port]])|():][/udp] - -When no host interface is provided, the port is bound to all available -interfaces of the host machine (aka INADDR_ANY, or 0.0.0.0). When no -host port is provided, one is dynamically allocated. The possible -combinations of options for TCP port are the following: - - # Bind TCP port 8080 of the container to TCP port 80 on 127.0.0.1 of the host machine. - $ docker run -p 127.0.0.1:80:8080 - - # Bind TCP port 8080 of the container to a dynamically allocated TCP port on 127.0.0.1 of the host machine. - $ docker run -p 127.0.0.1::8080 - - # Bind TCP port 8080 of the container to TCP port 80 on all available interfaces of the host machine. - $ docker run -p 80:8080 - - # Bind TCP port 8080 of the container to a dynamically allocated TCP port on all available interfaces of the host machine. - $ docker run -p 8080 - -UDP ports can also be bound by adding a trailing `/udp`. All the -combinations described for TCP work. Here is only one example: - - # Bind UDP port 5353 of the container to UDP port 53 on 127.0.0.1 of the host machine. - $ docker run -p 127.0.0.1:53:5353/udp - -The command `docker port` lists the interface and port on the host machine -bound to a given container port. It is useful when using dynamically allocated -ports: - - # Bind to a dynamically allocated port - $ docker run -p 127.0.0.1::8080 --name dyn-bound - - # Lookup the actual port - $ docker port dyn-bound 8080 - 127.0.0.1:49160 - -## Linking a container - -Communication between two containers can also be established in a -docker-specific way called linking. - -To briefly present the concept of linking, let us consider two containers: -`server`, containing the service, and `client`, accessing the service. Once -`server` is running, `client` is started and links to server. Linking sets -environment variables in `client` giving it some information about `server`. -In this sense, linking is a method of service discovery. - -Let us now get back to our topic of interest; communication between the two -containers. We mentioned that the tricky part about this communication was that -the IP address of `server` was not fixed. Therefore, some of the environment -variables are going to be used to inform `client` about this IP address. This -process called exposure, is possible because `client` is started after `server` -has been started. - -Here is a full example. On `server`, the port of interest is exposed. 
The -exposure is done either through the `--expose` parameter to the `docker run` -command, or the `EXPOSE` build command in a Dockerfile: - - # Expose port 80 - $ docker run --expose 80 --name server - -The `client` then links to the `server`: - - # Link - $ docker run --name client --link server:linked-server - -`client` locally refers to `server` as `linked-server`. The following -environment variables, among others, are available on `client`: - - # The default protocol, ip, and port of the service running in the container - $ LINKED-SERVER_PORT=tcp://172.17.0.8:80 - - # A specific protocol, ip, and port of various services - $ LINKED-SERVER_PORT_80_TCP=tcp://172.17.0.8:80 - $ LINKED-SERVER_PORT_80_TCP_PROTO=tcp - $ LINKED-SERVER_PORT_80_TCP_ADDR=172.17.0.8 - $ LINKED-SERVER_PORT_80_TCP_PORT=80 - -This tells `client` that a service is running on port 80 of `server` and that -`server` is accessible at the IP address 172.17.0.8 - -Note: Using the `-p` parameter also exposes the port. diff --git a/docs/sources/use/working_with_links_names.md b/docs/sources/use/working_with_links_names.md deleted file mode 100644 index 6951e3c26f..0000000000 --- a/docs/sources/use/working_with_links_names.md +++ /dev/null @@ -1,140 +0,0 @@ -page_title: Link Containers -page_description: How to create and use both links and names -page_keywords: Examples, Usage, links, linking, docker, documentation, examples, names, name, container naming - -# Link Containers - -## Introduction - -From version 0.6.5 you are now able to `name` a container and `link` it to -another container by referring to its name. This will create a parent -> child -relationship where the parent container can see selected information about its -child. - -## Container Naming - -New in version v0.6.5. - -You can now name your container by using the `--name` flag. If no name is -provided, Docker will automatically generate a name. You can see this name -using the `docker ps` command. - - # format is "sudo docker run --name " - $ sudo docker run --name test ubuntu /bin/bash - - # the flag "-a" Show all containers. Only running containers are shown by default. - $ sudo docker ps -a - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 2522602a0d99 ubuntu:12.04 /bin/bash 14 seconds ago Exit 0 test - -## Links: service discovery for docker - -New in version v0.6.5. - -Links allow containers to discover and securely communicate with each -other by using the flag `-link name:alias`. Inter-container communication -can be disabled with the daemon flag `-icc=false`. With this flag set to -`false`, Container A cannot access Container unless explicitly allowed via -a link. This is a huge win for securing your containers. When two containers -are linked together Docker creates a parent child relationship between the -containers. The parent container will be able to access information via -environment variables of the child such as name, exposed ports, IP and other -selected environment variables. - -When linking two containers Docker will use the exposed ports of the container -to create a secure tunnel for the parent to access. If a database container -only exposes port 8080 then the linked container will only be allowed to access -port 8080 and nothing else if inter-container communication is set to false. - -For example, there is an image called `crosbymichael/redis` that exposes the -port 6379 and starts the Redis server. Let's name the container as `redis` -based on that image and run it as daemon. 
- - $ sudo docker run -d --name redis crosbymichael/redis - -We can issue all the commands that you would expect using the name `redis`; -start, stop, attach, using the name for our container. The name also allows -us to link other containers into this one. - -Next, we can start a new web application that has a dependency on Redis and -apply a link to connect both containers. If you noticed when running our Redis -server we did not use the `-p` flag to publish the Redis port to the host -system. Redis exposed port 6379 and this is all we need to establish a link. - - $ sudo docker run -t -i --link redis:db --name webapp ubuntu bash - -When you specified `--link redis:db` you are telling Docker to link the -container named `redis` into this new container with the alias `db`. -Environment variables are prefixed with the alias so that the parent container -can access network and environment information from the containers that are -linked into it. - -If we inspect the environment variables of the second container, we would see -all the information about the child container. - - $ root@4c01db0b339c:/# env - - HOSTNAME=4c01db0b339c - DB_NAME=/webapp/db - TERM=xterm - DB_PORT=tcp://172.17.0.8:6379 - DB_PORT_6379_TCP=tcp://172.17.0.8:6379 - DB_PORT_6379_TCP_PROTO=tcp - DB_PORT_6379_TCP_ADDR=172.17.0.8 - DB_PORT_6379_TCP_PORT=6379 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PWD=/ - SHLVL=1 - HOME=/ - container=lxc - _=/usr/bin/env - root@4c01db0b339c:/# - -Accessing the network information along with the environment of the child -container allows us to easily connect to the Redis service on the specific -IP and port in the environment. - -> **Note**: -> These Environment variables are only set for the first process in the -> container. Similarly, some daemons (such as `sshd`) -> will scrub them when spawning shells for connection. - -You can work around this by storing the initial `env` in a file, or looking -at `/proc/1/environ`. - -Running `docker ps` shows the 2 containers, and the `webapp/db` alias name for -the Redis container. - - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp - d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db - -## Resolving Links by Name - -New in version v0.11. - -Linked containers can be accessed by hostname. Hostnames are mapped by -appending entries to '/etc/hosts' using the linked container's alias. 
- -For example, linking a container using '--link redis:db' will generate the -following '/etc/hosts' file: - - root@6541a75d44a0:/# cat /etc/hosts - 172.17.0.3 6541a75d44a0 - 172.17.0.2 db - - 127.0.0.1 localhost - ::1 localhost ip6-localhost ip6-loopback - fe00::0 ip6-localnet - ff00::0 ip6-mcastprefix - ff02::1 ip6-allnodes - ff02::2 ip6-allrouters - root@6541a75d44a0:/# - -Using this mechanism, you can communicate with the linked container by -name: - - root@6541a75d44a0:/# echo PING | redis-cli -h db - PONG - root@6541a75d44a0:/# diff --git a/docs/sources/use/working_with_volumes.md b/docs/sources/use/working_with_volumes.md deleted file mode 100644 index 7d6136b85a..0000000000 --- a/docs/sources/use/working_with_volumes.md +++ /dev/null @@ -1,171 +0,0 @@ -page_title: Share Directories via Volumes -page_description: How to create and share volumes -page_keywords: Examples, Usage, volume, docker, documentation, examples - -# Share Directories via Volumes - -## Introduction - -A *data volume* is a specially-designated directory within one or more -containers that bypasses the [*Union File -System*](/terms/layer/#ufs-def) to provide several useful features -for persistent or shared data: - - - **Data volumes can be shared and reused between containers:** - This is the feature that makes data volumes so powerful. You can - use it for anything from hot database upgrades to custom backup or - replication tools. See the example below. - - **Changes to a data volume are made directly:** - Without the overhead of a copy-on-write mechanism. This is good for - very large files. - - **Changes to a data volume will not be included at the next commit:** - Because they are not recorded as regular filesystem changes in the - top layer of the [*Union File System*](/terms/layer/#ufs-def) - - **Volumes persist until no containers use them:** - As they are a reference counted resource. The container does not need to be - running to share its volumes, but running it can help protect it - against accidental removal via `docker rm`. - -Each container can have zero or more data volumes. - -New in version v0.3.0. - -## Getting Started - -Using data volumes is as simple as adding a `-v` -parameter to the `docker run` command. The -`-v` parameter can be used more than once in order -to create more volumes within the new container. To create a new -container with two new volumes: - - $ docker run -v /var/volume1 -v /var/volume2 busybox true - -This command will create the new container with two new volumes that -exits instantly (`true` is pretty much the smallest, -simplest program that you can run). Once created you can mount its -volumes in any other container using the `--volumes-from` -option; irrespective of whether the container is running or -not. - -Or, you can use the VOLUME instruction in a Dockerfile to add one or -more new volumes to any container created from that image: - - # BUILD-USING: $ docker build -t data . - # RUN-USING: $ docker run -name DATA data - FROM busybox - VOLUME ["/var/volume1", "/var/volume2"] - CMD ["/bin/true"] - -### Creating and mounting a Data Volume Container - -If you have some persistent data that you want to share between -containers, or want to use from non-persistent containers, it's best to -create a named Data Volume Container, and then to mount the data from -it. 
- -Create a named container with volumes to share (`/var/volume1` -and `/var/volume2`): - - $ docker run -v /var/volume1 -v /var/volume2 -name DATA busybox true - -Then mount those data volumes into your application containers: - - $ docker run -t -i -rm -volumes-from DATA -name client1 ubuntu bash - -You can use multiple `-volumes-from` parameters to -bring together multiple data volumes from multiple containers. - -Interestingly, you can mount the volumes that came from the -`DATA` container in yet another container via the -`client1` middleman container: - - $ docker run -t -i -rm -volumes-from client1 -name client2 ubuntu bash - -This allows you to abstract the actual data source from users of that -data, similar to [*Ambassador Pattern Linking*]( -../ambassador_pattern_linking/#ambassador-pattern-linking). - -If you remove containers that mount volumes, including the initial DATA -container, or the middleman, the volumes will not be deleted until there -are no containers still referencing those volumes. This allows you to -upgrade, or effectively migrate data volumes between containers. - -### Mount a Host Directory as a Container Volume: - - -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. - -You must specify an absolute path for `host-dir`. If `host-dir` is missing from -the command, then Docker creates a new volume. If `host-dir` is present but -points to a non-existent directory on the host, Docker will automatically -create this directory and use it as the source of the bind-mount. - -Note that this is not available from a Dockerfile due the portability and -sharing purpose of it. The `host-dir` volumes are entirely host-dependent -and might not work on any other machine. - -For example: - - # Usage: - # sudo docker run [OPTIONS] -v /(dir. on host):/(dir. in container):(Read-Write or Read-Only) [ARG..] - # Example: - $ sudo docker run -i -t -v /var/log:/logs_from_host:ro ubuntu bash - -The command above mounts the host directory `/var/log` into the container -with *read only* permissions as `/logs_from_host`. - -New in version v0.5.0. - -### Note for OS/X users and remote daemon users: - -OS/X users run `boot2docker` to create a minimalist virtual machine running -the docker daemon. That virtual machine then launches docker commands on -behalf of the OS/X command line. The means that `host directories` refer to -directories in the `boot2docker` virtual machine, not the OS/X filesystem. - -Similarly, anytime when the docker daemon is on a remote machine, the -`host directories` always refer to directories on the daemon's machine. - -### Backup, restore, or migrate data volumes - -You cannot back up volumes using `docker export`, `docker save` and `docker cp` -because they are external to images. Instead you can use `--volumes-from` to -start a new container that can access the data-container's volume. 
For example: - - $ sudo docker run -rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data - - - `-rm`: - remove the container when it exits - - `--volumes-from DATA`: - attach to the volumes shared by the `DATA` container - - `-v $(pwd):/backup`: - bind mount the current directory into the container; to write the tar file to - - `busybox`: - a small simpler image - good for quick maintenance - - `tar cvf /backup/backup.tar /data`: - creates an uncompressed tar file of all the files in the `/data` directory - -Then to restore to the same container, or another that you`ve made elsewhere: - - # create a new data container - $ sudo docker run -v /data -name DATA2 busybox true - # untar the backup files into the new container᾿s data volume - $ sudo docker run -rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar - data/ - data/sven.txt - # compare to the original container - $ sudo docker run -rm --volumes-from DATA -v `pwd`:/backup busybox ls /data - sven.txt - -You can use the basic techniques above to automate backup, migration and -restore testing using your preferred tools. - -## Known Issues - - - [Issue 2702](https://github.com/dotcloud/docker/issues/2702): - "lxc-start: Permission denied - failed to mount" could indicate a - permissions problem with AppArmor. Please see the issue for a - workaround. - - [Issue 2528](https://github.com/dotcloud/docker/issues/2528): the - busybox container is used to make the resulting container as small - and simple as possible - whenever you need to interact with the data - in the volume you mount it into another container. diff --git a/docs/sources/use/workingwithrepository.md b/docs/sources/use/workingwithrepository.md deleted file mode 100644 index 07f130a909..0000000000 --- a/docs/sources/use/workingwithrepository.md +++ /dev/null @@ -1,246 +0,0 @@ -page_title: Share Images via Repositories -page_description: Repositories allow users to share images. -page_keywords: repo, repositories, usage, pull image, push image, image, documentation - -# Share Images via Repositories - -## Introduction - -Docker is not only a tool for creating and managing your own -[*containers*](/terms/container/#container-def) – **Docker is also a -tool for sharing**. A *repository* is a shareable collection of tagged -[*images*](/terms/image/#image-def) that together create the file -systems for containers. The repository's name is a label that indicates -the provenance of the repository, i.e. who created it and where the -original copy is located. - -You can find one or more repositories hosted on a *registry*. There are -two types of *registry*: public and private. There's also a default -*registry* that Docker uses which is called -[Docker.io](http://index.docker.io). -[Docker.io](http://index.docker.io) is the home of -"top-level" repositories and public "user" repositories. The Docker -project provides [Docker.io](http://index.docker.io) to host public and -[private repositories](https://index.docker.io/plans/), namespaced by -user. We provide user authentication and search over all the public -repositories. - -Docker acts as a client for these services via the `docker search, pull, -login` and `push` commands. - -## Repositories - -### Local Repositories - -Docker images which have been created and labeled on your local Docker -server need to be pushed to a Public (by default they are pushed to -[Docker.io](http://index.docker.io)) or Private registry to be shared. 
- -### Public Repositories - -There are two types of public repositories: *top-level* repositories -which are controlled by the Docker team, and *user* repositories created -by individual contributors. Anyone can read from these repositories – -they really help people get started quickly! You could also use -[*Trusted Builds*](#trusted-builds) if you need to keep -control of who accesses your images. - -- Top-level repositories can easily be recognized by **not** having a - `/` (slash) in their name. These repositories represent trusted images - provided by the Docker team. -- User repositories always come in the form of `/`. - This is what your published images will look like if you push to the - public [Docker.io](http://index.docker.io) registry. -- Only the authenticated user can push to their *username* namespace on - a [Docker.io](http://index.docker.io) repository. -- User images are not curated, it is therefore up to you whether or not - you trust the creator of this image. - -### Private repositories - -You can also create private repositories on -[Docker.io](https://index.docker.io/plans/). These allow you to store -images that you don't want to share publicly. Only authenticated users -can push to private repositories. - -## Find Public Images on Docker.io - -You can search the [Docker.io](https://index.docker.io) registry or -using the command line interface. Searching can find images by name, -user name or description: - - $ sudo docker help search - Usage: docker search NAME - - Search the docker index for images - - -notrunc=false: Don᾿t truncate output - $ sudo docker search centos - Found 25 results matching your query ("centos") - NAME DESCRIPTION - centos - slantview/centos-chef-solo CentOS 6.4 with chef-solo. - ... - -There you can see two example results: `centos` and -`slantview/centos-chef-solo`. The second result -shows that it comes from the public repository of a user, -`slantview/`, while the first result -(`centos`) doesn't explicitly list a repository so -it comes from the trusted top-level namespace. The `/` -character separates a user's repository and the image name. - -Once you have found the image name, you can download it: - - # sudo docker pull - $ sudo docker pull centos - Pulling repository centos - 539c0211cd76: Download complete - -What can you do with that image? Check out the -[*Examples*](/examples/#example-list) and, when you're ready with -your own image, come back here to learn how to share it. - -## Contributing to Docker.io - -Anyone can pull public images from the -[Docker.io](http://index.docker.io) registry, but if you would like to -share one of your own images, then you must register a unique user name -first. You can create your username and login on -[Docker.io](https://index.docker.io/account/signup/), or by running - - $ sudo docker login - -This will prompt you for a username, which will become a public -namespace for your public repositories. - -If your username is available then `docker` will -also prompt you to enter a password and your e-mail address. It will -then automatically log you in. Now you're ready to commit and push your -own images! - -> **Note:** -> Your authentication credentials will be stored in the [`.dockercfg` -> authentication file](#authentication-file). - -## Committing a Container to a Named Image - -When you make changes to an existing image, those changes get saved to a -container's file system. You can then promote that container to become -an image by making a `commit`. 
In addition to converting the container -to an image, this is also your opportunity to name the image, -specifically a name that includes your user name from -[Docker.io](http://index.docker.io) (as you did a `login` above) and a -meaningful name for the image. - - # format is "sudo docker commit /" - $ sudo docker commit $CONTAINER_ID myname/kickassapp - -## Pushing a repository to its registry - -In order to push an repository to its registry you need to have named an -image, or committed your container to a named image (see above) - -Now you can push this repository to the registry designated by its name -or tag. - - # format is "docker push /" - $ sudo docker push myname/kickassapp - -## Trusted Builds - -Trusted Builds automate the building and updating of images from GitHub, -directly on `docker.io` servers. It works by adding -a commit hook to your selected repository, triggering a build and update -when you push a commit. - -### To setup a trusted build - -1. Create a [Docker.io account](https://index.docker.io/) and login. -2. Link your GitHub account through the `Link Accounts` menu. -3. [Configure a Trusted build](https://index.docker.io/builds/). -4. Pick a GitHub project that has a `Dockerfile` that you want to build. -5. Pick the branch you want to build (the default is the `master` branch). -6. Give the Trusted Build a name. -7. Assign an optional Docker tag to the Build. -8. Specify where the `Dockerfile` is located. The default is `/`. - -Once the Trusted Build is configured it will automatically trigger a -build, and in a few minutes, if there are no errors, you will see your -new trusted build on the [Docker.io](https://index.docker.io) Registry. -It will will stay in sync with your GitHub repo until you deactivate the -Trusted Build. - -If you want to see the status of your Trusted Builds you can go to your -[Trusted Builds page](https://index.docker.io/builds/) on the Docker -index, and it will show you the status of your builds, and the build -history. - -Once you`ve created a Trusted Build you can deactivate or delete it. You -cannot however push to a Trusted Build with the `docker push` command. -You can only manage it by committing code to your GitHub repository. - -You can create multiple Trusted Builds per repository and configure them -to point to specific `Dockerfile`'s or Git branches. - -## Private Registry - -Private registries are possible by hosting [your own -registry](https://github.com/dotcloud/docker-registry). - -> **Note**: -> You can also use private repositories on -> [Docker.io](https://index.docker.io/plans/). - -To push or pull to a repository on your own registry, you must prefix -the tag with the address of the registry's host (a `.` or `:` is used to -identify a host), like this: - - # Tag to create a repository with the full registry location. - # The location (e.g. localhost.localdomain:5000) becomes - # a permanent part of the repository name - $ sudo docker tag 0u812deadbeef localhost.localdomain:5000/repo_name - - # Push the new repository to its home location on localhost - $ sudo docker push localhost.localdomain:5000/repo_name - -Once a repository has your registry's host name as part of the tag, you -can push and pull it like any other repository, but it will **not** be -searchable (or indexed at all) on [Docker.io](http://index.docker.io), and there will be -no user name checking performed. Your registry will function completely -independently from the [Docker.io](http://index.docker.io) registry. 
- - - -See also - -[Docker Blog: How to use your own registry]( -http://blog.docker.io/2013/07/how-to-use-your-own-registry/) - -## Authentication File - -The authentication is stored in a json file, `.dockercfg` -located in your home directory. It supports multiple registry -urls. - -`docker login` will create the "[https://index.docker.io/v1/]( -https://index.docker.io/v1/)" key. - -`docker login https://my-registry.com` will create the -"[https://my-registry.com](https://my-registry.com)" key. - -For example: - - { - "https://index.docker.io/v1/": { - "auth": "xXxXxXxXxXx=", - "email": "email@example.com" - }, - "https://my-registry.com": { - "auth": "XxXxXxXxXxX=", - "email": "email@my-registry.com" - } - } - -The `auth` field represents -`base64(:)` diff --git a/docs/sources/userguide/dockerimages.md b/docs/sources/userguide/dockerimages.md new file mode 100644 index 0000000000..bd8b0d2c2e --- /dev/null +++ b/docs/sources/userguide/dockerimages.md @@ -0,0 +1,399 @@ +page_title: Working with Docker Images +page_description: How to work with Docker images. +page_keywords: documentation, docs, the docker guide, docker guide, docker, docker platform, virtualization framework, docker.io, Docker images, Docker image, image management, Docker repos, Docker repositories, docker, docker tag, docker tags, Docker.io, collaboration + +# Working with Docker Images + +In the [introduction](/introduction/) we've discovered that Docker +images are the basis of containers. In the +[previous](/userguide/dockerizing/) [sections](/userguide/usingdocker/) +we've used Docker images that already exist, for example the `ubuntu` +image and the `training/webapp` image. + +We've also discovered that Docker stores downloaded images on the Docker +host. If an image isn't already present on the host then it'll be +downloaded from a registry: by default the +[Docker.io](https://index.docker.io) public registry. + +In this section we're going to explore Docker images a bit more +including: + +* Managing and working with images locally on your Docker host; +* Creating basic images; +* Uploading images to [Docker.io](https://index.docker.io). + +## Listing images on the host + +Let's start with listing the images we have locally on our host. You can +do this using the `docker images` command like so: + + $ sudo docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + training/webapp latest fc77f57ad303 3 weeks ago 280.5 MB + ubuntu 13.10 5e019ab7bf6d 4 weeks ago 180 MB + ubuntu saucy 5e019ab7bf6d 4 weeks ago 180 MB + ubuntu 12.04 74fe38d11401 4 weeks ago 209.6 MB + ubuntu precise 74fe38d11401 4 weeks ago 209.6 MB + ubuntu 12.10 a7cf8ae4e998 4 weeks ago 171.3 MB + ubuntu quantal a7cf8ae4e998 4 weeks ago 171.3 MB + ubuntu 14.04 99ec81b80c55 4 weeks ago 266 MB + ubuntu latest 99ec81b80c55 4 weeks ago 266 MB + ubuntu trusty 99ec81b80c55 4 weeks ago 266 MB + ubuntu 13.04 316b678ddf48 4 weeks ago 169.4 MB + ubuntu raring 316b678ddf48 4 weeks ago 169.4 MB + ubuntu 10.04 3db9c44f4520 4 weeks ago 183 MB + ubuntu lucid 3db9c44f4520 4 weeks ago 183 MB + +We can see the images we've previously used in our [user guide](/userguide/). +Each has been downloaded from [Docker.io](https://index.docker.io) when we +launched a container using that image. + +We can see three crucial pieces of information about our images in the listing. + +* What repository they came from, for example `ubuntu`. +* The tags for each image, for example `14.04`. +* The image ID of each image. 
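+
+As an aside - the same trick is used again later in this section - you can
+pass a repository name to `docker images` to narrow the listing to that one
+repository, for example:
+
+    $ sudo docker images ubuntu
+    REPOSITORY  TAG     IMAGE ID      CREATED      VIRTUAL SIZE
+    ubuntu      13.10   5e019ab7bf6d  4 weeks ago  180 MB
+    ubuntu      saucy   5e019ab7bf6d  4 weeks ago  180 MB
+    . . .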
+
+A repository potentially holds multiple variants of an image. In the case of
+our `ubuntu` image we can see multiple variants covering Ubuntu 10.04, 12.04,
+12.10, 13.04, 13.10 and 14.04. Each variant is identified by a tag and you can
+refer to a tagged image like so:
+
+    ubuntu:14.04
+
+So when we run a container we refer to a tagged image like so:
+
+    $ sudo docker run -t -i ubuntu:14.04 /bin/bash
+
+If instead we wanted to run a container from the Ubuntu 12.04 image we'd use:
+
+    $ sudo docker run -t -i ubuntu:12.04 /bin/bash
+
+If you don't specify a variant, for example you just use `ubuntu`, then Docker
+will default to using the `ubuntu:latest` image.
+
+> **Tip:**
+> We recommend you always use a specific tagged image, for example
+> `ubuntu:12.04`. That way you always know exactly what variant of an image is
+> being used.
+
+## Getting a new image
+
+So how do we get new images? Well Docker will automatically download any image
+we use that isn't already present on the Docker host. But this can potentially
+add some time to the launch of a container. If we want to pre-load an image we
+can download it using the `docker pull` command. Let's say we'd like to
+download the `centos` image.
+
+    $ sudo docker pull centos
+    Pulling repository centos
+    b7de3133ff98: Pulling dependent layers
+    5cc9e91966f7: Pulling fs layer
+    511136ea3c5a: Download complete
+    ef52fb1fe610: Download complete
+    . . .
+
+We can see that each layer of the image has been pulled down and now we
+can run a container from this image and we won't have to wait to
+download the image.
+
+    $ sudo docker run -t -i centos /bin/bash
+    bash-4.1#
+
+## Finding images
+
+One of the features of Docker is that a lot of people have created Docker
+images for a variety of purposes. Many of these have been uploaded to
+[Docker.io](https://index.docker.io). We can search these images on the
+[Docker.io](https://index.docker.io) website.
+
+![indexsearch](/userguide/search.png)
+
+We can also search for images on the command line using the `docker search`
+command. Let's say our team wants an image with Ruby and Sinatra installed on
+which to do our web application development. We can search for a suitable image
+by using the `docker search` command to find all the images that contain the
+term `sinatra`.
+
+    $ sudo docker search sinatra
+    NAME                                  DESCRIPTION                                    STARS  OFFICIAL  AUTOMATED
+    training/sinatra                      Sinatra training image                         0                [OK]
+    marceldegraaf/sinatra                 Sinatra test app                               0
+    mattwarren/docker-sinatra-demo                                                       0                [OK]
+    luisbebop/docker-sinatra-hello-world                                                 0                [OK]
+    bmorearty/handson-sinatra             handson-ruby + Sinatra for Hands on with D...  0
+    subwiz/sinatra                                                                       0
+    bmorearty/sinatra                                                                    0
+    . . .
+
+We can see we've returned a lot of images that use the term `sinatra`: a list
+of image names, descriptions, Stars (which measure the social popularity of
+images - if a user likes an image then they can "star" it), and the Official
+and Automated build statuses. Official repositories are built and maintained by
+the [Stackbrew](https://github.com/dotcloud/stackbrew) project, and Automated
+repositories are [Automated Builds](/userguide/dockerrepos/#automated-builds)
+that allow you to validate the source and content of an image.
+
+We've reviewed the images available to use and we decided to use the
+`training/sinatra` image. So far we've seen two types of image repositories.
+Images like `ubuntu` are called base or root images. These base images are
+provided by Docker Inc and are built, validated and supported. They can be
+identified by their single-word names.
+
+We've also seen user images, for example the `training/sinatra` image we've
+chosen. A user image belongs to a member of the Docker community and is built
+and maintained by them. You can identify user images as they are always
+prefixed with the user name, here `training`, of the user that created them.
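+
+To make the distinction concrete, the two naming forms sit side by side like
+so (a purely illustrative sketch; the comments are ours, not `docker` output):
+
+    ubuntu              # a base image: a single-word name, no slash
+    training/sinatra    # a user image: username/imagename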
+
+## Pulling our image
+
+We've identified a suitable image, `training/sinatra`, and now we can
+download it using the `docker pull` command.
+
+    $ sudo docker pull training/sinatra
+
+The team can now use this image by running their own containers.
+
+    $ sudo docker run -t -i training/sinatra /bin/bash
+    root@a8cb6ce02d85:/#
+
+## Creating our own images
+
+The team has found the `training/sinatra` image pretty useful but it's not
+quite what we need and we need to make some changes to it. There are two ways
+we can update and create images.
+
+1. We can update a container created from an image and commit the results to an image.
+2. We can use a `Dockerfile` to specify instructions to create an image.
+
+### Updating and committing an image
+
+To update an image we first need to create a container from the image
+we'd like to update.
+
+    $ sudo docker run -t -i training/sinatra /bin/bash
+    root@0b2616b0e5a8:/#
+
+> **Note:**
+> Take note of the container ID that has been created, `0b2616b0e5a8`, as we'll
+> need it in a moment.
+
+Inside our running container let's add the `json` gem.
+
+    root@0b2616b0e5a8:/# gem install json
+
+Once this has completed let's exit our container using the `exit`
+command.
+
+Now we have a container with the change we want to make. We can then
+commit a copy of this container to an image using the `docker commit`
+command.
+
+    $ sudo docker commit -m="Added json gem" -a="Kate Smith" \
+    0b2616b0e5a8 ouruser/sinatra:v2
+    4f177bd27a9ff0f6dc2a830403925b5360bfe0b93d476f7fc3231110e7f71b1c
+
+Here we've used the `docker commit` command. We've specified two flags: `-m`
+and `-a`. The `-m` flag allows us to specify a commit message, much like you
+would with a commit on a version control system. The `-a` flag allows us to
+specify an author for our update.
+
+We've also specified the container we want to create this new image from,
+`0b2616b0e5a8` (the ID we recorded earlier) and we've specified a target for
+the image:
+
+    ouruser/sinatra:v2
+
+Let's break this target down. It consists of a new user, `ouruser`, that we're
+writing this image to. We've also specified the name of the image, here we're
+keeping the original image name `sinatra`. Finally we're specifying a tag for
+the image: `v2`.
+
+We can then look at our new `ouruser/sinatra` image using the `docker images`
+command.
+
+    $ sudo docker images
+    REPOSITORY          TAG     IMAGE ID      CREATED       VIRTUAL SIZE
+    training/sinatra    latest  5bc342fa0b91  10 hours ago  446.7 MB
+    ouruser/sinatra     v2      3c59e02ddd1a  10 hours ago  446.7 MB
+    ouruser/sinatra     latest  5db5f8471261  10 hours ago  446.7 MB
+
+To use our new image to create a container we can then:
+
+    $ sudo docker run -t -i ouruser/sinatra:v2 /bin/bash
+    root@78e82f680994:/#
+
+### Building an image from a `Dockerfile`
+
+Using the `docker commit` command is a pretty simple way of extending an image
+but it's a bit cumbersome and it's not easy to share a development process for
+images amongst a team. Instead we can use a new command, `docker build`, to
+build new images from scratch.
+
+To do this we create a `Dockerfile` that contains a set of instructions that
+tell Docker how to build our image.
+
+Let's create a directory and a `Dockerfile` first.
+
+    $ mkdir sinatra
+    $ cd sinatra
+    $ touch Dockerfile
+
+Each instruction creates a new layer of the image. Now let's look at a simple
+example of building our own Sinatra image for our development team.
+
+    # This is a comment
+    FROM ubuntu:14.04
+    MAINTAINER Kate Smith
+    RUN apt-get -qq update
+    RUN apt-get -qqy install ruby ruby-dev
+    RUN gem install sinatra
+
+Let's look at what our `Dockerfile` does. Each line consists of a capitalized
+instruction followed by a statement.
+
+    INSTRUCTION statement
+
+> **Note:**
+> We use `#` to indicate a comment.
+
+The first instruction `FROM` tells Docker what the source of our image is, in
+this case we're basing our new image on an Ubuntu 14.04 image.
+
+Next we use the `MAINTAINER` instruction to specify who maintains our new image.
+
+Lastly, we've specified three `RUN` instructions. A `RUN` instruction executes
+a command inside the image, for example installing a package. Here we're
+updating our APT cache, installing Ruby and RubyGems and then installing the
+Sinatra gem.
+
+> **Note:**
+> There are [a lot more instructions available to us in a Dockerfile](/reference/builder).
+
+Now let's take our `Dockerfile` and use the `docker build` command to build an image.
+
+    $ sudo docker build -t="ouruser/sinatra:v2" .
+    Uploading context 2.56 kB
+    Uploading context
+    Step 0 : FROM ubuntu:14.04
+     ---> 99ec81b80c55
+    Step 1 : MAINTAINER Kate Smith
+     ---> Running in 7c5664a8a0c1
+     ---> 2fa8ca4e2a13
+    Removing intermediate container 7c5664a8a0c1
+    Step 2 : RUN apt-get -qq update
+     ---> Running in b07cc3fb4256
+     ---> 50d21070ec0c
+    Removing intermediate container b07cc3fb4256
+    Step 3 : RUN apt-get -qqy install ruby ruby-dev
+     ---> Running in a5b038dd127e
+    Selecting previously unselected package libasan0:amd64.
+    (Reading database ... 11518 files and directories currently installed.)
+    Preparing to unpack .../libasan0_4.8.2-19ubuntu1_amd64.deb ...
+    . . .
+    Setting up ruby (1:1.9.3.4) ...
+    Setting up ruby1.9.1 (1.9.3.484-2ubuntu1) ...
+    Processing triggers for libc-bin (2.19-0ubuntu6) ...
+     ---> 2acb20f17878
+    Removing intermediate container a5b038dd127e
+    Step 4 : RUN gem install sinatra
+     ---> Running in 5e9d0065c1f7
+    . . .
+    Successfully installed rack-protection-1.5.3
+    Successfully installed sinatra-1.4.5
+    4 gems installed
+     ---> 324104cde6ad
+    Removing intermediate container 5e9d0065c1f7
+    Successfully built 324104cde6ad
+
+We've specified our `docker build` command and used the `-t` flag to identify
+our new image as belonging to the user `ouruser`, the repository name `sinatra`
+and given it the tag `v2`.
+
+We've also specified the location of our `Dockerfile` using the `.` to
+indicate a `Dockerfile` in the current directory.
+
+> **Note:**
+> You can also specify a path to a `Dockerfile`.
+
+Now we can see the build process at work. The first thing Docker does is
+upload the build context: basically the contents of the directory you're
+building in. This is done because the Docker daemon does the actual
+build of the image and it needs the local context to do it.
+
+Next we can see each instruction in the `Dockerfile` being executed
+step-by-step. We can see that each step creates a new container, runs
+the instruction inside that container and then commits that change -
+just like the `docker commit` work flow we saw earlier. When all the
+instructions have executed we're left with the `324104cde6ad` image
+(also helpfully tagged as `ouruser/sinatra:v2`) and all intermediate
+containers will get removed to clean things up.
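+
+As a quick way to double-check the result, the `docker history` command lists
+the layers an image is made of - one per instruction above. We're only
+sketching the command here; the exact columns of its output may vary:
+
+    # list the layers that make up our newly built image
+    $ sudo docker history ouruser/sinatra:v2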
+
+We can then create a container from our new image.
+
+    $ sudo docker run -t -i ouruser/sinatra /bin/bash
+    root@8196968dac35:/#
+
+> **Note:**
+> This is just the briefest introduction to creating images. We've
+> skipped a whole bunch of other instructions that you can use. We'll see more of
+> those instructions in later sections of the Guide or you can refer to the
+> [`Dockerfile`](/reference/builder/) reference for a
+> detailed description and examples of every instruction.
+
+## Setting tags on an image
+
+You can also add a tag to an existing image after you commit or build it. We
+can do this using the `docker tag` command. Let's add a new tag to our
+`ouruser/sinatra` image.
+
+    $ sudo docker tag 5db5f8471261 ouruser/sinatra:devel
+
+The `docker tag` command takes the ID of the image, here `5db5f8471261`, and our
+user name, the repository name and the new tag.
+
+Let's see our new tag using the `docker images` command.
+
+    $ sudo docker images ouruser/sinatra
+    REPOSITORY         TAG     IMAGE ID      CREATED       VIRTUAL SIZE
+    ouruser/sinatra    latest  5db5f8471261  11 hours ago  446.7 MB
+    ouruser/sinatra    devel   5db5f8471261  11 hours ago  446.7 MB
+    ouruser/sinatra    v2      5db5f8471261  11 hours ago  446.7 MB
+
+## Push an image to Docker.io
+
+Once you've built or created a new image you can push it to [Docker.io](
+https://index.docker.io) using the `docker push` command. This allows you to
+share it with others, either publicly or by pushing it into [a private
+repository](https://index.docker.io/plans/).
+
+    $ sudo docker push ouruser/sinatra
+    The push refers to a repository [ouruser/sinatra] (len: 1)
+    Sending image list
+    Pushing repository ouruser/sinatra (3 tags)
+    . . .
+
+## Remove an image from the host
+
+You can also remove images on your Docker host in a way [similar to
+containers](/userguide/usingdocker) using the `docker rmi` command.
+
+Let's delete the `training/sinatra` image as we don't need it anymore.
+
+    $ docker rmi training/sinatra
+    Untagged: training/sinatra:latest
+    Deleted: 5bc342fa0b91cabf65246837015197eecfa24b2213ed6a51a8974ae250fedd8d
+    Deleted: ed0fffdcdae5eb2c3a55549857a8be7fc8bc4241fb19ad714364cbfd7a56b22f
+    Deleted: 5c58979d73ae448df5af1d8142436d81116187a7633082650549c52c3a2418f0
+
+> **Note:** In order to remove an image from the host, please make sure
+> that there are no containers actively based on it.
+
+# Next steps
+
+Until now we've seen how to build individual applications inside Docker
+containers. Now learn how to build whole application stacks with Docker
+by linking together multiple Docker containers.
+
+Go to [Linking Containers Together](/userguide/dockerlinks).
+
diff --git a/docs/sources/userguide/dockerio.md b/docs/sources/userguide/dockerio.md
new file mode 100644
index 0000000000..e5c6c6dace
--- /dev/null
+++ b/docs/sources/userguide/dockerio.md
@@ -0,0 +1,73 @@
+page_title: Getting started with Docker.io
+page_description: Introductory guide to getting an account on Docker.io
+page_keywords: documentation, docs, the docker guide, docker guide, docker, docker platform, virtualization framework, docker.io, central service, services, how to, container, containers, automation, collaboration, collaborators, registry, repo, repository, technology, github webhooks, trusted builds
+
+# Getting Started with Docker.io
+
+*How do I use Docker.io?*
+
+In this section we're going to introduce you, very quickly, to
+[Docker.io](https://index.docker.io) and create an account.
+
+[Docker.io](https://www.docker.io) is the central hub for Docker. It
+helps you to manage Docker and its components. It provides services such
+as:
+
+* Hosting images.
+* User authentication.
+* Automated image builds and work flow tools like build triggers and web
+  hooks.
+* Integration with GitHub and BitBucket.
+
+Docker.io helps you collaborate with colleagues and get the most out of
+Docker.
+
+In order to use Docker.io you will need to register an account. Don't
+panic! It's totally free and really easy.
+
+## Creating a Docker.io Account
+
+There are two ways you can create a Docker.io account:
+
+* Via the web, or
+* Via the command line.
+
+### Sign up via the web!
+
+Fill in the [sign-up form](https://www.docker.io/account/signup/) and
+choose your user name and specify some details such as an email address.
+
+![Register using the sign-up page](/userguide/register-web.png)
+
+### Sign up via the command line
+
+You can also create a Docker.io account via the command line using the
+`docker login` command.
+
+    $ sudo docker login
+
+### Confirm your email
+
+Once you've filled in the form then check your email for a welcome
+message and activate your account.
+
+![Confirm your registration](/userguide/register-confirm.png)
+
+### Log in!
+
+Then you can log in using the web console:
+
+![Login using the web console](/userguide/login-web.png)
+
+Or via the command line and the `docker login` command:
+
+    $ sudo docker login
+
+Now your Docker.io account is active and ready for you to use!
+
+## Next steps
+
+Now let's start Dockerizing applications with our "Hello World!" exercise.
+
+Go to [Dockerizing Applications](/userguide/dockerizing).
+
diff --git a/docs/sources/userguide/dockerizing.md b/docs/sources/userguide/dockerizing.md
new file mode 100644
index 0000000000..79a2066c62
--- /dev/null
+++ b/docs/sources/userguide/dockerizing.md
@@ -0,0 +1,193 @@
+page_title: Dockerizing Applications: A "Hello World!"
+page_description: A simple "Hello World!" exercise that introduces you to Docker.
+page_keywords: docker guide, docker, docker platform, virtualization framework, how to, dockerize, dockerizing apps, dockerizing applications, container, containers
+
+# Dockerizing Applications: A "Hello World!"
+
+*So what's this Docker thing all about?*
+
+Docker allows you to run applications inside containers. Running an
+application inside a container takes a single command: `docker run`.
+
+## Hello World!
+
+Let's try it now.
+
+    $ sudo docker run ubuntu:14.04 /bin/echo "Hello World!"
+    Hello World!
+
+And you just launched your first container!
+
+So what just happened? Let's step through what the `docker run` command
+did.
+
+First we specified the `docker` binary and the command we wanted to
+execute, `run`. The `docker run` combination *runs* containers.
+
+Next we specified an image: `ubuntu:14.04`. This is the source of the container
+we ran. Docker calls this an image. In this case we used an Ubuntu 14.04
+operating system image.
+
+When you specify an image, Docker looks first for the image on your
+Docker host. If it can't find it then it downloads the image from the public
+image registry: [Docker.io](https://index.docker.io).
+
+Next we told Docker what command to run inside our new container:
+
+    /bin/echo "Hello World!"
+
+When our container was launched Docker created a new Ubuntu 14.04
+environment and then executed the `/bin/echo` command inside it. We saw
+the result on the command line:
+
+    Hello World!
+
+So what happened to our container after that? Well Docker containers
+only run as long as the command you specify is active. Here, as soon as
+`Hello World!` was echoed, the container stopped.
+
+## An Interactive Container
+
+Let's try the `docker run` command again, this time specifying a new
+command to run in our container.
+
+    $ sudo docker run -t -i ubuntu:14.04 /bin/bash
+    root@af8bae53bdd3:/#
+
+Here we've again specified the `docker run` command and launched an
+`ubuntu:14.04` image. But we've also passed in two flags: `-t` and `-i`.
+The `-t` flag assigns a pseudo-tty or terminal inside our new container
+and the `-i` flag allows us to make an interactive connection by
+grabbing the standard input (`STDIN`) of the container.
+
+We've also specified a new command for our container to run:
+`/bin/bash`. This will launch a Bash shell inside our container.
+
+So now when our container is launched we can see that we've got a
+command prompt inside it:
+
+    root@af8bae53bdd3:/#
+
+Let's try running some commands inside our container:
+
+    root@af8bae53bdd3:/# pwd
+    /
+    root@af8bae53bdd3:/# ls
+    bin boot dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var
+
+You can see we've run the `pwd` command to show our current directory and can
+see we're in the `/` root directory. We've also done a directory listing
+of the root directory which shows us what looks like a typical Linux
+file system.
+
+You can play around inside this container and when you're done you can
+use the `exit` command to finish.
+
+    root@af8bae53bdd3:/# exit
+
+As with our previous container, once the Bash shell process has
+finished, the container is stopped.
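+
+As a quick aside, a stopped container isn't gone: `docker ps` only shows
+running containers, but adding the `-a` flag (sketched below) will list
+stopped ones too, so you can confirm the container we just exited is still
+around:
+
+    # list all containers, including stopped ones
+    $ sudo docker ps -a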
+
+## A Daemonized Hello World!
+
+Now a container that runs a command and then exits has some uses but
+it's not overly helpful. Let's create a container that runs as a daemon,
+like most of the applications we're probably going to run with Docker.
+
+Again we can do this with the `docker run` command:
+
+    $ sudo docker run -d ubuntu:14.04 /bin/sh -c "while true; do echo hello world; sleep 1; done"
+    1e5535038e285177d5214659a068137486f96ee5c2e85a4ac52dc83f2ebe4147
+
+Wait, what? Where's our "Hello World!"? Let's look at what we've run here.
+It should look pretty familiar. We ran `docker run` but this time we
+specified a flag: `-d`. The `-d` flag tells Docker to run the container
+and put it in the background, to daemonize it.
+
+We also specified the same image: `ubuntu:14.04`.
+
+Finally, we specified a command to run:
+
+    /bin/sh -c "while true; do echo hello world; sleep 1; done"
+
+This is the (hello) world's silliest daemon: a shell script that echoes
+`hello world` forever.
+
+So why aren't we seeing any `hello world`'s? Instead Docker has returned
+a really long string:
+
+    1e5535038e285177d5214659a068137486f96ee5c2e85a4ac52dc83f2ebe4147
+
+This really long string is called a *container ID*. It uniquely
+identifies a container so we can work with it.
+
+> **Note:**
+> The container ID is a bit long and unwieldy and a bit later
+> on we'll see a shorter ID and some ways to name our containers to make
+> working with them easier.
+
+We can use this container ID to see what's happening with our `hello
+world` daemon.
+
+First, let's make sure our container is running. We can
+do that with the `docker ps` command. The `docker ps` command queries
+the Docker daemon for information about all the containers it knows
+about.
+
+    $ docker ps
+    CONTAINER ID  IMAGE         COMMAND               CREATED        STATUS       PORTS  NAMES
+    1e5535038e28  ubuntu:14.04  /bin/sh -c 'while tr  2 minutes ago  Up 1 minute         insane_babbage
+
+Here we can see our daemonized container. The `docker ps` command has returned
+some useful information about it, starting with a shorter variant of its
+container ID: `1e5535038e28`.
+
+We can also see the image we used to build it, `ubuntu:14.04`, the command it
+is running, its status and an automatically assigned name,
+`insane_babbage`.
+
+> **Note:**
+> Docker automatically names any containers you start, a
+> little later on we'll see how you can specify your own names.
+
+Okay, so we now know it's running. But is it doing what we asked it to do? To see this
+we're going to look inside the container using the `docker logs`
+command. Let's use the container name Docker assigned.
+
+    $ sudo docker logs insane_babbage
+    hello world
+    hello world
+    hello world
+    . . .
+
+The `docker logs` command looks inside the container and returns its standard
+output: in this case the output of our command `hello world`.
+
+Awesome! Our daemon is working and we've just created our first
+Dockerized application!
+
+Now that we've established we can create our own containers, let's tidy up
+after ourselves and stop our daemonized container. To do this we use the
+`docker stop` command.
+
+    $ sudo docker stop insane_babbage
+    insane_babbage
+
+The `docker stop` command tells Docker to politely stop the running
+container. If it succeeds it will return the name of the container it
+has just stopped.
+
+Let's check it worked with the `docker ps` command.
+
+    $ docker ps
+    CONTAINER ID  IMAGE  COMMAND  CREATED  STATUS  PORTS  NAMES
+
+Excellent. Our container has been stopped.
+
+# Next steps
+
+Now we've seen how simple it is to get started with Docker let's learn how to
+do some more advanced tasks.
+
+Go to [Working With Containers](/userguide/usingdocker).
+
diff --git a/docs/sources/userguide/dockerlinks.md b/docs/sources/userguide/dockerlinks.md
new file mode 100644
index 0000000000..5c94879bf0
--- /dev/null
+++ b/docs/sources/userguide/dockerlinks.md
@@ -0,0 +1,241 @@
+page_title: Linking Containers Together
+page_description: Learn how to connect Docker containers together.
+page_keywords: Examples, Usage, user guide, links, linking, docker, documentation, examples, names, name, container naming, port, map, network port, network
+
+# Linking Containers Together
+
+In [the Using Docker section](/userguide/usingdocker) we touched on
+connecting to a service running inside a Docker container via a network
+port. This is one of the ways that you can interact with services and
+applications running inside Docker containers.
+In this section we're going to give you a refresher on connecting to a
+Docker container via a network port as well as introduce you to the
+concepts of container linking.
+
+## Network port mapping refresher
+
+In [the Using Docker section](/userguide/usingdocker) we created a
+container that ran a Python Flask application.
+
+    $ sudo docker run -d -P training/webapp python app.py
+
+> **Note:**
+> Containers have an internal network and an IP address
+> (remember we used the `docker inspect` command to show the container's
+> IP address in the [Using Docker](/userguide/usingdocker/) section).
+> Docker can have a variety of network configurations. You can see more
+> information on Docker networking [here](/articles/networking/).
+
+When we created that container we used the `-P` flag to automatically map any
+network ports inside that container to a random high port from the range 49000
+to 49900 on our Docker host. When we subsequently ran `docker ps` we saw that
+port 5000 was bound to port 49155.
+
+    $ sudo docker ps nostalgic_morse
+    CONTAINER ID  IMAGE                   COMMAND        CREATED        STATUS        PORTS                    NAMES
+    bc533791f3f5  training/webapp:latest  python app.py  5 seconds ago  Up 2 seconds  0.0.0.0:49155->5000/tcp  nostalgic_morse
+
+We also saw how we can bind a container's ports to a specific port using
+the `-p` flag.
+
+    $ sudo docker run -d -p 5000:5000 training/webapp python app.py
+
+And we saw why this isn't such a great idea because it constrains us to
+only one container on that specific port.
+
+There are also a few other ways we can configure the `-p` flag. By
+default the `-p` flag will bind the specified port to all interfaces on
+the host machine. But we can also specify a binding to a specific
+interface, for example only to the `localhost`.
+
+    $ sudo docker run -d -p 127.0.0.1:5000:5000 training/webapp python app.py
+
+This would bind port 5000 inside the container to port 5000 on the
+`localhost` or `127.0.0.1` interface on the host machine.
+
+Or to bind port 5000 of the container to a dynamic port but only on the
+`localhost`, we could:
+
+    $ sudo docker run -d -p 127.0.0.1::5000 training/webapp python app.py
+
+We can also bind UDP ports by adding a trailing `/udp`, for example:
+
+    $ sudo docker run -d -p 127.0.0.1:5000:5000/udp training/webapp python app.py
+
+We also saw the useful `docker port` shortcut which showed us the
+current port bindings; this is also useful for showing us specific port
+configurations. For example if we've bound the container port to the
+`localhost` on the host machine this will be shown in the `docker port`
+output.
+
+    $ docker port nostalgic_morse
+    127.0.0.1:49155
+
+> **Note:**
+> The `-p` flag can be used multiple times to configure multiple ports.
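+
+For instance - an illustrative combination of the forms above, not a command
+from this guide - a single run could publish one TCP port on the `localhost`
+interface and one UDP port on all interfaces:
+
+    # publish container port 5000 on localhost TCP, and 5001 on all interfaces as UDP
+    $ sudo docker run -d -p 127.0.0.1:5000:5000 -p 5001:5001/udp training/webapp python app.py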
+
+## Docker Container Linking
+
+Network port mappings are not the only way Docker containers can connect
+to one another. Docker also has a linking system that allows you to link
+multiple containers together and share connection information between
+them. Docker linking will create a parent-child relationship where the
+child container can see selected information about its parent.
+
+## Container naming
+
+To perform this linking Docker relies on the names of your containers.
+We've already seen that each container we create has an automatically
+created name, indeed we've become familiar with our old friend
+`nostalgic_morse` during this guide. You can also name containers
+yourself. This naming provides two useful functions:
+
+1. It's useful to name containers that do specific functions in a way
+   that makes it easier for you to remember them, for example naming a
+   container with a web application in it `web`.
+
+2. It provides Docker with a reference point that allows it to refer to other
+   containers, for example link container `web` to container `db`.
+
+You can name your container by using the `--name` flag, for example:
+
+    $ sudo docker run -d -P --name web training/webapp python app.py
+
+You can see we've launched a new container and used the `--name` flag to
+call the container `web`. We can see the container's name using the
+`docker ps` command.
+
+    $ sudo docker ps -l
+    CONTAINER ID  IMAGE                   COMMAND        CREATED       STATUS        PORTS                    NAMES
+    aed84ee21bde  training/webapp:latest  python app.py  12 hours ago  Up 2 seconds  0.0.0.0:49154->5000/tcp  web
+
+We can also use `docker inspect` to return the container's name.
+
+    $ sudo docker inspect -f "{{ .Name }}" aed84ee21bde
+    /web
+
+> **Note:**
+> Container names have to be unique. That means you can only call
+> one container `web`. If you want to re-use a container name you must delete the
+> old container with the `docker rm` command before you can create a new
+> container with the same name. As an alternative you can use the `--rm`
+> flag with the `docker run` command. This will delete the container
+> immediately after it stops.
+
+## Container Linking
+
+Links allow containers to discover and securely communicate with each
+other. To create a link you use the `--link` flag. Let's create a new
+container, this one a database.
+
+    $ sudo docker run -d --name db training/postgres
+
+Here we've created a new container called `db` using the `training/postgres`
+image, which contains a PostgreSQL database.
+
+Now let's create a new `web` container and link it with our `db` container.
+
+    $ sudo docker run -d -P --name web --link db:db training/webapp python app.py
+
+This will link the new `web` container with the `db` container we created
+earlier. The `--link` flag takes the form:
+
+    --link name:alias
+
+Where `name` is the name of the container we're linking to and `alias` is an
+alias for the link name. We'll see how that alias gets used shortly.
+
+Let's look at our linked containers using `docker ps`.
+
+    $ docker ps
+    CONTAINER ID  IMAGE                     COMMAND               CREATED             STATUS             PORTS                    NAMES
+    349169744e49  training/postgres:latest  su postgres -c '/usr  About a minute ago  Up About a minute  5432/tcp                 db
+    aed84ee21bde  training/webapp:latest    python app.py         16 hours ago        Up 2 minutes       0.0.0.0:49154->5000/tcp  db/web,web
+
+We can see our named containers, `db` and `web`, and we can see that the `web`
+container also shows `db/web` in the `NAMES` column. This tells us that the
+`web` container is linked to the `db` container in a parent/child relationship.
+
+So what does linking the containers do? Well we've discovered the link creates
+a parent-child relationship between the two containers. The child container,
+here `web`, can access information on the parent container `db`. To do this
+Docker creates a secure tunnel between the containers without the need to
+expose any ports externally on the container. You'll note when we started the
+`db` container we did not use either of the `-P` or `-p` flags. As we're
+linking the containers we don't need to expose the PostgreSQL database via the
+network.
+
+Docker exposes connectivity information for the parent container inside the
+child container in two ways:
+
+* Environment variables,
+* Updating the `/etc/hosts` file.
+
+Let's look first at the environment variables Docker sets. Inside the `web`
+container let's run the `env` command to list the container's environment
+variables.
+
+    root@aed84ee21bde:/opt/webapp# env
+    HOSTNAME=aed84ee21bde
+    . . .
+    DB_NAME=/web/db
+    DB_PORT=tcp://172.17.0.5:5432
+    DB_PORT_5432_TCP=tcp://172.17.0.5:5432
+    DB_PORT_5432_TCP_PROTO=tcp
+    DB_PORT_5432_TCP_PORT=5432
+    DB_PORT_5432_TCP_ADDR=172.17.0.5
+    . . .
+
+> **Note**:
+> These environment variables are only set for the first process in the
+> container. Similarly, some daemons (such as `sshd`)
+> will scrub them when spawning shells for connection.
+
+We can see that Docker has created a series of environment variables with
+useful information about our `db` container. Each variable is prefixed with
+`DB_`, which is populated from the `alias` we specified above. If our `alias`
+were `db1` the variables would be prefixed with `DB1_`. You can use these
+environment variables to configure your applications to connect to the database
+on the `db` container. The connection will be secure, private and only the
+linked `web` container will be able to talk to the `db` container.
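+
+To make that concrete, here is a minimal sketch of reading those variables
+from inside the `web` container - the `psql` invocation is purely
+illustrative and assumes a PostgreSQL client is installed in the image:
+
+    root@aed84ee21bde:/opt/webapp# echo $DB_PORT_5432_TCP_ADDR
+    172.17.0.5
+    # connect using the linked address and port (illustrative only)
+    root@aed84ee21bde:/opt/webapp# psql -h $DB_PORT_5432_TCP_ADDR -p $DB_PORT_5432_TCP_PORT -U postgres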
+
+In addition to the environment variables Docker adds a host entry for the
+linked parent to the `/etc/hosts` file. Let's look at this file on the `web`
+container now.
+
+    root@aed84ee21bde:/opt/webapp# cat /etc/hosts
+    172.17.0.7  aed84ee21bde
+    . . .
+    172.17.0.5  db
+
+We can see two relevant host entries. The first is an entry for the `web`
+container that uses the Container ID as a host name. The second entry uses the
+link alias to reference the IP address of the `db` container. Let's try to ping
+that host now via this host name.
+
+    root@aed84ee21bde:/opt/webapp# apt-get install -yqq inetutils-ping
+    root@aed84ee21bde:/opt/webapp# ping db
+    PING db (172.17.0.5): 48 data bytes
+    56 bytes from 172.17.0.5: icmp_seq=0 ttl=64 time=0.267 ms
+    56 bytes from 172.17.0.5: icmp_seq=1 ttl=64 time=0.250 ms
+    56 bytes from 172.17.0.5: icmp_seq=2 ttl=64 time=0.256 ms
+
+> **Note:**
+> We had to install `ping` because our container didn't have it.
+
+We've used the `ping` command to ping the `db` container using its host entry,
+which resolves to `172.17.0.5`. We can use this host entry to configure an
+application to connect to our `db` container.
+
+> **Note:**
+> You can link multiple child containers to a single parent. For
+> example, we could have multiple web containers attached to our `db`
+> container.
+
+# Next step
+
+Now we know how to link Docker containers together the next step is
+learning how to manage data, volumes and mounts inside our containers.
+
+Go to [Managing Data in Containers](/userguide/dockervolumes).
+
diff --git a/docs/sources/userguide/dockerrepos.md b/docs/sources/userguide/dockerrepos.md
new file mode 100644
index 0000000000..d18ec13ccd
--- /dev/null
+++ b/docs/sources/userguide/dockerrepos.md
@@ -0,0 +1,176 @@
+page_title: Working with Docker.io
+page_description: Learning how to use Docker.io to manage images and work flow
+page_keywords: repo, Docker.io, Docker Hub, registry, index, repositories, usage, pull image, push image, image, documentation
+
+# Working with Docker.io
+
+So far we've seen a lot about how to use Docker on the command line and
+your local host. We've seen [how to pull down
+images](/userguide/usingdocker/) that you can run your containers from
+and we've seen how to [create your own images](/userguide/dockerimages).
+
+Now we're going to learn a bit more about
+[Docker.io](https://index.docker.io) and how you can use it to enhance
+your Docker work flows.
+
+[Docker.io](https://index.docker.io) is the public registry that Docker
+Inc maintains. It contains a huge collection of images, over 15,000,
+that you can download and use to build your containers. It also provides
+authentication, structure (you can set up teams and organizations), work
+flow tools like webhooks and build triggers as well as privacy features
+like private repositories for storing images you don't want to publicly
+share.
+
+## Docker commands and Docker.io
+
+Docker acts as a client for these services via the `docker search`,
+`pull`, `login` and `push` commands.
+
+## Searching for images
+
+As we've already seen we can search the
+[Docker.io](https://index.docker.io) registry via its search interface
+or using the command line interface. Searching can find images by name,
+user name or description:
+
+    $ sudo docker search centos
+    NAME           DESCRIPTION                                    STARS  OFFICIAL  TRUSTED
+    centos         Official CentOS 6 Image as of 12 April 2014    88
+    tianon/centos  CentOS 5 and 6, created using rinse instea...  21
+    ...
+
+There you can see two example results: `centos` and
+`tianon/centos`. The second result shows that it comes from
+the public repository of a user, `tianon/`, while the first result,
+`centos`, doesn't explicitly list a repository so it comes from the
+trusted top-level namespace. The `/` character separates a user's
+repository and the image name.
+
+Once you have found the image you want, you can download it:
+
+    $ sudo docker pull centos
+    Pulling repository centos
+    0b443ba03958: Download complete
+    539c0211cd76: Download complete
+    511136ea3c5a: Download complete
+    7064731afe90: Download complete
+
+The image is now available to run a container from.
+
+## Contributing to Docker.io
+
+Anyone can pull public images from the [Docker.io](http://index.docker.io)
+registry, but if you would like to share your own images, then you must
+first register a user as we saw in the [first section of the Docker User
+Guide](/userguide/dockerio/).
+
+To refresh your memory, you can create your user name and log in to
+[Docker.io](https://index.docker.io/account/signup/), or by running:
+
+    $ sudo docker login
+
+This will prompt you for a user name, which will become a public
+namespace for your public repositories, for example:
+
+    training/webapp
+
+Here `training` is the user name and `webapp` is a repository owned by
+that user.
+
+If your user name is available then `docker` will also prompt you to
+enter a password and your e-mail address. It will then automatically log
+you in. Now you're ready to commit and push your own images!
+
+> **Note:**
+> Your authentication credentials will be stored in the [`.dockercfg`
+> authentication file](#authentication-file) in your home directory.
+
+## Pushing a repository to Docker.io
+
+In order to push a repository to its registry you need to have named an image,
+or committed your container to a named image as we saw
+[here](/userguide/dockerimages).
+
+Now you can push this repository to the registry designated by its name
+or tag.
+
+    $ sudo docker push yourname/newimage
+
+The image will then be uploaded and available for use.
+
+## Features of Docker.io
+
+Now let's look at some of the features of Docker.io. You can find more
+information [here](/docker-io/).
+
+## Features of Docker.io
+
+Now let's look at some of the features of Docker.io. You can find more
+information [here](/docker-io/).
+
+* Private repositories
+* Organizations and teams
+* Automated Builds
+* Webhooks
+
+## Private Repositories
+
+Sometimes you have images you don't want to make public and share with
+everyone, so Docker.io allows you to have private repositories. You can
+sign up for a plan [here](https://index.docker.io/plans/).
+
+## Organizations and teams
+
+One of the useful aspects of private repositories is that you can share
+them only with members of your organization or team. Docker.io lets you
+create organizations where you can collaborate with your colleagues and
+manage private repositories. You can create and manage an organization
+[here](https://index.docker.io/account/organizations/).
+
+## Automated Builds
+
+Automated Builds automate the building and updating of images from [GitHub](https://www.github.com)
+or [BitBucket](http://bitbucket.com), directly on Docker.io. They work by adding a commit hook to
+your selected GitHub or BitBucket repository, triggering a build and update when you push a
+commit.
+
+### To set up an Automated Build
+
+1. Create a [Docker.io account](https://index.docker.io/) and log in.
+2. Link your GitHub or BitBucket account through the [`Link Accounts`](https://index.docker.io/account/accounts/) menu.
+3. [Configure an Automated Build](https://index.docker.io/builds/).
+4. Pick a GitHub or BitBucket project that has a `Dockerfile` that you want to build.
+5. Pick the branch you want to build (the default is the `master` branch).
+6. Give the Automated Build a name.
+7. Assign an optional Docker tag to the Build.
+8. Specify where the `Dockerfile` is located. The default is `/`.
+
+Once the Automated Build is configured it will automatically trigger a
+build, and in a few minutes, if there are no errors, you will see your
+new Automated Build on the [Docker.io](https://index.docker.io) Registry.
+It will stay in sync with your GitHub or BitBucket repository until you
+deactivate the Automated Build.
+
+If you want to see the status of your Automated Builds you can go to your
+[Automated Builds page](https://index.docker.io/builds/) on Docker.io;
+it will show you the status of your builds and their build history.
+
+Once you've created an Automated Build you can deactivate or delete it. You
+cannot, however, push to an Automated Build with the `docker push` command.
+You can only manage it by committing code to your GitHub or BitBucket
+repository.
+
+You can create multiple Automated Builds per repository and configure them
+to point to specific `Dockerfile`s or Git branches.
+
+### Build Triggers
+
+Automated Builds can also be triggered via a URL on Docker.io. This
+allows you to rebuild an Automated Build image on demand.
+
+## Webhooks
+
+Webhooks are attached to your repositories and allow you to trigger an
+event when an image or updated image is pushed to the repository. With
+a webhook you can specify a target URL, and a JSON payload will be
+delivered when the image is pushed.
+
+## Next steps
+
+Go and use Docker!
+
diff --git a/docs/sources/userguide/dockervolumes.md b/docs/sources/userguide/dockervolumes.md
new file mode 100644
index 0000000000..34cfe05b47
--- /dev/null
+++ b/docs/sources/userguide/dockervolumes.md
@@ -0,0 +1,142 @@
+page_title: Managing Data in Containers
+page_description: How to manage data inside your Docker containers.
+page_keywords: Examples, Usage, volume, docker, documentation, user guide, data, volumes
+
+# Managing Data in Containers
+
+So far we've been introduced to some [basic Docker
+concepts](/userguide/usingdocker/), seen how to work with [Docker
+images](/userguide/dockerimages/), and learned about [networking
+and links between containers](/userguide/dockerlinks/). In this section
+we're going to discuss how you can manage data inside and between your
+Docker containers.
+
+We're going to look at the two primary ways you can manage data in
+Docker.
+
+* Data volumes, and
+* Data volume containers.
+
+## Data volumes
+
+A *data volume* is a specially-designated directory within one or more
+containers that bypasses the [*Union File
+System*](/terms/layer/#ufs-def) to provide several useful features for
+persistent or shared data:
+
+- Data volumes can be shared and reused between containers
+- Changes to a data volume are made directly
+- Changes to a data volume will not be included when you update an image
+- Volumes persist until no containers use them
+
+### Adding a data volume
+
+You can add a data volume to a container using the `-v` flag with the
+`docker run` command. You can use `-v` multiple times in a single
+`docker run` to mount multiple data volumes. Let's mount a single volume
+now in our web application container.
+
+    $ sudo docker run -d -P --name web -v /webapp training/webapp python app.py
+
+This will create a new volume inside the container at `/webapp`.
+
+> **Note:**
+> You can also use the `VOLUME` instruction in a `Dockerfile` to add one or
+> more new volumes to any container created from that image.
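+
+If you're curious where that volume actually lives on the host, a sketch
+like the following can help. Treat it as illustrative: the `Volumes` field
+and its exact layout vary between Docker versions and storage drivers.
+
+    # ask Docker where the container's volumes are stored on the host
+    $ sudo docker inspect -f '{{ .Volumes }}' web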
+
+### Mount a Host Directory as a Data Volume
+
+In addition to creating a volume using the `-v` flag, you can also mount a
+directory from your own host into a container.
+
+    $ sudo docker run -d -P --name web -v /src/webapp:/opt/webapp training/webapp python app.py
+
+This will mount the local directory, `/src/webapp`, into the container as the
+`/opt/webapp` directory. This is very useful for testing; for example, we can
+mount our source code inside the container and see our application at work as
+we change the source code. The directory on the host must be specified as an
+absolute path, and if the directory doesn't exist Docker will automatically
+create it for you.
+
+> **Note:**
+> This is not available from a `Dockerfile`, for the sake of portability
+> and sharing. As the host directory is, by its nature, host-dependent,
+> it might not work on all hosts.
+
+Docker defaults to a read-write volume, but we can also mount a directory
+read-only.
+
+    $ sudo docker run -d -P --name web -v /src/webapp:/opt/webapp:ro training/webapp python app.py
+
+Here we've mounted the same `/src/webapp` directory but we've added the `ro`
+option to specify that the mount should be read-only.
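+
+To convince yourself the `ro` option is enforced, a quick, illustrative
+check is to attempt a write through the read-only mount; the `touch` below
+should fail with a read-only file system error:
+
+    # writing through a read-only mount should fail
+    $ sudo docker run --rm -v /src/webapp:/opt/webapp:ro training/webapp touch /opt/webapp/test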
+
+## Creating and mounting a Data Volume Container
+
+If you have some persistent data that you want to share between
+containers, or want to use from non-persistent containers, it's best to
+create a named Data Volume Container, and then to mount the data from
+it.
+
+Let's create a new named container with a volume to share.
+
+    $ sudo docker run -d -v /dbdata --name dbdata training/postgres
+
+You can then use the `--volumes-from` flag to mount the `/dbdata` volume in another container.
+
+    $ sudo docker run -d --volumes-from dbdata --name db1 training/postgres
+
+And another:
+
+    $ sudo docker run -d --volumes-from dbdata --name db2 training/postgres
+
+You can use multiple `--volumes-from` parameters to bring together multiple data
+volumes from multiple containers.
+
+You can also extend the chain by mounting the volume that came from the
+`dbdata` container in yet another container via the `db1` or `db2` containers.
+
+    $ sudo docker run -d --name db3 --volumes-from db1 training/postgres
+
+If you remove containers that mount volumes, including the initial `dbdata`
+container, or the subsequent containers `db1` and `db2`, the volumes will not
+be deleted until there are no containers still referencing those volumes. This
+allows you to upgrade, or effectively migrate, data volumes between containers.
+
+## Backup, restore, or migrate data volumes
+
+Another useful function we can perform with volumes is to use them for
+backups, restores or migrations. We do this by using the
+`--volumes-from` flag to create a new container that mounts that volume,
+like so:
+
+    $ sudo docker run --volumes-from dbdata -v $(pwd):/backup ubuntu tar cvf /backup/backup.tar /dbdata
+
+Here we've launched a new container and mounted the volume from the
+`dbdata` container. We've then mounted a local host directory as
+`/backup`. Finally, we've passed a command that uses `tar` to back up the
+contents of the `dbdata` volume to a `backup.tar` file inside our
+`/backup` directory. When the command completes and the container stops
+we'll be left with a backup of our `dbdata` volume.
+
+You could then restore it to the same container, or to another container
+that you've made elsewhere. Create a new container.
+
+    $ sudo docker run -v /dbdata --name dbdata2 ubuntu
+
+Then un-tar the backup file in the new container's data volume.
+
+    $ sudo docker run --volumes-from dbdata2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar
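+
+As an illustrative sanity check (not part of the original walkthrough), you
+can list the restored volume's contents from yet another throwaway container:
+
+    # confirm the restored files are visible in dbdata2's volume
+    $ sudo docker run --rm --volumes-from dbdata2 busybox ls /dbdata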
+
+You can use the techniques above to automate backup, migration and
+restore testing using your preferred tools.
+
+# Next steps
+
+Now that we've learned a bit more about how to use Docker, we're going to see
+how to combine Docker with the services available on
+[Docker.io](https://index.docker.io), including Automated Builds and private
+repositories.
+
+Go to [Working with Docker.io](/userguide/dockerrepos).
+
diff --git a/docs/sources/userguide/index.md b/docs/sources/userguide/index.md
new file mode 100644
index 0000000000..7150540433
--- /dev/null
+++ b/docs/sources/userguide/index.md
@@ -0,0 +1,98 @@
+page_title: The Docker User Guide
+page_description: The Docker User Guide home page
+page_keywords: docker, introduction, documentation, about, technology, docker.io, user, guide, user's, manual, platform, framework, virtualization, home, intro
+
+# Welcome to the Docker User Guide
+
+In the [Introduction](/) you got a taste of what Docker is and how it
+works. In this guide we're going to take you through the fundamentals of
+using Docker and integrating it into your environment.
+
+We’ll teach you how to use Docker to:
+
+* Dockerize your applications.
+* Run your own containers.
+* Build Docker images.
+* Share your Docker images with others.
+* And a whole lot more!
+
+We've broken this guide into major sections that take you through
+the Docker life cycle:
+
+## Getting Started with Docker.io
+
+*How do I use Docker.io?*
+
+Docker.io is the central hub for Docker. It hosts public Docker images
+and provides services to help you build and manage your Docker
+environment. To learn more:
+
+Go to [Using Docker.io](/userguide/dockerio).
+
+## Dockerizing Applications: A "Hello World!"
+
+*How do I run applications inside containers?*
+
+Docker offers a *container-based* virtualization platform to power your
+applications. To learn how to Dockerize applications and run them:
+
+Go to [Dockerizing Applications](/userguide/dockerizing).
+
+## Working with Containers
+
+*How do I manage my containers?*
+
+Once you get a grip on running your applications in Docker containers,
+we're going to show you how to manage those containers. To find out
+about how to inspect, monitor and manage containers:
+
+Go to [Working With Containers](/userguide/usingdocker).
+
+## Working with Docker Images
+
+*How can I access, share and build my own images?*
+
+Once you've learnt how to use Docker, it's time to take the next step and
+learn how to build your own application images with Docker.
+
+Go to [Working with Docker Images](/userguide/dockerimages).
+
+## Linking Containers Together
+
+Until now we've seen how to build individual applications inside Docker
+containers. Now learn how to build whole application stacks with Docker
+by linking together multiple Docker containers.
+
+Go to [Linking Containers Together](/userguide/dockerlinks).
+
+## Managing Data in Containers
+
+Now that we know how to link Docker containers together, the next step is
+learning how to manage data, volumes and mounts inside our containers.
+
+Go to [Managing Data in Containers](/userguide/dockervolumes).
+
+## Working with Docker.io
+
+Now that we've learned a bit more about how to use Docker, we're going to
+see how to combine Docker with the services available on Docker.io,
+including Automated Builds and private repositories.
+
+Go to [Working with Docker.io](/userguide/dockerrepos).
+
+## Getting help
+
+* [Docker homepage](http://www.docker.io/)
+* [Docker.io](http://index.docker.io)
+* [Docker blog](http://blog.docker.io/)
+* [Docker documentation](http://docs.docker.io/)
+* [Docker Getting Started Guide](http://www.docker.io/gettingstarted/)
+* [Docker code on GitHub](https://github.com/dotcloud/docker)
+* [Docker mailing
+  list](https://groups.google.com/forum/#!forum/docker-user)
+* Docker on IRC: irc.freenode.net and channel #docker
+* [Docker on Twitter](http://twitter.com/docker)
+* Get [Docker help](http://stackoverflow.com/search?q=docker) on
+  StackOverflow
+* [Docker.com](http://www.docker.com/)
+
diff --git a/docs/sources/userguide/login-web.png b/docs/sources/userguide/login-web.png
new file mode 100644
index 0000000000..8fe04d829e
Binary files /dev/null and b/docs/sources/userguide/login-web.png differ
diff --git a/docs/sources/userguide/register-confirm.png b/docs/sources/userguide/register-confirm.png
new file mode 100644
index 0000000000..4057cbe965
Binary files /dev/null and b/docs/sources/userguide/register-confirm.png differ
diff --git a/docs/sources/userguide/register-web.png b/docs/sources/userguide/register-web.png
new file mode 100644
index 0000000000..2c950d2e4b
Binary files /dev/null and b/docs/sources/userguide/register-web.png differ
diff --git a/docs/sources/userguide/search.png b/docs/sources/userguide/search.png
new file mode 100644
index 0000000000..27370741a7
Binary files /dev/null and b/docs/sources/userguide/search.png differ
diff --git a/docs/sources/userguide/usingdocker.md b/docs/sources/userguide/usingdocker.md
new file mode 100644
index 0000000000..2eaf707d1a
--- /dev/null
+++ b/docs/sources/userguide/usingdocker.md
@@ -0,0 +1,316 @@
+page_title: Working with Containers
+page_description: Learn how to manage and operate Docker containers.
+page_keywords: docker, the docker guide, documentation, docker.io, monitoring containers, docker top, docker inspect, docker port, ports, docker logs, log, Logs
+
+# Working with Containers
+
+In the [last section of the Docker User Guide](/userguide/dockerizing)
+we launched our first containers. We launched two containers using the
+`docker run` command.
+
+* One container we ran interactively in the foreground.
+* One container we ran daemonized in the background.
+
+In the process we learned about several Docker commands:
+
+* `docker ps` - Lists containers.
+* `docker logs` - Shows us the standard output of a container.
+* `docker stop` - Stops running containers.
+
+> **Tip:**
+> Another way to learn about `docker` commands is our
+> [interactive tutorial](https://www.docker.io/gettingstarted).
+
+The `docker` client is pretty simple. Each action you can take
+with Docker is a command, and each command can take a series of
+flags and arguments.
+
+    # Usage: [sudo] docker [flags] [command] [arguments] ..
+    # Example:
+    $ docker run -i -t ubuntu /bin/bash
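+
+Most short flags also have a long form. As a small, illustrative aside
+(flag spellings can vary between Docker versions, so check the output of
+`sudo docker help run` on your own install), these two commands are
+equivalent:
+
+    $ sudo docker run -i -t ubuntu /bin/bash
+    $ sudo docker run --interactive --tty ubuntu /bin/bash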
+
+Let's see this in action by using the `docker version` command to return
+version information on the currently installed Docker client and daemon.
+
+    $ sudo docker version
+
+This command will not only provide you with the version of the Docker client
+and daemon you are using, but also the version of Go (the programming
+language powering Docker).
+
+    Client version: 0.8.0
+    Go version (client): go1.2
+    Git commit (client): cc3a8c8
+    Server version: 0.8.0
+    Git commit (server): cc3a8c8
+    Go version (server): go1.2
+    Last stable version: 0.8.0
+
+### Seeing what the Docker client can do
+
+We can see all of the commands available to us with the Docker client by
+running the `docker` binary without any options.
+
+    $ sudo docker
+
+You will see a list of all currently available commands.
+
+    Commands:
+         attach    Attach to a running container
+         build     Build an image from a Dockerfile
+         commit    Create a new image from a container's changes
+    . . .
+
+### Seeing Docker command usage
+
+You can also zoom in and review the usage for specific Docker commands.
+
+Try typing `docker` followed by a `[command]` to see the usage for that
+command:
+
+    $ sudo docker attach
+    Help output . . .
+
+Or you can also pass the `--help` flag to the `docker` binary.
+
+    $ sudo docker attach --help
+
+This will display the help text and all available flags:
+
+    Usage: docker attach [OPTIONS] CONTAINER
+
+    Attach to a running container
+
+      --no-stdin=false: Do not attach stdin
+      --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
+
+> **Note:**
+> You can see a full list of Docker's commands
+> [here](/reference/commandline/cli/).
+
+## Running a Web Application in Docker
+
+Now that we've learnt a bit more about the `docker` client, let's move on to
+the important stuff: running more containers. So far none of the
+containers we've run did anything particularly useful, so let's build on
+that experience by running an example web application in Docker.
+
+For our web application we're going to run a Python Flask application.
+Let's start with a `docker run` command.
+
+    $ sudo docker run -d -P training/webapp python app.py
+
+Let's review what our command did. We've specified two flags: `-d` and
+`-P`. We've already seen the `-d` flag, which tells Docker to run the
+container in the background. The `-P` flag is new and tells Docker to
+map any required network ports inside our container to our host. This
+lets us view our web application.
+
+We've specified an image: `training/webapp`. This image is a
+pre-built image we've created that contains a simple Python Flask web
+application.
+
+Lastly, we've specified a command for our container to run: `python
+app.py`. This launches our web application.
+
+> **Note:**
+> You can see more detail on the `docker run` command in the [command
+> reference](/reference/commandline/cli/#run) and the [Docker Run
+> Reference](/reference/run/).
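+
+A small aside, and an illustrative sketch rather than part of the original
+walkthrough: because `docker run -d` prints the new container's ID on
+standard output, you can capture it in a shell variable when you launch a
+container this way and reuse it in later commands:
+
+    # capture the container ID at launch for use in later commands
+    $ WEB_ID=$(sudo docker run -d -P training/webapp python app.py)
+    $ sudo docker logs $WEB_ID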
+
+## Viewing our Web Application Container
+
+Now let's see our running container using the `docker ps` command.
+
+    $ sudo docker ps -l
+    CONTAINER ID  IMAGE                   COMMAND        CREATED        STATUS        PORTS                    NAMES
+    bc533791f3f5  training/webapp:latest  python app.py  5 seconds ago  Up 2 seconds  0.0.0.0:49155->5000/tcp  nostalgic_morse
+
+You can see we've specified a new flag, `-l`, for the `docker ps`
+command. This tells the `docker ps` command to return the details of the
+*last* container started.
+
+> **Note:**
+> The `docker ps` command only shows running containers. If you want to
+> see stopped containers too, use the `-a` flag.
+
+We can see the same details we saw [when we first Dockerized a
+container](/userguide/dockerizing) with one important addition in the `PORTS`
+column.
+
+    PORTS
+    0.0.0.0:49155->5000/tcp
+
+When we passed the `-P` flag to the `docker run` command, Docker mapped any
+ports exposed in our image to our host.
+
+> **Note:**
+> We'll learn more about how to expose ports in Docker images when
+> [we learn how to build images](/userguide/dockerimages).
+
+In this case Docker has exposed port 5000 (the default Python Flask
+port) on port 49155.
+
+Network port bindings are very configurable in Docker. In our last
+example the `-P` flag is a shortcut for `-p 5000` that maps port 5000
+inside the container to a high port (from the range 49000 to 49900) on
+the local Docker host. We can also bind Docker containers to specific
+ports using the `-p` flag, for example:
+
+    $ sudo docker run -d -p 5000:5000 training/webapp python app.py
+
+This would map port 5000 inside our container to port 5000 on our local
+host. You might be asking by now: why wouldn't we just always use 1:1
+port mappings in Docker containers rather than mapping to high ports?
+Well, 1:1 mappings have the constraint that a given port can only be
+mapped once on your local host. Let's say you want to test two Python
+applications, both bound to port 5000 inside their containers. Without
+Docker's port mapping you could only access one at a time.
+
+So let's now browse to port 49155 in a web browser to
+see the application.
+
+![Viewing the web application](/userguide/webapp1.png)
+
+Our Python application is live!
+
+## A Network Port Shortcut
+
+Using the `docker ps` command to return the mapped port is a bit clumsy, so
+Docker has a useful shortcut we can use: `docker port`. To use `docker port` we
+specify the ID or name of our container and then the port for which we need the
+corresponding public-facing port.
+
+    $ sudo docker port nostalgic_morse 5000
+    0.0.0.0:49155
+
+In this case we've looked up what port is mapped externally to port 5000 inside
+the container.
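+
+Because `docker port` prints a plain `host:port` pair, it composes nicely
+with other tools. As an illustrative sketch (assuming `curl` is installed
+on your Docker host):
+
+    # fetch the app through the mapped port without looking it up by hand
+    $ curl http://$(sudo docker port nostalgic_morse 5000)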
+
+## Viewing the Web Application's Logs
+
+Let's also find out a bit more about what's happening with our application and
+use another of the commands we've learnt, `docker logs`.
+
+    $ sudo docker logs -f nostalgic_morse
+    * Running on http://0.0.0.0:5000/
+    10.0.2.2 - - [23/May/2014 20:16:31] "GET / HTTP/1.1" 200 -
+    10.0.2.2 - - [23/May/2014 20:16:31] "GET /favicon.ico HTTP/1.1" 404 -
+
+This time though we've added a new flag, `-f`. This causes the `docker
+logs` command to act like the `tail -f` command and watch the
+container's standard output. We can see here the logs from Flask showing
+the application running on port 5000 and the access log entries for it.
+
+## Looking at our Web Application Container's processes
+
+In addition to the container's logs we can also examine the processes
+running inside it using the `docker top` command.
+
+    $ sudo docker top nostalgic_morse
+    PID                 USER                COMMAND
+    854                 root                python app.py
+
+Here we can see our `python app.py` command is the only process running inside
+the container.
+
+## Inspecting our Web Application Container
+
+Lastly, we can take a low-level dive into our Docker container using the
+`docker inspect` command. It returns a JSON hash of useful configuration
+and status information about Docker containers.
+
+    $ sudo docker inspect nostalgic_morse
+
+Let's see a sample of that JSON output.
+
+    [{
+        "ID": "bc533791f3f500b280a9626688bc79e342e3ea0d528efe3a86a51ecb28ea20",
+        "Created": "2014-05-26T05:52:40.808952951Z",
+        "Path": "python",
+        "Args": [
+           "app.py"
+        ],
+        "Config": {
+           "Hostname": "bc533791f3f5",
+           "Domainname": "",
+           "User": "",
+    . . .
+
+We can also narrow down the information we want to return by requesting a
+specific element; for example, to return the container's IP address we would
+run:
+
+    $ sudo docker inspect -f '{{ .NetworkSettings.IPAddress }}' nostalgic_morse
+    172.17.0.5
+
+## Stopping our Web Application Container
+
+Okay, we've seen our web application working. Now let's stop it using the
+`docker stop` command and the name of our container: `nostalgic_morse`.
+
+    $ sudo docker stop nostalgic_morse
+    nostalgic_morse
+
+We can now use the `docker ps` command to check if the container has
+been stopped.
+
+    $ sudo docker ps -l
+
+## Restarting our Web Application Container
+
+Oops! Just after you stopped the container you get a call to say another
+developer needs the container back. From here you have two choices: you
+can create a new container or restart the old one. Let's look at
+starting our previous container back up.
+
+    $ sudo docker start nostalgic_morse
+    nostalgic_morse
+
+Now quickly run `docker ps -l` again to see the running container is
+back up, or browse to the container's URL to see if the application
+responds.
+
+> **Note:**
+> Also available is the `docker restart` command that runs a stop and
+> then start on the container.
+
+## Removing our Web Application Container
+
+Your colleague has let you know that they've now finished with the container
+and won't need it again. So let's remove it using the `docker rm` command.
+
+    $ sudo docker rm nostalgic_morse
+    Error: Impossible to remove a running container, please stop it first or use -f
+    2014/05/24 08:12:56 Error: failed to remove one or more containers
+
+What's happened? We can't actually remove a running container. This protects
+you from accidentally removing a running container you might need. Let's try
+this again by stopping the container first.
+
+    $ sudo docker stop nostalgic_morse
+    nostalgic_morse
+    $ sudo docker rm nostalgic_morse
+    nostalgic_morse
+
+And now our container is stopped and deleted.
+
+> **Note:**
+> Always remember that deleting a container is final!
+
+# Next steps
+
+Until now we've only used images that we've downloaded from
+[Docker.io](https://index.docker.io). Now let's get introduced to
+building and sharing our own images.
+
+Go to [Working with Docker Images](/userguide/dockerimages).
+ diff --git a/docs/sources/userguide/webapp1.png b/docs/sources/userguide/webapp1.png new file mode 100644 index 0000000000..5653497f17 Binary files /dev/null and b/docs/sources/userguide/webapp1.png differ diff --git a/docs/theme/mkdocs/base.html b/docs/theme/mkdocs/base.html index ca418b3cd7..66bb2d3d68 100644 --- a/docs/theme/mkdocs/base.html +++ b/docs/theme/mkdocs/base.html @@ -67,5 +67,19 @@ + diff --git a/docs/theme/mkdocs/css/base.css b/docs/theme/mkdocs/css/base.css index 999a0dedbe..956e17a263 100644 --- a/docs/theme/mkdocs/css/base.css +++ b/docs/theme/mkdocs/css/base.css @@ -59,6 +59,11 @@ h6, padding: 0.5em 0.75em !important; line-height: 1.8em; background: #fff; + overflow-x: auto; +} +#content pre code { + word-wrap: normal; + white-space: pre; } #content blockquote { background: #fff; diff --git a/engine/engine.go b/engine/engine.go index 58b43eca04..5c3228d5d3 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -3,11 +3,12 @@ package engine import ( "bufio" "fmt" - "github.com/dotcloud/docker/utils" "io" "os" "sort" "strings" + + "github.com/dotcloud/docker/utils" ) // Installer is a standard interface for objects which can "install" themselves diff --git a/engine/env.go b/engine/env.go index f96795f48c..f63f29e10f 100644 --- a/engine/env.go +++ b/engine/env.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "sort" "strconv" "strings" ) @@ -252,134 +251,26 @@ func (env *Env) Map() map[string]string { return m } -type Table struct { - Data []*Env - sortKey string - Chan chan *Env -} - -func NewTable(sortKey string, sizeHint int) *Table { - return &Table{ - make([]*Env, 0, sizeHint), - sortKey, - make(chan *Env), +// MultiMap returns a representation of env as a +// map of string arrays, keyed by string. +// This is the same structure as http headers for example, +// which allow each key to have multiple values. +func (env *Env) MultiMap() map[string][]string { + m := make(map[string][]string) + for _, kv := range *env { + parts := strings.SplitN(kv, "=", 2) + m[parts[0]] = append(m[parts[0]], parts[1]) } + return m } -func (t *Table) SetKey(sortKey string) { - t.sortKey = sortKey -} - -func (t *Table) Add(env *Env) { - t.Data = append(t.Data, env) -} - -func (t *Table) Len() int { - return len(t.Data) -} - -func (t *Table) Less(a, b int) bool { - return t.lessBy(a, b, t.sortKey) -} - -func (t *Table) lessBy(a, b int, by string) bool { - keyA := t.Data[a].Get(by) - keyB := t.Data[b].Get(by) - intA, errA := strconv.ParseInt(keyA, 10, 64) - intB, errB := strconv.ParseInt(keyB, 10, 64) - if errA == nil && errB == nil { - return intA < intB - } - return keyA < keyB -} - -func (t *Table) Swap(a, b int) { - tmp := t.Data[a] - t.Data[a] = t.Data[b] - t.Data[b] = tmp -} - -func (t *Table) Sort() { - sort.Sort(t) -} - -func (t *Table) ReverseSort() { - sort.Sort(sort.Reverse(t)) -} - -func (t *Table) WriteListTo(dst io.Writer) (n int64, err error) { - if _, err := dst.Write([]byte{'['}); err != nil { - return -1, err - } - n = 1 - for i, env := range t.Data { - bytes, err := env.WriteTo(dst) - if err != nil { - return -1, err - } - n += bytes - if i != len(t.Data)-1 { - if _, err := dst.Write([]byte{','}); err != nil { - return -1, err - } - n += 1 +// InitMultiMap removes all values in env, then initializes +// new values from the contents of m. 
+func (env *Env) InitMultiMap(m map[string][]string) { + (*env) = make([]string, 0, len(m)) + for k, vals := range m { + for _, v := range vals { + env.Set(k, v) } } - if _, err := dst.Write([]byte{']'}); err != nil { - return -1, err - } - return n + 1, nil -} - -func (t *Table) ToListString() (string, error) { - buffer := bytes.NewBuffer(nil) - if _, err := t.WriteListTo(buffer); err != nil { - return "", err - } - return buffer.String(), nil -} - -func (t *Table) WriteTo(dst io.Writer) (n int64, err error) { - for _, env := range t.Data { - bytes, err := env.WriteTo(dst) - if err != nil { - return -1, err - } - n += bytes - } - return n, nil -} - -func (t *Table) ReadListFrom(src []byte) (n int64, err error) { - var array []interface{} - - if err := json.Unmarshal(src, &array); err != nil { - return -1, err - } - - for _, item := range array { - if m, ok := item.(map[string]interface{}); ok { - env := &Env{} - for key, value := range m { - env.SetAuto(key, value) - } - t.Add(env) - } - } - - return int64(len(src)), nil -} - -func (t *Table) ReadFrom(src io.Reader) (n int64, err error) { - decoder := NewDecoder(src) - for { - env, err := decoder.Decode() - if err == io.EOF { - return 0, nil - } else if err != nil { - return -1, err - } - t.Add(env) - } - return 0, nil } diff --git a/engine/env_test.go b/engine/env_test.go index 0c66cea04e..39669d6780 100644 --- a/engine/env_test.go +++ b/engine/env_test.go @@ -123,3 +123,23 @@ func TestEnviron(t *testing.T) { t.Fatalf("bar not found in the environ") } } + +func TestMultiMap(t *testing.T) { + e := &Env{} + e.Set("foo", "bar") + e.Set("bar", "baz") + e.Set("hello", "world") + m := e.MultiMap() + e2 := &Env{} + e2.Set("old_key", "something something something") + e2.InitMultiMap(m) + if v := e2.Get("old_key"); v != "" { + t.Fatalf("%#v", v) + } + if v := e2.Get("bar"); v != "baz" { + t.Fatalf("%#v", v) + } + if v := e2.Get("hello"); v != "world" { + t.Fatalf("%#v", v) + } +} diff --git a/engine/job.go b/engine/job.go index b56155ac1c..ab8120dd44 100644 --- a/engine/job.go +++ b/engine/job.go @@ -1,6 +1,7 @@ package engine import ( + "bytes" "fmt" "io" "strings" @@ -56,8 +57,8 @@ func (job *Job) Run() error { defer func() { job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString()) }() - var errorMessage string - job.Stderr.AddString(&errorMessage) + var errorMessage = bytes.NewBuffer(nil) + job.Stderr.Add(errorMessage) if job.handler == nil { job.Errorf("%s: command not found", job.Name) job.status = 127 @@ -72,8 +73,11 @@ func (job *Job) Run() error { if err := job.Stderr.Close(); err != nil { return err } + if err := job.Stdin.Close(); err != nil { + return err + } if job.status != 0 { - return fmt.Errorf("%s", errorMessage) + return fmt.Errorf("%s", Tail(errorMessage, 1)) } return nil } diff --git a/engine/job_test.go b/engine/job_test.go index 1f927cbafc..67e723988e 100644 --- a/engine/job_test.go +++ b/engine/job_test.go @@ -1,6 +1,8 @@ package engine import ( + "bytes" + "fmt" "testing" ) @@ -40,13 +42,13 @@ func TestJobStdoutString(t *testing.T) { }) job := eng.Job("say_something_in_stdout") - var output string - if err := job.Stdout.AddString(&output); err != nil { - t.Fatal(err) - } + var outputBuffer = bytes.NewBuffer(nil) + job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { t.Fatal(err) } + fmt.Println(outputBuffer) + var output = Tail(outputBuffer, 1) if expectedOutput := "Hello world"; output != expectedOutput { t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output) } @@ -61,13 +63,12 
@@ func TestJobStderrString(t *testing.T) { }) job := eng.Job("say_something_in_stderr") - var output string - if err := job.Stderr.AddString(&output); err != nil { - t.Fatal(err) - } + var outputBuffer = bytes.NewBuffer(nil) + job.Stderr.Add(outputBuffer) if err := job.Run(); err != nil { t.Fatal(err) } + var output = Tail(outputBuffer, 1) if expectedOutput := "Something happened"; output != expectedOutput { t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output) } diff --git a/engine/remote.go b/engine/remote.go index 60aad243c5..974ca02137 100644 --- a/engine/remote.go +++ b/engine/remote.go @@ -25,7 +25,9 @@ func (s *Sender) Install(eng *Engine) error { } func (s *Sender) Handle(job *Job) Status { - msg := data.Empty().Set("cmd", append([]string{job.Name}, job.Args...)...) + cmd := append([]string{job.Name}, job.Args...) + env := data.Encode(job.Env().MultiMap()) + msg := data.Empty().Set("cmd", cmd...).Set("env", env) peer, err := beam.SendConn(s, msg.Bytes()) if err != nil { return job.Errorf("beamsend: %v", err) @@ -36,20 +38,27 @@ func (s *Sender) Handle(job *Job) Status { r := beam.NewRouter(nil) r.NewRoute().KeyStartsWith("cmd", "log", "stdout").HasAttachment().Handler(func(p []byte, stdout *os.File) error { tasks.Add(1) - io.Copy(job.Stdout, stdout) - tasks.Done() + go func() { + io.Copy(job.Stdout, stdout) + stdout.Close() + tasks.Done() + }() return nil }) r.NewRoute().KeyStartsWith("cmd", "log", "stderr").HasAttachment().Handler(func(p []byte, stderr *os.File) error { tasks.Add(1) - io.Copy(job.Stderr, stderr) - tasks.Done() + go func() { + io.Copy(job.Stderr, stderr) + stderr.Close() + tasks.Done() + }() return nil }) r.NewRoute().KeyStartsWith("cmd", "log", "stdin").HasAttachment().Handler(func(p []byte, stdin *os.File) error { - tasks.Add(1) - io.Copy(stdin, job.Stdin) - tasks.Done() + go func() { + io.Copy(stdin, job.Stdin) + stdin.Close() + }() return nil }) var status int @@ -90,19 +99,28 @@ func (rcv *Receiver) Run() error { f.Close() return err } - cmd := data.Message(p).Get("cmd") + f.Close() + defer peer.Close() + msg := data.Message(p) + cmd := msg.Get("cmd") job := rcv.Engine.Job(cmd[0], cmd[1:]...) 
- stdout, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stdout").Bytes()) + // Decode env + env, err := data.Decode(msg.GetOne("env")) + if err != nil { + return fmt.Errorf("error decoding 'env': %v", err) + } + job.Env().InitMultiMap(env) + stdout, err := beam.SendRPipe(peer, data.Empty().Set("cmd", "log", "stdout").Bytes()) if err != nil { return err } job.Stdout.Add(stdout) - stderr, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stderr").Bytes()) + stderr, err := beam.SendRPipe(peer, data.Empty().Set("cmd", "log", "stderr").Bytes()) if err != nil { return err } job.Stderr.Add(stderr) - stdin, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stdin").Bytes()) + stdin, err := beam.SendWPipe(peer, data.Empty().Set("cmd", "log", "stdin").Bytes()) if err != nil { return err } diff --git a/engine/remote_test.go b/engine/remote_test.go index 54092ec934..1563660c97 100644 --- a/engine/remote_test.go +++ b/engine/remote_test.go @@ -1,3 +1,150 @@ package engine -import () +import ( + "bufio" + "bytes" + "fmt" + "github.com/dotcloud/docker/pkg/beam" + "github.com/dotcloud/docker/pkg/testutils" + "io" + "strings" + "testing" + "time" +) + +func TestHelloWorld(t *testing.T) { + for i := 0; i < 10; i++ { + testRemote(t, + + // Sender side + func(eng *Engine) { + job := eng.Job("echo", "hello", "world") + out := &bytes.Buffer{} + job.Stdout.Add(out) + job.Run() + if job.status != StatusOK { + t.Fatalf("#%v", job.StatusCode()) + } + lines := bufio.NewScanner(out) + var i int + for lines.Scan() { + if lines.Text() != "hello world" { + t.Fatalf("%#v", lines.Text()) + } + i++ + } + if i != 1000 { + t.Fatalf("%#v", i) + } + }, + + // Receiver side + func(eng *Engine) { + eng.Register("echo", func(job *Job) Status { + // Simulate more output with a delay in the middle + for i := 0; i < 500; i++ { + fmt.Fprintf(job.Stdout, "%s\n", strings.Join(job.Args, " ")) + } + time.Sleep(5 * time.Millisecond) + for i := 0; i < 500; i++ { + fmt.Fprintf(job.Stdout, "%s\n", strings.Join(job.Args, " ")) + } + return StatusOK + }) + }, + ) + } +} + +func TestStdin(t *testing.T) { + testRemote(t, + + func(eng *Engine) { + job := eng.Job("mirror") + job.Stdin.Add(strings.NewReader("hello world!\n")) + out := &bytes.Buffer{} + job.Stdout.Add(out) + if err := job.Run(); err != nil { + t.Fatal(err) + } + if out.String() != "hello world!\n" { + t.Fatalf("%#v", out.String()) + } + }, + + func(eng *Engine) { + eng.Register("mirror", func(job *Job) Status { + if _, err := io.Copy(job.Stdout, job.Stdin); err != nil { + t.Fatal(err) + } + return StatusOK + }) + }, + ) +} + +func TestEnv(t *testing.T) { + var ( + foo string + answer int + shadok_words []string + ) + testRemote(t, + + func(eng *Engine) { + job := eng.Job("sendenv") + job.Env().Set("foo", "bar") + job.Env().SetInt("answer", 42) + job.Env().SetList("shadok_words", []string{"ga", "bu", "zo", "meu"}) + if err := job.Run(); err != nil { + t.Fatal(err) + } + }, + + func(eng *Engine) { + eng.Register("sendenv", func(job *Job) Status { + foo = job.Env().Get("foo") + answer = job.Env().GetInt("answer") + shadok_words = job.Env().GetList("shadok_words") + return StatusOK + }) + }, + ) + // Check for results here rather than inside the job handler, + // otherwise the tests may incorrectly pass if the handler is not + // called. 
+ if foo != "bar" { + t.Fatalf("%#v", foo) + } + if answer != 42 { + t.Fatalf("%#v", answer) + } + if strings.Join(shadok_words, ", ") != "ga, bu, zo, meu" { + t.Fatalf("%#v", shadok_words) + } +} + +// Helpers + +func testRemote(t *testing.T, senderSide, receiverSide func(*Engine)) { + sndConn, rcvConn, err := beam.USocketPair() + if err != nil { + t.Fatal(err) + } + defer sndConn.Close() + defer rcvConn.Close() + sender := NewSender(sndConn) + receiver := NewReceiver(rcvConn) + + // Setup the sender side + eng := New() + sender.Install(eng) + + // Setup the receiver side + receiverSide(receiver.Engine) + go receiver.Run() + + testutils.Timeout(t, func() { + senderSide(eng) + }) +} diff --git a/engine/streams.go b/engine/streams.go index 48f031de8f..99e876e17b 100644 --- a/engine/streams.go +++ b/engine/streams.go @@ -1,8 +1,7 @@ package engine import ( - "bufio" - "container/ring" + "bytes" "fmt" "io" "io/ioutil" @@ -16,6 +15,28 @@ type Output struct { used bool } +// Tail returns the n last lines of a buffer +// stripped out of the last \n, if any +// if n <= 0, returns an empty string +func Tail(buffer *bytes.Buffer, n int) string { + if n <= 0 { + return "" + } + bytes := buffer.Bytes() + if len(bytes) > 0 && bytes[len(bytes)-1] == '\n' { + bytes = bytes[:len(bytes)-1] + } + for i := buffer.Len() - 2; i >= 0; i-- { + if bytes[i] == '\n' { + n-- + if n == 0 { + return string(bytes[i+1:]) + } + } + } + return string(bytes) +} + // NewOutput returns a new Output object with no destinations attached. // Writing to an empty Output will cause the written data to be discarded. func NewOutput() *Output { @@ -58,42 +79,6 @@ func (o *Output) AddPipe() (io.Reader, error) { return r, nil } -// AddTail starts a new goroutine which will read all subsequent data written to the output, -// line by line, and append the last `n` lines to `dst`. -func (o *Output) AddTail(dst *[]string, n int) error { - src, err := o.AddPipe() - if err != nil { - return err - } - o.tasks.Add(1) - go func() { - defer o.tasks.Done() - Tail(src, n, dst) - }() - return nil -} - -// AddString starts a new goroutine which will read all subsequent data written to the output, -// line by line, and store the last line into `dst`. -func (o *Output) AddString(dst *string) error { - src, err := o.AddPipe() - if err != nil { - return err - } - o.tasks.Add(1) - go func() { - defer o.tasks.Done() - lines := make([]string, 0, 1) - Tail(src, 1, &lines) - if len(lines) == 0 { - *dst = "" - } else { - *dst = lines[0] - } - }() - return nil -} - // Write writes the same data to all registered destinations. // This method is thread-safe. func (o *Output) Write(p []byte) (n int, err error) { @@ -118,7 +103,7 @@ func (o *Output) Close() error { defer o.Unlock() var firstErr error for _, dst := range o.dests { - if closer, ok := dst.(io.WriteCloser); ok { + if closer, ok := dst.(io.Closer); ok { err := closer.Close() if err != nil && firstErr == nil { firstErr = err @@ -154,7 +139,7 @@ func (i *Input) Read(p []byte) (n int, err error) { // Not thread safe on purpose func (i *Input) Close() error { if i.src != nil { - if closer, ok := i.src.(io.WriteCloser); ok { + if closer, ok := i.src.(io.Closer); ok { return closer.Close() } } @@ -174,26 +159,6 @@ func (i *Input) Add(src io.Reader) error { return nil } -// Tail reads from `src` line per line, and returns the last `n` lines as an array. -// A ring buffer is used to only store `n` lines at any time. 
-func Tail(src io.Reader, n int, dst *[]string) { - scanner := bufio.NewScanner(src) - r := ring.New(n) - for scanner.Scan() { - if n == 0 { - continue - } - r.Value = scanner.Text() - r = r.Next() - } - r.Do(func(v interface{}) { - if v == nil { - return - } - *dst = append(*dst, v.(string)) - }) -} - // AddEnv starts a new goroutine which will decode all subsequent data // as a stream of json-encoded objects, and point `dst` to the last // decoded object. diff --git a/engine/streams_test.go b/engine/streams_test.go index 30d31d2952..83dd05c6f4 100644 --- a/engine/streams_test.go +++ b/engine/streams_test.go @@ -10,53 +10,6 @@ import ( "testing" ) -func TestOutputAddString(t *testing.T) { - var testInputs = [][2]string{ - { - "hello, world!", - "hello, world!", - }, - - { - "One\nTwo\nThree", - "Three", - }, - - { - "", - "", - }, - - { - "A line\nThen another nl-terminated line\n", - "Then another nl-terminated line", - }, - - { - "A line followed by an empty line\n\n", - "", - }, - } - for _, testData := range testInputs { - input := testData[0] - expectedOutput := testData[1] - o := NewOutput() - var output string - if err := o.AddString(&output); err != nil { - t.Error(err) - } - if n, err := o.Write([]byte(input)); err != nil { - t.Error(err) - } else if n != len(input) { - t.Errorf("Expected %d, got %d", len(input), n) - } - o.Close() - if output != expectedOutput { - t.Errorf("Last line is not stored as return string.\nInput: '%s'\nExpected: '%s'\nGot: '%s'", input, expectedOutput, output) - } - } -} - type sentinelWriteCloser struct { calledWrite bool calledClose bool @@ -145,59 +98,24 @@ func TestOutputAddPipe(t *testing.T) { } func TestTail(t *testing.T) { - var tests = make(map[string][][]string) - tests["hello, world!"] = [][]string{ - {}, - {"hello, world!"}, - {"hello, world!"}, - {"hello, world!"}, + var tests = make(map[string][]string) + tests["hello, world!"] = []string{ + "", + "hello, world!", + "hello, world!", + "hello, world!", } - tests["One\nTwo\nThree"] = [][]string{ - {}, - {"Three"}, - {"Two", "Three"}, - {"One", "Two", "Three"}, + tests["One\nTwo\nThree"] = []string{ + "", + "Three", + "Two\nThree", + "One\nTwo\nThree", } for input, outputs := range tests { for n, expectedOutput := range outputs { - var output []string - Tail(strings.NewReader(input), n, &output) - if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) { - t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot : '%s'", expectedOutput, output) - } - } - } -} - -func TestOutputAddTail(t *testing.T) { - var tests = make(map[string][][]string) - tests["hello, world!"] = [][]string{ - {}, - {"hello, world!"}, - {"hello, world!"}, - {"hello, world!"}, - } - tests["One\nTwo\nThree"] = [][]string{ - {}, - {"Three"}, - {"Two", "Three"}, - {"One", "Two", "Three"}, - } - for input, outputs := range tests { - for n, expectedOutput := range outputs { - o := NewOutput() - var output []string - if err := o.AddTail(&output, n); err != nil { - t.Error(err) - } - if n, err := o.Write([]byte(input)); err != nil { - t.Error(err) - } else if n != len(input) { - t.Errorf("Expected %d, got %d", len(input), n) - } - o.Close() - if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) { - t.Errorf("Tail(%d) returned wrong result.\nExpected: %v\nGot: %v", n, expectedOutput, output) + output := Tail(bytes.NewBufferString(input), n) + if output != expectedOutput { + t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot : '%s'", n, expectedOutput, output) } } } diff --git 
a/engine/table.go b/engine/table.go new file mode 100644 index 0000000000..292c4ed677 --- /dev/null +++ b/engine/table.go @@ -0,0 +1,141 @@ +package engine + +import ( + "bytes" + "encoding/json" + "io" + "sort" + "strconv" +) + +type Table struct { + Data []*Env + sortKey string + Chan chan *Env +} + +func NewTable(sortKey string, sizeHint int) *Table { + return &Table{ + make([]*Env, 0, sizeHint), + sortKey, + make(chan *Env), + } +} + +func (t *Table) SetKey(sortKey string) { + t.sortKey = sortKey +} + +func (t *Table) Add(env *Env) { + t.Data = append(t.Data, env) +} + +func (t *Table) Len() int { + return len(t.Data) +} + +func (t *Table) Less(a, b int) bool { + return t.lessBy(a, b, t.sortKey) +} + +func (t *Table) lessBy(a, b int, by string) bool { + keyA := t.Data[a].Get(by) + keyB := t.Data[b].Get(by) + intA, errA := strconv.ParseInt(keyA, 10, 64) + intB, errB := strconv.ParseInt(keyB, 10, 64) + if errA == nil && errB == nil { + return intA < intB + } + return keyA < keyB +} + +func (t *Table) Swap(a, b int) { + tmp := t.Data[a] + t.Data[a] = t.Data[b] + t.Data[b] = tmp +} + +func (t *Table) Sort() { + sort.Sort(t) +} + +func (t *Table) ReverseSort() { + sort.Sort(sort.Reverse(t)) +} + +func (t *Table) WriteListTo(dst io.Writer) (n int64, err error) { + if _, err := dst.Write([]byte{'['}); err != nil { + return -1, err + } + n = 1 + for i, env := range t.Data { + bytes, err := env.WriteTo(dst) + if err != nil { + return -1, err + } + n += bytes + if i != len(t.Data)-1 { + if _, err := dst.Write([]byte{','}); err != nil { + return -1, err + } + n += 1 + } + } + if _, err := dst.Write([]byte{']'}); err != nil { + return -1, err + } + return n + 1, nil +} + +func (t *Table) ToListString() (string, error) { + buffer := bytes.NewBuffer(nil) + if _, err := t.WriteListTo(buffer); err != nil { + return "", err + } + return buffer.String(), nil +} + +func (t *Table) WriteTo(dst io.Writer) (n int64, err error) { + for _, env := range t.Data { + bytes, err := env.WriteTo(dst) + if err != nil { + return -1, err + } + n += bytes + } + return n, nil +} + +func (t *Table) ReadListFrom(src []byte) (n int64, err error) { + var array []interface{} + + if err := json.Unmarshal(src, &array); err != nil { + return -1, err + } + + for _, item := range array { + if m, ok := item.(map[string]interface{}); ok { + env := &Env{} + for key, value := range m { + env.SetAuto(key, value) + } + t.Add(env) + } + } + + return int64(len(src)), nil +} + +func (t *Table) ReadFrom(src io.Reader) (n int64, err error) { + decoder := NewDecoder(src) + for { + env, err := decoder.Decode() + if err == io.EOF { + return 0, nil + } else if err != nil { + return -1, err + } + t.Add(env) + } + return 0, nil +} diff --git a/engine/table_test.go b/engine/table_test.go index 3e8e4ff1b3..9a32ac9cdb 100644 --- a/engine/table_test.go +++ b/engine/table_test.go @@ -26,3 +26,87 @@ func TestTableWriteTo(t *testing.T) { t.Fatalf("Inccorect output: %v", output) } } + +func TestTableSortStringValue(t *testing.T) { + table := NewTable("Key", 0) + + e := &Env{} + e.Set("Key", "A") + table.Add(e) + + e = &Env{} + e.Set("Key", "D") + table.Add(e) + + e = &Env{} + e.Set("Key", "B") + table.Add(e) + + e = &Env{} + e.Set("Key", "C") + table.Add(e) + + table.Sort() + + if len := table.Len(); len != 4 { + t.Fatalf("Expected 4, got %d", len) + } + + if value := table.Data[0].Get("Key"); value != "A" { + t.Fatalf("Expected A, got %s", value) + } + + if value := table.Data[1].Get("Key"); value != "B" { + t.Fatalf("Expected B, got %s", value) + } + + 
if value := table.Data[2].Get("Key"); value != "C" { + t.Fatalf("Expected C, got %s", value) + } + + if value := table.Data[3].Get("Key"); value != "D" { + t.Fatalf("Expected D, got %s", value) + } +} + +func TestTableReverseSortStringValue(t *testing.T) { + table := NewTable("Key", 0) + + e := &Env{} + e.Set("Key", "A") + table.Add(e) + + e = &Env{} + e.Set("Key", "D") + table.Add(e) + + e = &Env{} + e.Set("Key", "B") + table.Add(e) + + e = &Env{} + e.Set("Key", "C") + table.Add(e) + + table.ReverseSort() + + if len := table.Len(); len != 4 { + t.Fatalf("Expected 4, got %d", len) + } + + if value := table.Data[0].Get("Key"); value != "D" { + t.Fatalf("Expected D, got %s", value) + } + + if value := table.Data[1].Get("Key"); value != "C" { + t.Fatalf("Expected C, got %s", value) + } + + if value := table.Data[2].Get("Key"); value != "B" { + t.Fatalf("Expected B, got %s", value) + } + + if value := table.Data[3].Get("Key"); value != "A" { + t.Fatalf("Expected A, got %s", value) + } +} diff --git a/graph/graph.go b/graph/graph.go index b889139121..c9e9e6a949 100644 --- a/graph/graph.go +++ b/graph/graph.go @@ -2,12 +2,6 @@ package graph import ( "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/daemon/graphdriver" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/image" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" "io" "io/ioutil" "os" @@ -17,6 +11,13 @@ import ( "strings" "syscall" "time" + + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/daemon/graphdriver" + "github.com/dotcloud/docker/dockerversion" + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/utils" ) // A Graph is a store for versioned filesystem images and the relationship between them. @@ -141,11 +142,13 @@ func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, contain Architecture: runtime.GOARCH, OS: runtime.GOOS, } + if containerID != "" { img.Parent = containerImage img.Container = containerID img.ContainerConfig = *containerConfig } + if err := graph.Register(nil, layerData, img); err != nil { return nil, err } @@ -262,8 +265,6 @@ func SetupInitLayer(initLayer string) error { "/etc/hostname": "file", "/dev/console": "file", "/etc/mtab": "/proc/mounts", - // "var/run": "dir", - // "var/lock": "dir", } { parts := strings.Split(pth, "/") prev := "/" diff --git a/graph/service.go b/graph/service.go new file mode 100644 index 0000000000..4bce6b5645 --- /dev/null +++ b/graph/service.go @@ -0,0 +1,190 @@ +package graph + +import ( + "encoding/json" + "io" + + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/utils" +) + +func (s *TagStore) Install(eng *engine.Engine) error { + eng.Register("image_set", s.CmdSet) + eng.Register("image_tag", s.CmdTag) + eng.Register("image_get", s.CmdGet) + eng.Register("image_inspect", s.CmdLookup) + eng.Register("image_tarlayer", s.CmdTarLayer) + return nil +} + +// CmdSet stores a new image in the graph.
+// Images are stored in the graph using 4 elements: +// - A user-defined ID +// - A collection of metadata describing the image +// - A directory tree stored as a tar archive (also called the "layer") +// - A reference to a "parent" ID on top of which the layer should be applied +// +// NOTE: even though the parent ID is only useful in relation to the layer and how +// to apply it (i.e. you could represent the full directory tree as 'parent_layer + layer'), +// it is treated as a top-level property of the image. This is an artifact of early +// design and should probably be cleaned up in the future to simplify the design. +// +// Syntax: image_set ID +// Input: +// - Layer content must be streamed in tar format on stdin. An empty input is +// valid and represents a nil layer. +// +// - Image metadata must be passed in the command environment. +// 'json': a json-encoded object with all image metadata. +// It will be stored as-is, without any encoding/decoding artifacts. +// That is a requirement of the current registry client implementation, +// because a re-encoded json might invalidate the image checksum at +// the next upload, even with functionally identical content. +func (s *TagStore) CmdSet(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + var ( + imgJSON = []byte(job.Getenv("json")) + layer = job.Stdin + ) + if len(imgJSON) == 0 { + return job.Errorf("mandatory key 'json' is not set") + } + // We have to pass an *image.Image object, even though it will be completely + // ignored in favor of the redundant json data. + // FIXME: the current prototype of Graph.Register is stupid and redundant. + img, err := image.NewImgJSON(imgJSON) + if err != nil { + return job.Error(err) + } + if err := s.graph.Register(imgJSON, layer, img); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +// CmdTag assigns a new name and tag to an existing image. If the tag already exists, +// it is changed and the image previously referenced by the tag loses that reference. +// This may cause the old image to be garbage-collected if its reference count reaches zero. +// +// Syntax: image_tag NEWNAME OLDNAME +// Example: image_tag shykes/myapp:latest shykes/myapp:1.42.0 +func (s *TagStore) CmdTag(job *engine.Job) engine.Status { + if len(job.Args) != 2 { + return job.Errorf("usage: %s NEWNAME OLDNAME", job.Name) + } + var ( + newName = job.Args[0] + oldName = job.Args[1] + ) + newRepo, newTag := utils.ParseRepositoryTag(newName) + // FIXME: Set should either parse both old and new name, or neither. + // the current prototype is inconsistent. + if err := s.Set(newRepo, newTag, oldName, true); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +// CmdGet returns information about an image. +// If the image doesn't exist, an empty object is returned, to allow +// checking for an image's existence. +func (s *TagStore) CmdGet(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + res := &engine.Env{} + img, err := s.LookupImage(name) + // Note: if the image doesn't exist, LookupImage returns + // nil, nil. + if err != nil { + return job.Error(err) + } + if img != nil { + // We don't directly expose all fields of the Image objects, + // to maintain a clean public API which we can maintain over + // time even if the underlying structure changes. + // We should have done this with the Image object to begin with...
+ // but we didn't, so now we're doing it here. + // + // Fields that we're probably better off not including: + // - Config/ContainerConfig. Those structs have the same sprawl problem, + // so we shouldn't include them wholesale either. + // - Comment: initially created to fulfill the "every image is a git commit" + // metaphor, in practice people either ignore it or use it as a + // generic description field which it isn't. On deprecation shortlist. + res.SetAuto("Created", img.Created) + res.Set("Author", img.Author) + res.Set("Os", img.OS) + res.Set("Architecture", img.Architecture) + res.Set("DockerVersion", img.DockerVersion) + res.Set("Id", img.ID) + res.Set("Parent", img.Parent) + } + res.WriteTo(job.Stdout) + return engine.StatusOK +} + +// CmdLookup returns an image encoded in JSON +func (s *TagStore) CmdLookup(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + if image, err := s.LookupImage(name); err == nil && image != nil { + if job.GetenvBool("dirty") { + b, err := json.Marshal(image) + if err != nil { + return job.Error(err) + } + job.Stdout.Write(b) + return engine.StatusOK + } + + out := &engine.Env{} + out.Set("Id", image.ID) + out.Set("Parent", image.Parent) + out.Set("Comment", image.Comment) + out.SetAuto("Created", image.Created) + out.Set("Container", image.Container) + out.SetJson("ContainerConfig", image.ContainerConfig) + out.Set("DockerVersion", image.DockerVersion) + out.Set("Author", image.Author) + out.SetJson("Config", image.Config) + out.Set("Architecture", image.Architecture) + out.Set("Os", image.OS) + out.SetInt64("Size", image.Size) + if _, err = out.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Errorf("No such image: %s", name) +} + +// CmdTarLayer returns the tarLayer of the image +func (s *TagStore) CmdTarLayer(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + if image, err := s.LookupImage(name); err == nil && image != nil { + fs, err := image.TarLayer() + if err != nil { + return job.Error(err) + } + defer fs.Close() + + if written, err := io.Copy(job.Stdout, fs); err != nil { + return job.Error(err) + } else { + utils.Debugf("rendered layer for %s of [%d] size", image.ID, written) + } + + return engine.StatusOK + } + return job.Errorf("No such image: %s", name) +} diff --git a/graph/tags.go b/graph/tags.go index 524e1a1f9d..7af6d383d8 100644 --- a/graph/tags.go +++ b/graph/tags.go @@ -3,13 +3,15 @@ package graph import ( "encoding/json" "fmt" - "github.com/dotcloud/docker/image" - "github.com/dotcloud/docker/utils" "io/ioutil" "os" "path/filepath" "sort" "strings" + "sync" + + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/utils" ) const DEFAULTTAG = "latest" @@ -18,6 +20,7 @@ type TagStore struct { path string graph *Graph Repositories map[string]Repository + sync.Mutex } type Repository map[string]string @@ -33,8 +36,8 @@ func NewTagStore(path string, graph *Graph) (*TagStore, error) { Repositories: make(map[string]Repository), } // Load the json file if it exists, otherwise create it.
- if err := store.Reload(); os.IsNotExist(err) { - if err := store.Save(); err != nil { + if err := store.reload(); os.IsNotExist(err) { + if err := store.save(); err != nil { return nil, err } } else if err != nil { @@ -43,7 +46,7 @@ func NewTagStore(path string, graph *Graph) (*TagStore, error) { return store, nil } -func (store *TagStore) Save() error { +func (store *TagStore) save() error { // Store the json ball jsonData, err := json.Marshal(store) if err != nil { @@ -55,7 +58,7 @@ func (store *TagStore) Save() error { return nil } -func (store *TagStore) Reload() error { +func (store *TagStore) reload() error { jsonData, err := ioutil.ReadFile(store.path) if err != nil { return err @@ -74,6 +77,8 @@ func (store *TagStore) LookupImage(name string) (*image.Image, error) { tag = DEFAULTTAG } img, err := store.GetImage(repos, tag) + store.Lock() + defer store.Unlock() if err != nil { return nil, err } else if img == nil { @@ -87,6 +92,8 @@ func (store *TagStore) LookupImage(name string) (*image.Image, error) { // Return a reverse-lookup table of all the names which refer to each image // Eg. {"43b5f19b10584": {"base:latest", "base:v1"}} func (store *TagStore) ByID() map[string][]string { + store.Lock() + defer store.Unlock() byID := make(map[string][]string) for repoName, repository := range store.Repositories { for tag, id := range repository { @@ -130,8 +137,10 @@ func (store *TagStore) DeleteAll(id string) error { } func (store *TagStore) Delete(repoName, tag string) (bool, error) { + store.Lock() + defer store.Unlock() deleted := false - if err := store.Reload(); err != nil { + if err := store.reload(); err != nil { return false, err } if r, exists := store.Repositories[repoName]; exists { @@ -150,13 +159,15 @@ func (store *TagStore) Delete(repoName, tag string) (bool, error) { deleted = true } } else { - fmt.Errorf("No such repository: %s", repoName) + return false, fmt.Errorf("No such repository: %s", repoName) } - return deleted, store.Save() + return deleted, store.save() } func (store *TagStore) Set(repoName, tag, imageName string, force bool) error { img, err := store.LookupImage(imageName) + store.Lock() + defer store.Unlock() if err != nil { return err } @@ -169,7 +180,7 @@ func (store *TagStore) Set(repoName, tag, imageName string, force bool) error { if err := validateTagName(tag); err != nil { return err } - if err := store.Reload(); err != nil { + if err := store.reload(); err != nil { return err } var repo Repository @@ -183,11 +194,13 @@ func (store *TagStore) Set(repoName, tag, imageName string, force bool) error { store.Repositories[repoName] = repo } repo[tag] = img.ID - return store.Save() + return store.save() } func (store *TagStore) Get(repoName string) (Repository, error) { - if err := store.Reload(); err != nil { + store.Lock() + defer store.Unlock() + if err := store.reload(); err != nil { return nil, err } if r, exists := store.Repositories[repoName]; exists { @@ -198,6 +211,8 @@ func (store *TagStore) Get(repoName string) (Repository, error) { func (store *TagStore) GetImage(repoName, tagOrID string) (*image.Image, error) { repo, err := store.Get(repoName) + store.Lock() + defer store.Unlock() if err != nil { return nil, err } else if repo == nil { @@ -215,6 +230,20 @@ func (store *TagStore) GetImage(repoName, tagOrID string) (*image.Image, error) return nil, nil } +func (store *TagStore) GetRepoRefs() map[string][]string { + store.Lock() + reporefs := make(map[string][]string) + + for name, repository := range store.Repositories { + for tag, id := range 
repository {
+			shortID := utils.TruncateID(id)
+			reporefs[shortID] = append(reporefs[shortID], fmt.Sprintf("%s:%s", name, tag))
+		}
+	}
+	store.Unlock()
+	return reporefs
+}
+
 // Validate the name of a repository
 func validateRepoName(name string) error {
 	if name == "" {
diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go
index bc438131ca..42e097724d 100644
--- a/graph/tags_unit_test.go
+++ b/graph/tags_unit_test.go
@@ -36,7 +36,7 @@ func fakeTar() (io.Reader, error) {
 }
 
 func mkTestTagStore(root string, t *testing.T) *TagStore {
-	driver, err := graphdriver.New(root)
+	driver, err := graphdriver.New(root, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/hack/MAINTAINERS.md b/hack/MAINTAINERS.md
index be3117c864..9dbdf99d9a 100644
--- a/hack/MAINTAINERS.md
+++ b/hack/MAINTAINERS.md
@@ -53,14 +53,17 @@ All decisions affecting docker, big and small, follow the same 3 steps:
 
 * Step 2: Discuss the pull request. Anyone can do this.
 
-* Step 3: Accept or refuse a pull request. The relevant maintainer does this (see below "Who decides what?")
+* Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do
+this (see below "Who decides what?")
 
 ## Who decides what?
 
-So all decisions are pull requests, and the relevant maintainer makes
-the decision by accepting or refusing the pull request. But how do we
-identify the relevant maintainer for a given pull request?
+All decisions are pull requests, and the relevant maintainers make
+decisions by accepting or refusing the pull request. Anyone may review a
+pull request and signal acceptance by adding an `LGTM` comment to it.
+However, only `LGTM`s from currently listed `MAINTAINERS` count towards
+the required majority.
 
 Docker follows the timeless, highly efficient and totally unfair system
 known as [Benevolent dictator for
@@ -70,19 +73,22 @@ decisions are made by default by Solomon.
 Since making every decision myself would be highly un-scalable, in
 practice decisions are spread across multiple maintainers.
 
-The relevant maintainer for a pull request is assigned in 3 steps:
+The relevant maintainers for a pull request can be worked out in 2 steps:
 
-* Step 1: Determine the subdirectory affected by the pull request. This
+* Step 1: Determine the subdirectories affected by the pull request. This
   might be `src/registry`, `docs/source/api`, or any other part of the repo.
 
 * Step 2: Find the `MAINTAINERS` file which affects this directory. If the
   directory itself does not have a `MAINTAINERS` file, work your way up
   the repo hierarchy until you find one.
 
-* Step 3: The first maintainer listed is the primary maintainer. The
-  pull request is assigned to him. He may assign it to other listed
-  maintainers, at his discretion.
+There is also a `hack/getmaintainer.sh` script that will print out the
+maintainers for a specified directory.
+
+### I'm a maintainer, and I'm going on holiday
+
+Please let your co-maintainers and other contributors know by raising a pull
+request that comments out your `MAINTAINERS` file entry using a `#`.
 
 ### I'm a maintainer, should I make pull requests too?
 
@@ -91,7 +97,7 @@ made through a pull request.
 
 ### Who assigns maintainers?
 
-Solomon.
+Solomon has final `LGTM` approval for all pull requests to `MAINTAINERS` files.
 
 ### How is this process changed?
 
diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md
index 9edb4a3e14..82d959c9e2 100644
--- a/hack/PACKAGERS.md
+++ b/hack/PACKAGERS.md
@@ -297,7 +297,7 @@ the client will even run on alternative platforms such as Mac OS X / Darwin.
 Some of Docker's features are activated by using optional command-line
 flags or by having support for them in the kernel or userspace. A few
 examples include:
 
-* LXC execution driver (requires version 0.8 or later of the LXC utility scripts)
+* LXC execution driver (requires version 1.0 or later of the LXC utility scripts)
 * AUFS graph driver (requires AUFS patches/support enabled in the kernel,
   and at least the "auplink" utility from aufs-tools)
 * experimental BTRFS graph driver (requires BTRFS support enabled in the kernel)
diff --git a/hack/RELEASE-CHECKLIST.md b/hack/RELEASE-CHECKLIST.md
index 31f0adb757..2652e16c44 100644
--- a/hack/RELEASE-CHECKLIST.md
+++ b/hack/RELEASE-CHECKLIST.md
@@ -70,7 +70,7 @@ EXAMPLES:
 
 #### Builder
 
-+ 'docker build -t FOO .' applies the tag FOO to the newly built container
++ 'docker build -t FOO .' applies the tag FOO to the newly built image
 
 #### Remote API
 
@@ -104,8 +104,21 @@ make test
 ### 5. Test the docs
 
 Make sure that your tree includes documentation for any modified or
-new features, syntax or semantic changes. Instructions for building
-the docs are in `docs/README.md`.
+new features, syntax or semantic changes.
+
+To test locally:
+
+```bash
+make docs
+```
+
+To push a shared test build to http://beta-docs.docker.io:
+
+(You will need the `awsconfig` file added to the `docs/` dir)
+
+```bash
+make AWS_S3_BUCKET=beta-docs.docker.io docs-release
+```
 
 ### 6. Commit and create a pull request to the "release" branch
 
@@ -211,17 +224,20 @@ branch afterwards!
 
 ### 12. Update the docs branch
 
+You will need an `awsconfig` file in the `docs/` directory containing the
+S3 credentials for the bucket you are deploying to.
+
 ```bash
 git checkout docs
 git fetch
 git reset --hard origin/release
 git push -f origin docs
+make AWS_S3_BUCKET=docs.docker.io docs-release
 ```
 
-Updating the docs branch will automatically update the documentation on the
-"latest" revision of the docs. You should see the updated docs 5-10 minutes
-after the merge. The docs will appear on http://docs.docker.io/. For more
-information about documentation releases, see `docs/README.md`.
+The docs will appear on http://docs.docker.io/ (though there may be cached
+versions, so it's worth checking http://docs.docker.io.s3-website-us-west-2.amazonaws.com/).
+For more information about documentation releases, see `docs/README.md`.
 
 ### 13. Create a new pull request to merge release back into master
 
diff --git a/hack/dind b/hack/dind
index df2baa2757..a9de03e4ff 100755
--- a/hack/dind
+++ b/hack/dind
@@ -14,7 +14,7 @@ set -e
 export container=docker
 
 # First, make sure that cgroups are mounted correctly.
-CGROUP=/sys/fs/cgroup
+CGROUP=/cgroup
 
 mkdir -p "$CGROUP"
diff --git a/hack/getmaintainer.sh b/hack/getmaintainer.sh
index 2c24bacc89..ca532d42ec 100755
--- a/hack/getmaintainer.sh
+++ b/hack/getmaintainer.sh
@@ -1,4 +1,5 @@
-#!/bin/sh
+#!/usr/bin/env bash
+set -e
 
 if [ $# -ne 1 ]; then
 	echo >&2 "Usage: $0 PATH"
@@ -34,6 +35,7 @@ while true; do
 			fi
 		done;
 	} < MAINTAINERS
+	break
 	fi
 	if [ -d .git ]; then
 		break
@@ -46,13 +48,15 @@ done
 
 PRIMARY="${MAINTAINERS[0]}"
 PRIMARY_FIRSTNAME=$(echo $PRIMARY | cut -d' ' -f1)
+LGTM_COUNT=${#MAINTAINERS[@]}
+# A strict majority of the listed maintainers must LGTM a pull request.
+LGTM_COUNT=$((LGTM_COUNT/2 + 1))
 
 firstname() {
 	echo $1 | cut -d' ' -f1
 }
 
-echo "--- $PRIMARY is the PRIMARY MAINTAINER of $1. Assign pull requests to him."
-echo "$(firstname $PRIMARY) may assign pull requests to the following secondary maintainers:"
+echo "A pull request in $1 will need $LGTM_COUNT LGTMs to be merged."
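# (Editorial aside, not part of the patch: the strict-majority arithmetic just
# above works out to LGTM_COUNT=$((5/2 + 1)) = 3 for a MAINTAINERS file with
# five entries. A quick standalone sanity check:
#   for n in 1 2 3 4 5; do echo "$n maintainers -> $((n/2 + 1)) LGTMs"; done
# which prints 1, 2, 2, 3 and 3 required LGTMs respectively.)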
+echo "--- $PRIMARY is the PRIMARY MAINTAINER of $1." for SECONDARY in "${MAINTAINERS[@]:1}"; do echo "--- $SECONDARY" done diff --git a/hack/make.sh b/hack/make.sh index 8636756c87..cc5582a8df 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -18,7 +18,7 @@ set -e # - The right way to call this script is to invoke "make" from # your checkout of the Docker repository. # the Makefile will do a "docker build -t docker ." and then -# "docker run hack/make.sh" in the resulting container image. +# "docker run hack/make.sh" in the resulting image. # set -o pipefail @@ -170,6 +170,7 @@ find_dirs() { -o -wholename './.git' \ -o -wholename './bundles' \ -o -wholename './docs' \ + -o -wholename './pkg/libcontainer/nsinit' \ \) \ -prune \ \) -name "$1" -print0 | xargs -0n1 dirname | sort -u diff --git a/hack/make/test-integration b/hack/make/test-integration index 4c2bccaead..baad1349a2 100644 --- a/hack/make/test-integration +++ b/hack/make/test-integration @@ -10,6 +10,6 @@ bundle_test_integration() { # this "grep" hides some really irritating warnings that "go test -coverpkg" # spews when it is given packages that aren't used +exec > >(tee -a $DEST/test.log) 2>&1 bundle_test_integration 2>&1 \ - | grep --line-buffered -v '^warning: no packages being tested depend on ' \ - | tee $DEST/test.log + | grep --line-buffered -v '^warning: no packages being tested depend on ' diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli index f2128a26ac..837bd8737a 100644 --- a/hack/make/test-integration-cli +++ b/hack/make/test-integration-cli @@ -11,6 +11,7 @@ bundle_test_integration_cli() { } # subshell so that we can export PATH without breaking other things +exec > >(tee -a $DEST/test.log) 2>&1 ( export PATH="$DEST/../binary:$DEST/../dynbinary:$PATH" @@ -40,4 +41,4 @@ bundle_test_integration_cli() { DOCKERD_PID=$(set -x; cat $DEST/docker.pid) ( set -x; kill $DOCKERD_PID ) wait $DOCKERD_PID || true -) 2>&1 | tee $DEST/test.log +) diff --git a/hack/make/test-unit b/hack/make/test-unit index 066865859c..552810f349 100644 --- a/hack/make/test-unit +++ b/hack/make/test-unit @@ -49,7 +49,8 @@ bundle_test_unit() { echo true fi - } 2>&1 | tee $DEST/test.log + } } +exec > >(tee -a $DEST/test.log) 2>&1 bundle_test_unit diff --git a/hack/vendor.sh b/hack/vendor.sh index 79322cd9af..8084f2eb9d 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -39,13 +39,13 @@ clone() { echo done } -clone git github.com/kr/pty 98c7b80083 +clone git github.com/kr/pty 67e2db24c8 -clone git github.com/gorilla/context 708054d61e5 +clone git github.com/gorilla/context b06ed15e1c -clone git github.com/gorilla/mux 9b36453141c +clone git github.com/gorilla/mux 136d54f81f -clone git github.com/syndtr/gocapability 3454319be2 +clone git github.com/syndtr/gocapability 3c85049eae clone hg code.google.com/p/go.net 84a4013f96e0 @@ -53,11 +53,11 @@ clone hg code.google.com/p/gosqlite 74691fb6f837 # get Go tip's archive/tar, for xattr support # TODO after Go 1.3 drops, bump our minimum supported version and drop this vendored dep -clone hg code.google.com/p/go a15f344a9efa +clone hg code.google.com/p/go 3458ba248590 mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar rm -rf src/code.google.com/p/go mkdir -p src/code.google.com/p/go/src/pkg/archive mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar clone git github.com/godbus/dbus v1 -clone git github.com/coreos/go-systemd v1 +clone git github.com/coreos/go-systemd v2 diff --git a/integration-cli/MAINTAINERS b/integration-cli/MAINTAINERS new file mode 100644 index 
0000000000..6dde4769d7 --- /dev/null +++ b/integration-cli/MAINTAINERS @@ -0,0 +1 @@ +Cristian Staretu (@unclejack) diff --git a/integration-cli/build_tests/TestAdd/DirContentToExistDir/Dockerfile b/integration-cli/build_tests/TestAdd/DirContentToExistDir/Dockerfile new file mode 100644 index 0000000000..6ab0e98f49 --- /dev/null +++ b/integration-cli/build_tests/TestAdd/DirContentToExistDir/Dockerfile @@ -0,0 +1,10 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] diff --git a/integration-cli/build_tests/TestAdd/DirContentToExistDir/test_dir/test_file b/integration-cli/build_tests/TestAdd/DirContentToExistDir/test_dir/test_file new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration-cli/build_tests/TestAdd/DirContentToRoot/Dockerfile b/integration-cli/build_tests/TestAdd/DirContentToRoot/Dockerfile new file mode 100644 index 0000000000..03a9c052fd --- /dev/null +++ b/integration-cli/build_tests/TestAdd/DirContentToRoot/Dockerfile @@ -0,0 +1,8 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestAdd/DirContentToRoot/test_dir/test_file b/integration-cli/build_tests/TestAdd/DirContentToRoot/test_dir/test_file new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration-cli/build_tests/TestAdd/EtcToRoot/Dockerfile b/integration-cli/build_tests/TestAdd/EtcToRoot/Dockerfile new file mode 100644 index 0000000000..58c75b00f3 --- /dev/null +++ b/integration-cli/build_tests/TestAdd/EtcToRoot/Dockerfile @@ -0,0 +1,2 @@ +FROM scratch +ADD . 
/ diff --git a/integration-cli/build_tests/TestAdd/SingleFileToExistDir/Dockerfile b/integration-cli/build_tests/TestAdd/SingleFileToExistDir/Dockerfile new file mode 100644 index 0000000000..fefbd09f0c --- /dev/null +++ b/integration-cli/build_tests/TestAdd/SingleFileToExistDir/Dockerfile @@ -0,0 +1,10 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestAdd/SingleFileToExistDir/test_file b/integration-cli/build_tests/TestAdd/SingleFileToExistDir/test_file new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration-cli/build_tests/TestAdd/SingleFileToNonExistDir/Dockerfile b/integration-cli/build_tests/TestAdd/SingleFileToNonExistDir/Dockerfile new file mode 100644 index 0000000000..661990b7f4 --- /dev/null +++ b/integration-cli/build_tests/TestAdd/SingleFileToNonExistDir/Dockerfile @@ -0,0 +1,9 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestAdd/SingleFileToNonExistDir/test_file b/integration-cli/build_tests/TestAdd/SingleFileToNonExistDir/test_file new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration-cli/build_tests/TestAdd/SingleFileToRoot/Dockerfile b/integration-cli/build_tests/TestAdd/SingleFileToRoot/Dockerfile new file mode 100644 index 0000000000..561dbe9c55 --- /dev/null +++ b/integration-cli/build_tests/TestAdd/SingleFileToRoot/Dockerfile @@ -0,0 +1,9 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestAdd/SingleFileToWorkdir/Dockerfile b/integration-cli/build_tests/TestAdd/SingleFileToWorkdir/Dockerfile new file mode 100644 index 0000000000..3f076718f2 --- /dev/null +++ b/integration-cli/build_tests/TestAdd/SingleFileToWorkdir/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +ADD test_file . 
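An editorial note on how these fixtures are consumed: every assertion lives in the Dockerfile itself as a `RUN [ ... ]` test, so a failed expectation aborts the build with a non-zero exit code and the Go suite only needs to build the directory and check that code. A condensed sketch of that driver pattern, assuming the suite's `dockerBinary` variable and `runCommandWithOutput` helper (both visible later in this diff); the helper name `buildFixture` is hypothetical:

```go
// buildFixture builds the named fixture directory and reports whether the
// in-Dockerfile RUN assertions all passed (i.e. the build exited zero).
func buildFixture(t *testing.T, dir, tag string) {
	buildCmd := exec.Command(dockerBinary, "build", "-t", tag, dir)
	out, exitCode, err := runCommandWithOutput(buildCmd)
	if err != nil || exitCode != 0 {
		t.Fatalf("build failed to complete: %v %v", out, err)
	}
}
```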
diff --git a/integration-cli/build_tests/TestAdd/WholeDirToRoot/Dockerfile b/integration-cli/build_tests/TestAdd/WholeDirToRoot/Dockerfile new file mode 100644 index 0000000000..03e9ac0b1c --- /dev/null +++ b/integration-cli/build_tests/TestAdd/WholeDirToRoot/Dockerfile @@ -0,0 +1,11 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestBuildCacheADD/1/Dockerfile b/integration-cli/build_tests/TestBuildCacheADD/1/Dockerfile new file mode 100644 index 0000000000..7287771992 --- /dev/null +++ b/integration-cli/build_tests/TestBuildCacheADD/1/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +ADD https://index.docker.io/robots.txt / diff --git a/integration-cli/build_tests/TestBuildCacheADD/2/Dockerfile b/integration-cli/build_tests/TestBuildCacheADD/2/Dockerfile new file mode 100644 index 0000000000..afe79b84b6 --- /dev/null +++ b/integration-cli/build_tests/TestBuildCacheADD/2/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +ADD http://example.com/index.html / diff --git a/integration-cli/build_tests/TestBuildForceRm/Dockerfile b/integration-cli/build_tests/TestBuildForceRm/Dockerfile new file mode 100644 index 0000000000..8468edd4ce --- /dev/null +++ b/integration-cli/build_tests/TestBuildForceRm/Dockerfile @@ -0,0 +1,3 @@ +FROM busybox +RUN true +RUN thiswillfail diff --git a/integration-cli/build_tests/TestBuildRm/Dockerfile b/integration-cli/build_tests/TestBuildRm/Dockerfile new file mode 100644 index 0000000000..190eacf117 --- /dev/null +++ b/integration-cli/build_tests/TestBuildRm/Dockerfile @@ -0,0 +1,4 @@ +FROM busybox +ADD foo / +ADD foo / + diff --git a/integration-cli/build_tests/TestBuildRm/foo b/integration-cli/build_tests/TestBuildRm/foo new file mode 100644 index 0000000000..5716ca5987 --- /dev/null +++ b/integration-cli/build_tests/TestBuildRm/foo @@ -0,0 +1 @@ +bar diff --git a/integration-cli/build_tests/TestBuildSixtySteps/Dockerfile b/integration-cli/build_tests/TestBuildSixtySteps/Dockerfile index 89b66f4f1d..6a2bcab301 100644 --- a/integration-cli/build_tests/TestBuildSixtySteps/Dockerfile +++ b/integration-cli/build_tests/TestBuildSixtySteps/Dockerfile @@ -1,60 +1,60 @@ -FROM busybox -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN 
echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" -RUN echo "foo" +FROM scratch +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / +ADD foo / diff --git a/integration-cli/build_tests/TestBuildSixtySteps/foo b/integration-cli/build_tests/TestBuildSixtySteps/foo new file mode 100644 index 0000000000..7898192261 --- /dev/null +++ b/integration-cli/build_tests/TestBuildSixtySteps/foo @@ -0,0 +1 @@ +a diff --git a/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessibledirectory/Dockerfile b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessibledirectory/Dockerfile new file mode 100644 index 0000000000..0964b8e87c --- /dev/null +++ b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessibledirectory/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +ADD . /foo/ diff --git a/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessibledirectory/directoryWeCantStat/bar b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessibledirectory/directoryWeCantStat/bar new file mode 100644 index 0000000000..257cc5642c --- /dev/null +++ b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessibledirectory/directoryWeCantStat/bar @@ -0,0 +1 @@ +foo diff --git a/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessiblefile/Dockerfile b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessiblefile/Dockerfile new file mode 100644 index 0000000000..0964b8e87c --- /dev/null +++ b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessiblefile/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +ADD . /foo/ diff --git a/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessiblefile/fileWithoutReadAccess b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessiblefile/fileWithoutReadAccess new file mode 100644 index 0000000000..b25f9a2a19 --- /dev/null +++ b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/inaccessiblefile/fileWithoutReadAccess @@ -0,0 +1 @@ +should make `docker build` throw an error diff --git a/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/linksdirectory/Dockerfile b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/linksdirectory/Dockerfile new file mode 100644 index 0000000000..0964b8e87c --- /dev/null +++ b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/linksdirectory/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +ADD . 
/foo/ diff --git a/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/linksdirectory/g b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/linksdirectory/g new file mode 120000 index 0000000000..5fc3f33923 --- /dev/null +++ b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/linksdirectory/g @@ -0,0 +1 @@ +../../../../../../../../../../../../../../../../../../../azA \ No newline at end of file diff --git a/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile b/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile new file mode 100644 index 0000000000..d63e8538bb --- /dev/null +++ b/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile @@ -0,0 +1,10 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] diff --git a/integration-cli/build_tests/TestCopy/DirContentToExistDir/test_dir/test_file b/integration-cli/build_tests/TestCopy/DirContentToExistDir/test_dir/test_file new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile new file mode 100644 index 0000000000..45df77e563 --- /dev/null +++ b/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile @@ -0,0 +1,8 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/DirContentToRoot/test_dir/test_file b/integration-cli/build_tests/TestCopy/DirContentToRoot/test_dir/test_file new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile b/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile new file mode 100644 index 0000000000..e6bc0c0dd2 --- /dev/null +++ b/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +COPY https://index.docker.io/robots.txt / diff --git a/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile new file mode 100644 index 0000000000..b4f319f80f --- /dev/null +++ b/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile @@ -0,0 +1,2 @@ +FROM scratch +COPY . 
/ diff --git a/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile new file mode 100644 index 0000000000..3edfe661d4 --- /dev/null +++ b/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile @@ -0,0 +1,10 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/SingleFileToExistDir/test_file b/integration-cli/build_tests/TestCopy/SingleFileToExistDir/test_file new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile new file mode 100644 index 0000000000..33b65a62c7 --- /dev/null +++ b/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile @@ -0,0 +1,9 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/test_file b/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/test_file new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile new file mode 100644 index 0000000000..38fd09026d --- /dev/null +++ b/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile @@ -0,0 +1,9 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile new file mode 100644 index 0000000000..ba2d797e35 --- /dev/null +++ b/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +COPY test_file . 
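Among the COPY fixtures, `DisallowRemote` (above) is the one negative case: unlike ADD, COPY must reject remote URLs, so its test asserts that the build fails rather than succeeds. A sketch of that inverted check, again assuming the suite's helpers; the helper name `expectBuildFailure` is hypothetical:

```go
// expectBuildFailure builds a fixture that is supposed to be rejected
// (e.g. TestCopy/DisallowRemote) and fails the test if the build succeeds.
func expectBuildFailure(t *testing.T, dir string) {
	buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", dir)
	out, exitCode, err := runCommandWithOutput(buildCmd)
	if err == nil || exitCode == 0 {
		t.Fatalf("building the image should've failed; output: %s", out)
	}
}
```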
diff --git a/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile new file mode 100644 index 0000000000..91be29fe7a --- /dev/null +++ b/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile @@ -0,0 +1,11 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 7cd42dc69c..f000235843 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -2,11 +2,46 @@ package main import ( "fmt" + + "os" "os/exec" "path/filepath" + "strings" "testing" + "time" ) +func TestBuildCacheADD(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildCacheADD", "1") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testcacheadd1", ".") + buildCmd.Dir = buildDirectory + exitCode, err := runCommand(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v", err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + buildDirectory = filepath.Join(workingDirectory, "build_tests", "TestBuildCacheADD", "2") + buildCmd = exec.Command(dockerBinary, "build", "-t", "testcacheadd2", ".") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + if strings.Contains(out, "Using cache") { + t.Fatal("2nd build used cache on ADD, it shouldn't") + } + + deleteImages("testcacheadd1") + deleteImages("testcacheadd2") + + logDone("build - build two images with ADD") +} + func TestBuildSixtySteps(t *testing.T) { buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildSixtySteps") buildCmd := exec.Command(dockerBinary, "build", "-t", "foobuildsixtysteps", ".") @@ -23,6 +58,1078 @@ func TestBuildSixtySteps(t *testing.T) { logDone("build - build an image with sixty build steps") } -// TODO: TestCaching +func TestAddSingleFileToRoot(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd", "SingleFileToRoot") + f, err := os.OpenFile(filepath.Join(buildDirectory, "test_file"), os.O_CREATE, 0644) + if err != nil { + t.Fatal(err) + } + f.Close() + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", ".") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) -// TODO: TestADDCacheInvalidation + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + + logDone("build - add single file to root") +} + +// Issue #3960: "ADD src ." 
hangs +func TestAddSingleFileToWorkdir(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd", "SingleFileToWorkdir") + f, err := os.OpenFile(filepath.Join(buildDirectory, "test_file"), os.O_CREATE, 0644) + if err != nil { + t.Fatal(err) + } + f.Close() + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", ".") + buildCmd.Dir = buildDirectory + done := make(chan error) + go func() { + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + done <- fmt.Errorf("build failed to complete: %s %v", out, err) + return + } + done <- nil + }() + select { + case <-time.After(5 * time.Second): + if err := buildCmd.Process.Kill(); err != nil { + fmt.Printf("could not kill build (pid=%d): %v\n", buildCmd.Process.Pid, err) + } + t.Fatal("build timed out") + case err := <-done: + if err != nil { + t.Fatal(err) + } + } + + deleteImages("testaddimg") + + logDone("build - add single file to workdir") +} + +func TestAddSingleFileToExistDir(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", "SingleFileToExistDir") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + + logDone("build - add single file to existing dir") +} + +func TestAddSingleFileToNonExistDir(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", "SingleFileToNonExistDir") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + + logDone("build - add single file to non-existing dir") +} + +func TestAddDirContentToRoot(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", "DirContentToRoot") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + + logDone("build - add directory contents to root") +} + +func TestAddDirContentToExistDir(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", "DirContentToExistDir") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + + logDone("build - add directory contents to existing dir") +} + +func TestAddWholeDirToRoot(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd", "WholeDirToRoot") + test_dir := filepath.Join(buildDirectory, "test_dir") + if err := os.MkdirAll(test_dir, 0755); err != nil { + t.Fatal(err) + } + f, err := os.OpenFile(filepath.Join(test_dir, 
"test_file"), os.O_CREATE, 0644) + if err != nil { + t.Fatal(err) + } + f.Close() + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", ".") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + + logDone("build - add whole directory to root") +} + +func TestAddEtcToRoot(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", "EtcToRoot") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + logDone("build - add etc directory to root") +} + +func TestCopySingleFileToRoot(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", "SingleFileToRoot") + f, err := os.OpenFile(filepath.Join(buildDirectory, "test_file"), os.O_CREATE, 0644) + if err != nil { + t.Fatal(err) + } + f.Close() + buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", ".") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testcopyimg") + + logDone("build - copy single file to root") +} + +// Issue #3960: "ADD src ." hangs - adapted for COPY +func TestCopySingleFileToWorkdir(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", "SingleFileToWorkdir") + f, err := os.OpenFile(filepath.Join(buildDirectory, "test_file"), os.O_CREATE, 0644) + if err != nil { + t.Fatal(err) + } + f.Close() + buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", ".") + buildCmd.Dir = buildDirectory + done := make(chan error) + go func() { + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + done <- fmt.Errorf("build failed to complete: %s %v", out, err) + return + } + done <- nil + }() + select { + case <-time.After(5 * time.Second): + if err := buildCmd.Process.Kill(); err != nil { + fmt.Printf("could not kill build (pid=%d): %v\n", buildCmd.Process.Pid, err) + } + t.Fatal("build timed out") + case err := <-done: + if err != nil { + t.Fatal(err) + } + } + + deleteImages("testcopyimg") + + logDone("build - copy single file to workdir") +} + +func TestCopySingleFileToExistDir(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "SingleFileToExistDir") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testcopyimg") + + logDone("build - add single file to existing dir") +} + +func TestCopySingleFileToNonExistDir(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "SingleFileToNonExistDir") + buildCmd.Dir = 
buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testcopyimg") + + logDone("build - copy single file to non-existing dir") +} + +func TestCopyDirContentToRoot(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "DirContentToRoot") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testcopyimg") + + logDone("build - copy directory contents to root") +} + +func TestCopyDirContentToExistDir(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "DirContentToExistDir") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testcopyimg") + + logDone("build - copy directory contents to existing dir") +} + +func TestCopyWholeDirToRoot(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", "WholeDirToRoot") + test_dir := filepath.Join(buildDirectory, "test_dir") + if err := os.MkdirAll(test_dir, 0755); err != nil { + t.Fatal(err) + } + f, err := os.OpenFile(filepath.Join(test_dir, "test_file"), os.O_CREATE, 0644) + if err != nil { + t.Fatal(err) + } + f.Close() + buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", ".") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testcopyimg") + + logDone("build - copy whole directory to root") +} + +func TestCopyEtcToRoot(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "EtcToRoot") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testcopyimg") + logDone("build - copy etc directory to root") +} + +func TestCopyDisallowRemote(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "DisallowRemote") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + + if err == nil || exitCode == 0 { + t.Fatalf("building the image should've failed; output: %s", out) + } + + deleteImages("testcopyimg") + logDone("build - copy - disallow copy from remote") +} + +// Issue #5270 - ensure we throw a better error than "unexpected EOF" +// when we can't access files in the context. 
+func TestBuildWithInaccessibleFilesInContext(t *testing.T) {
+	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildWithInaccessibleFilesInContext")
+
+	{
+		// This is used to ensure we detect inaccessible files early during build in the cli client
+		pathToInaccessibleFileBuildDirectory := filepath.Join(buildDirectory, "inaccessiblefile")
+		pathToFileWithoutReadAccess := filepath.Join(pathToInaccessibleFileBuildDirectory, "fileWithoutReadAccess")
+
+		err := os.Chown(pathToFileWithoutReadAccess, 0, 0)
+		errorOut(err, t, fmt.Sprintf("failed to chown file to root: %s", err))
+		err = os.Chmod(pathToFileWithoutReadAccess, 0700)
+		errorOut(err, t, fmt.Sprintf("failed to chmod file to 0700: %s", err))
+
+		buildCommandStatement := fmt.Sprintf("%s build -t inaccessiblefiles .", dockerBinary)
+		buildCmd := exec.Command("su", "unprivilegeduser", "-c", buildCommandStatement)
+		buildCmd.Dir = pathToInaccessibleFileBuildDirectory
+		out, exitCode, err := runCommandWithOutput(buildCmd)
+		if err == nil || exitCode == 0 {
+			t.Fatalf("build should have failed: %s %s", err, out)
+		}
+
+		// check if we've detected the failure before we started building
+		if !strings.Contains(out, "no permission to read from ") {
+			t.Fatalf("output should've contained the string: no permission to read from but contained: %s", out)
+		}
+
+		if !strings.Contains(out, "Error checking context is accessible") {
+			t.Fatalf("output should've contained the string: Error checking context is accessible")
+		}
+	}
+	{
+		// This is used to ensure we detect inaccessible directories early during build in the cli client
+		pathToInaccessibleDirectoryBuildDirectory := filepath.Join(buildDirectory, "inaccessibledirectory")
+		pathToDirectoryWithoutReadAccess := filepath.Join(pathToInaccessibleDirectoryBuildDirectory, "directoryWeCantStat")
+		pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
+
+		err := os.Chown(pathToDirectoryWithoutReadAccess, 0, 0)
+		errorOut(err, t, fmt.Sprintf("failed to chown directory to root: %s", err))
+		err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444)
+		errorOut(err, t, fmt.Sprintf("failed to chmod directory to 0444: %s", err))
+		err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700)
+		errorOut(err, t, fmt.Sprintf("failed to chmod file to 0700: %s", err))
+
+		buildCommandStatement := fmt.Sprintf("%s build -t inaccessiblefiles .", dockerBinary)
+		buildCmd := exec.Command("su", "unprivilegeduser", "-c", buildCommandStatement)
+		buildCmd.Dir = pathToInaccessibleDirectoryBuildDirectory
+		out, exitCode, err := runCommandWithOutput(buildCmd)
+		if err == nil || exitCode == 0 {
+			t.Fatalf("build should have failed: %s %s", err, out)
+		}
+
+		// check if we've detected the failure before we started building
+		if !strings.Contains(out, "can't stat") {
+			t.Fatalf("output should've contained the string: can't stat but contained: %s", out)
+		}
+
+		if !strings.Contains(out, "Error checking context is accessible") {
+			t.Fatalf("output should've contained the string: Error checking context is accessible")
+		}
+
+	}
+	{
+		// This is used to ensure we don't follow links when checking if everything in the context is accessible
+		// This test doesn't require that we run commands as an unprivileged user
+		pathToDirectoryWhichContainsLinks := filepath.Join(buildDirectory, "linksdirectory")
+
+		buildCmd := exec.Command(dockerBinary, "build", "-t", "testlinksok", ".")
+		buildCmd.Dir = pathToDirectoryWhichContainsLinks
+		out, exitCode, err := runCommandWithOutput(buildCmd)
+		if err != nil || exitCode != 0 {
+			t.Fatalf("build should have worked: %s %s", err, out)
+		}
+
+		deleteImages("testlinksok")
+
+	}
+	deleteImages("inaccessiblefiles")
+	logDone("build - ADD from context with inaccessible files must fail")
+	logDone("build - ADD from context with accessible links must work")
+}
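The error strings the test above matches ("Error checking context is accessible", "no permission to read from", "can't stat") come from a pre-flight pass the client makes over the build context before anything is uploaded. As an editorial illustration only, not the actual client code, that kind of check could be sketched like this (assumes imports of "fmt", "os" and "path/filepath"):

```go
// checkContextIsAccessible is a hypothetical sketch of the pre-flight pass:
// walk the context and try to open every regular file, so unreadable content
// is reported before the build starts. filepath.Walk uses Lstat, so symlink
// targets are never followed, matching the linksdirectory case above.
func checkContextIsAccessible(root string) error {
	return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return fmt.Errorf("can't stat '%s'", path)
		}
		if info.Mode().IsRegular() {
			f, err := os.Open(path)
			if err != nil {
				return fmt.Errorf("no permission to read from '%s'", path)
			}
			f.Close()
		}
		return nil
	})
}
```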
+
+func TestBuildForceRm(t *testing.T) {
+	containerCountBefore, err := getContainerCount()
+	if err != nil {
+		t.Fatalf("failed to get the container count: %s", err)
+	}
+
+	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildForceRm")
+	buildCmd := exec.Command(dockerBinary, "build", "--force-rm", ".")
+	buildCmd.Dir = buildDirectory
+	_, exitCode, err := runCommandWithOutput(buildCmd)
+
+	if err == nil || exitCode == 0 {
+		t.Fatal("the build should have failed")
+	}
+
+	containerCountAfter, err := getContainerCount()
+	if err != nil {
+		t.Fatalf("failed to get the container count: %s", err)
+	}
+
+	if containerCountBefore != containerCountAfter {
+		t.Fatalf("--force-rm shouldn't have left containers behind")
+	}
+
+	logDone("build - ensure --force-rm doesn't leave containers behind")
+}
+
+func TestBuildRm(t *testing.T) {
+	{
+		containerCountBefore, err := getContainerCount()
+		if err != nil {
+			t.Fatalf("failed to get the container count: %s", err)
+		}
+
+		buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildRm")
+		buildCmd := exec.Command(dockerBinary, "build", "--rm", "-t", "testbuildrm", ".")
+		buildCmd.Dir = buildDirectory
+		_, exitCode, err := runCommandWithOutput(buildCmd)
+
+		if err != nil || exitCode != 0 {
+			t.Fatal("failed to build the image")
+		}
+
+		containerCountAfter, err := getContainerCount()
+		if err != nil {
+			t.Fatalf("failed to get the container count: %s", err)
+		}
+
+		if containerCountBefore != containerCountAfter {
+			t.Fatalf("--rm shouldn't have left containers behind")
+		}
+		deleteImages("testbuildrm")
+	}
+
+	{
+		containerCountBefore, err := getContainerCount()
+		if err != nil {
+			t.Fatalf("failed to get the container count: %s", err)
+		}
+
+		buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildRm")
+		buildCmd := exec.Command(dockerBinary, "build", "-t", "testbuildrm", ".")
+		buildCmd.Dir = buildDirectory
+		_, exitCode, err := runCommandWithOutput(buildCmd)
+
+		if err != nil || exitCode != 0 {
+			t.Fatal("failed to build the image")
+		}
+
+		containerCountAfter, err := getContainerCount()
+		if err != nil {
+			t.Fatalf("failed to get the container count: %s", err)
+		}
+
+		if containerCountBefore != containerCountAfter {
+			t.Fatalf("the default --rm=true shouldn't have left containers behind")
+		}
+		deleteImages("testbuildrm")
+	}
+
+	{
+		containerCountBefore, err := getContainerCount()
+		if err != nil {
+			t.Fatalf("failed to get the container count: %s", err)
+		}
+
+		buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildRm")
+		buildCmd := exec.Command(dockerBinary, "build", "--rm=false", "-t", "testbuildrm", ".")
+		buildCmd.Dir = buildDirectory
+		_, exitCode, err := runCommandWithOutput(buildCmd)
+
+		if err != nil || exitCode != 0 {
+			t.Fatal("failed to build the image")
+		}
+
+		containerCountAfter, err := getContainerCount()
+		if err != nil {
+			t.Fatalf("failed to get the container count: %s", err)
+		}
+
+		if containerCountBefore == containerCountAfter {
+			t.Fatalf("--rm=false should have left containers behind")
+		}
+		deleteAllContainers()
+		deleteImages("testbuildrm")
+
+	}
+
+	logDone("build - ensure --rm doesn't leave containers behind and that --rm=true is the default")
+	logDone("build - ensure 
--rm=false overrides the default") +} +func TestBuildWithVolumes(t *testing.T) { + name := "testbuildvolumes" + expected := "map[/test1:map[] /test2:map[]]" + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + VOLUME /test1 + VOLUME /test2`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Volumes") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Volumes %s, expected %s", res, expected) + } + logDone("build - with volumes") +} + +func TestBuildMaintainer(t *testing.T) { + name := "testbuildmaintainer" + expected := "dockerio" + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Author") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Maintainer %s, expected %s", res, expected) + } + logDone("build - maintainer") +} + +func TestBuildUser(t *testing.T) { + name := "testbuilduser" + expected := "dockerio" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + USER dockerio + RUN [ $(whoami) = 'dockerio' ]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.User") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("User %s, expected %s", res, expected) + } + logDone("build - user") +} + +func TestBuildRelativeWorkdir(t *testing.T) { + name := "testbuildrelativeworkdir" + expected := "/test2/test3" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + RUN [ "$PWD" = '/' ] + WORKDIR test1 + RUN [ "$PWD" = '/test1' ] + WORKDIR /test2 + RUN [ "$PWD" = '/test2' ] + WORKDIR test3 + RUN [ "$PWD" = '/test2/test3' ]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.WorkingDir") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Workdir %s, expected %s", res, expected) + } + logDone("build - relative workdir") +} + +func TestBuildEnv(t *testing.T) { + name := "testbuildenv" + expected := "[HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + ENV PORT 2375 + RUN [ $(env | grep PORT) = 'PORT=2375' ]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Env") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Env %s, expected %s", res, expected) + } + logDone("build - env") +} + +func TestBuildCmd(t *testing.T) { + name := "testbuildcmd" + expected := "[/bin/echo Hello World]" + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + CMD ["/bin/echo", "Hello World"]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Cmd") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Cmd %s, expected %s", res, expected) + } + logDone("build - cmd") +} + +func TestBuildExpose(t *testing.T) { + name := "testbuildexpose" + expected := "map[2375/tcp:map[]]" + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + EXPOSE 2375`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.ExposedPorts") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Exposed ports %s, expected %s", res, expected) + } + logDone("build - expose") +} + +func TestBuildEntrypoint(t *testing.T) { + name := 
"testbuildentrypoint" + expected := "[/bin/echo]" + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Entrypoint") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Entrypoint %s, expected %s", res, expected) + } + logDone("build - entrypoint") +} + +func TestBuildWithCache(t *testing.T) { + name := "testbuildwithcache" + defer deleteImages(name) + id1, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + t.Fatal(err) + } + if id1 != id2 { + t.Fatal("The cache should have been used but hasn't.") + } + logDone("build - with cache") +} + +func TestBuildWithoutCache(t *testing.T) { + name := "testbuildwithoutcache" + defer deleteImages(name) + id1, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + false) + if err != nil { + t.Fatal(err) + } + if id1 == id2 { + t.Fatal("The cache should have been invalided but hasn't.") + } + logDone("build - without cache") +} + +func TestBuildADDLocalFileWithCache(t *testing.T) { + name := "testbuildaddlocalfilewithcache" + defer deleteImages(name) + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + RUN [ "$(cat /usr/lib/bla/bar)" = "hello" ]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id1 != id2 { + t.Fatal("The cache should have been used but hasn't.") + } + logDone("build - add local file with cache") +} + +func TestBuildADDLocalFileWithoutCache(t *testing.T) { + name := "testbuildaddlocalfilewithoutcache" + defer deleteImages(name) + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + RUN [ "$(cat /usr/lib/bla/bar)" = "hello" ]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, false) + if err != nil { + t.Fatal(err) + } + if id1 == id2 { + t.Fatal("The cache should have been invalided but hasn't.") + } + logDone("build - add local file without cache") +} + +func TestBuildADDCurrentDirWithCache(t *testing.T) { + name := "testbuildaddcurrentdirwithcache" + defer deleteImages(name) + dockerfile := ` + FROM scratch + MAINTAINER dockerio + ADD . /usr/lib/bla` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + // Check that adding file invalidate cache of "ADD ." 
+ if err := ctx.Add("bar", "hello2"); err != nil { + t.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id1 == id2 { + t.Fatal("The cache should have been invalided but hasn't.") + } + // Check that changing file invalidate cache of "ADD ." + if err := ctx.Add("foo", "hello1"); err != nil { + t.Fatal(err) + } + id3, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id2 == id3 { + t.Fatal("The cache should have been invalided but hasn't.") + } + // Check that changing file to same content invalidate cache of "ADD ." + time.Sleep(1 * time.Second) // wait second because of mtime precision + if err := ctx.Add("foo", "hello1"); err != nil { + t.Fatal(err) + } + id4, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id3 == id4 { + t.Fatal("The cache should have been invalided but hasn't.") + } + id5, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id4 != id5 { + t.Fatal("The cache should have been used but hasn't.") + } + logDone("build - add current directory with cache") +} + +func TestBuildADDCurrentDirWithoutCache(t *testing.T) { + name := "testbuildaddcurrentdirwithoutcache" + defer deleteImages(name) + dockerfile := ` + FROM scratch + MAINTAINER dockerio + ADD . /usr/lib/bla` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, false) + if err != nil { + t.Fatal(err) + } + if id1 == id2 { + t.Fatal("The cache should have been invalided but hasn't.") + } + logDone("build - add current directory without cache") +} + +func TestBuildADDRemoteFileWithCache(t *testing.T) { + name := "testbuildaddremotefilewithcache" + defer deleteImages(name) + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + t.Fatal(err) + } + defer server.Close() + id1, err := buildImage(name, + fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL), + true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImage(name, + fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL), + true) + if err != nil { + t.Fatal(err) + } + if id1 != id2 { + t.Fatal("The cache should have been used but hasn't.") + } + logDone("build - add remote file with cache") +} + +func TestBuildADDRemoteFileWithoutCache(t *testing.T) { + name := "testbuildaddremotefilewithoutcache" + defer deleteImages(name) + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + t.Fatal(err) + } + defer server.Close() + id1, err := buildImage(name, + fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL), + true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImage(name, + fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL), + false) + if err != nil { + t.Fatal(err) + } + if id1 == id2 { + t.Fatal("The cache should have been invalided but hasn't.") + } + logDone("build - add remote file without cache") +} + +func TestBuildADDLocalAndRemoteFilesWithCache(t *testing.T) { + name := "testbuildaddlocalandremotefilewithcache" + defer deleteImages(name) + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + 
t.Fatal(err) + } + defer server.Close() + ctx, err := fakeContext(fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + ADD %s/baz /usr/lib/baz/quux`, server.URL), + map[string]string{ + "foo": "hello world", + }) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id1 != id2 { + t.Fatal("The cache should have been used but hasn't.") + } + logDone("build - add local and remote file with cache") +} + +// TODO: TestCaching +func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) { + name := "testbuildaddlocalandremotefilewithoutcache" + defer deleteImages(name) + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + t.Fatal(err) + } + defer server.Close() + ctx, err := fakeContext(fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + ADD %s/baz /usr/lib/baz/quux`, server.URL), + map[string]string{ + "foo": "hello world", + }) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, false) + if err != nil { + t.Fatal(err) + } + if id1 == id2 { + t.Fatal("The cache should have been invalidated but hasn't.") + } + logDone("build - add local and remote file without cache") +} + +func TestBuildWithVolumeOwnership(t *testing.T) { + name := "testbuildimg" + defer deleteImages(name) + + _, err := buildImage(name, + `FROM busybox:latest + RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test + VOLUME /test`, + true) + + if err != nil { + t.Fatal(err) + } + + cmd := exec.Command(dockerBinary, "run", "--rm", "testbuildimg", "ls", "-la", "/test") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + + if expected := "drw-------"; !strings.Contains(out, expected) { + t.Fatalf("expected %s received %s", expected, out) + } + + if expected := "daemon daemon"; !strings.Contains(out, expected) { + t.Fatalf("expected %s received %s", expected, out) + } + + logDone("build - volume ownership") +} diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go index e99379231e..c02c89cd30 100644 --- a/integration-cli/docker_cli_commit_test.go +++ b/integration-cli/docker_cli_commit_test.go @@ -62,3 +62,49 @@ func TestCommitNewFile(t *testing.T) { logDone("commit - commit file and read") } + +func TestCommitTTY(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-t", "--name", "tty", "busybox", "/bin/ls") + + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "commit", "tty", "ttytest") + imageId, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + imageId = strings.Trim(imageId, "\r\n") + + cmd = exec.Command(dockerBinary, "run", "ttytest", "/bin/ls") + + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } +} + +func TestCommitWithHostBindMount(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "bind-commit", "-v", "/dev/null:/winning", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "commit", "bind-commit", "bindtest") + imageId, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + imageId = strings.Trim(imageId, "\r\n") + + cmd =
exec.Command(dockerBinary, "run", "bindtest", "true") + + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + deleteImages(imageId) + + logDone("commit - commit bind mounted file") +} diff --git a/integration-cli/docker_cli_cp_test.go b/integration-cli/docker_cli_cp_test.go new file mode 100644 index 0000000000..aecc68edb4 --- /dev/null +++ b/integration-cli/docker_cli_cp_test.go @@ -0,0 +1,373 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" +) + +const ( + cpTestPathParent = "/some" + cpTestPath = "/some/path" + cpTestName = "test" + cpFullPath = "/some/path/test" + + cpContainerContents = "holla, i am the container" + cpHostContents = "hello, i am the host" +) + +// Test for #5656 +// Check that garbage paths don't escape the container's rootfs +func TestCpGarbagePath(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := filepath.Join("../../../../../../../../../../../../", cpFullPath) + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from garbage path: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- garbage path can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for garbage path") + } + + logDone("cp - garbage paths relative to container's rootfs") +} + +// Check that relative paths are relative to the container's rootfs +func TestCpRelativePath(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + t.Fatal(err) + } + + tmpname := 
filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path, _ := filepath.Rel("/", cpFullPath) + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from relative path: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- relative path can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for relative path") + } + + logDone("cp - relative paths relative to container's rootfs") +} + +// Check that absolute paths are relative to the container's rootfs +func TestCpAbsolutePath(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := cpFullPath + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- absolute path can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for absolute path") + } + + logDone("cp - absolute paths relative to container's rootfs") +} + +// Test for #5619 +// Check that absolute symlinks are still relative to the container's rootfs +func TestCpAbsoluteSymlink(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer 
os.RemoveAll(tmpdir) + + path := filepath.Join("/", "container_path") + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from absolute symlink: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- absolute symlink can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for absolute symlink") + } + + logDone("cp - absolute symlink relative to container's rootfs") +} + +// Test for #5619 +// Check that symlinks which are part of the resource path are still relative to the container's rootfs +func TestCpSymlinkComponent(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := filepath.Join("/", "container_path", cpTestName) + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from symlink path component: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- symlink path component can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for symlink path component") + } + + logDone("cp - symlink path components relative to container's rootfs") +} + +// Check that cp with unprivileged user doesn't return any error +func TestCpUnprivilegedUser(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + tmpdir, err := ioutil.TempDir("", "docker-integration") + if err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(tmpdir) + + if err = os.Chmod(tmpdir, 0777); err != nil { + t.Fatal(err) + } + + path := cpTestName + + _, _, err = runCommandWithOutput(exec.Command("su", "unprivilegeduser", "-c", dockerBinary+" cp "+cleanedContainerID+":"+path+" "+tmpdir)) + if err != nil { + t.Fatalf("couldn't copy with
unprivileged user: %s:%s %s", cleanedContainerID, path, err) + } + + logDone("cp - unprivileged user") +} diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go index 55c41e0bbc..0480183bc7 100644 --- a/integration-cli/docker_cli_links_test.go +++ b/integration-cli/docker_cli_links_test.go @@ -2,10 +2,47 @@ package main import ( "fmt" + "github.com/dotcloud/docker/pkg/iptables" + "io/ioutil" + "os" "os/exec" + "strings" "testing" ) +func TestEtcHostsRegularFile(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + if !strings.HasPrefix(out, "-") { + t.Errorf("/etc/hosts should be a regular file") + } + + deleteAllContainers() + + logDone("link - /etc/hosts is a regular file") +} + +func TestEtcHostsContentMatch(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "cat", "/etc/hosts") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + hosts, err := ioutil.ReadFile("/etc/hosts") + if os.IsNotExist(err) { + t.Skip("/etc/hosts does not exist, skip this test") + } + + if out != string(hosts) { + t.Errorf("container") + } + + deleteAllContainers() + + logDone("link - /etc/hosts matches hosts copy") +} + func TestPingUnlinkedContainers(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") exitCode, err := runCommand(runCmd) @@ -28,3 +65,28 @@ func TestPingLinkedContainers(t *testing.T) { cmd(t, "kill", idB) deleteAllContainers() } + +func TestIpTablesRulesWhenLinkAndUnlink(t *testing.T) { + cmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "sleep", "10") + cmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "sleep", "10") + + childIp := findContainerIp(t, "child") + parentIp := findContainerIp(t, "parent") + + sourceRule := []string{"FORWARD", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", childIp, "--sport", "80", "-d", parentIp, "-j", "ACCEPT"} + destinationRule := []string{"FORWARD", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", parentIp, "--dport", "80", "-d", childIp, "-j", "ACCEPT"} + if !iptables.Exists(sourceRule...) || !iptables.Exists(destinationRule...) { + t.Fatal("Iptables rules not found") + } + + cmd(t, "rm", "--link", "parent/http") + if iptables.Exists(sourceRule...) || iptables.Exists(destinationRule...) { + t.Fatal("Iptables rules should be removed when unlink") + } + + cmd(t, "kill", "child") + cmd(t, "kill", "parent") + deleteAllContainers() + + logDone("link - verify iptables when link and unlink") +} diff --git a/integration-cli/docker_cli_nat_test.go b/integration-cli/docker_cli_nat_test.go index 90af933be9..3816e54050 100644 --- a/integration-cli/docker_cli_nat_test.go +++ b/integration-cli/docker_cli_nat_test.go @@ -3,22 +3,14 @@ package main import ( "encoding/json" "fmt" - "github.com/dotcloud/docker/daemon" "net" "os/exec" - "path/filepath" "testing" + + "github.com/dotcloud/docker/daemon" ) func TestNetworkNat(t *testing.T) { - ncPath, err := exec.LookPath("nc") - if err != nil { - t.Skip("Test not running with `make test`. Netcat not found: %s", err) - } - ncPath, err = filepath.EvalSymlinks(ncPath) - if err != nil { - t.Fatalf("Error resolving netcat symlink: %s", err) - } iface, err := net.InterfaceByName("eth0") if err != nil { t.Skip("Test not running with `make test`. 
Interface eth0 not found: %s", err) @@ -34,10 +26,7 @@ func TestNetworkNat(t *testing.T) { t.Fatalf("Error retrieving the up for eth0: %s", err) } - runCmd := exec.Command(dockerBinary, "run", "-d", - "-v", ncPath+":/bin/nc", - "-v", "/lib/x86_64-linux-gnu/libc.so.6:/lib/libc.so.6", "-v", "/lib/x86_64-linux-gnu/libresolv.so.2:/lib/libresolv.so.2", "-v", "/lib/x86_64-linux-gnu/libbsd.so.0:/lib/libbsd.so.0", "-v", "/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2:/lib/ld-linux-x86-64.so.2", - "-p", "8080", "busybox", "/bin/nc", "-lp", "8080") + runCmd := exec.Command(dockerBinary, "run", "-d", "-p", "8080", "busybox", "nc", "-lp", "8080") out, _, err := runCommandWithOutput(runCmd) errorOut(err, t, fmt.Sprintf("run1 failed with errors: %v (%s)", err, out)) @@ -60,10 +49,7 @@ func TestNetworkNat(t *testing.T) { t.Fatal("Port 8080/tcp not found in NetworkSettings") } - runCmd = exec.Command(dockerBinary, "run", - "-v", ncPath+":/bin/nc", - "-v", "/lib/x86_64-linux-gnu/libc.so.6:/lib/libc.so.6", "-v", "/lib/x86_64-linux-gnu/libresolv.so.2:/lib/libresolv.so.2", "-v", "/lib/x86_64-linux-gnu/libbsd.so.0:/lib/libbsd.so.0", "-v", "/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2:/lib/ld-linux-x86-64.so.2", - "-p", "8080", "busybox", "sh", "-c", fmt.Sprintf("echo hello world | /bin/nc -w 30 %s %s", ifaceIp, port8080[0].HostPort)) + runCmd = exec.Command(dockerBinary, "run", "-p", "8080", "busybox", "sh", "-c", fmt.Sprintf("echo hello world | nc -w 30 %s %s", ifaceIp, port8080[0].HostPort)) out, _, err = runCommandWithOutput(runCmd) errorOut(err, t, fmt.Sprintf("run2 failed with errors: %v (%s)", err, out)) diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go index 13b443f3d6..fa7d2be44e 100644 --- a/integration-cli/docker_cli_pull_test.go +++ b/integration-cli/docker_cli_pull_test.go @@ -8,7 +8,7 @@ import ( // pulling an image from the central registry should work func TestPullImageFromCentralRegistry(t *testing.T) { - pullCmd := exec.Command(dockerBinary, "pull", "busybox") + pullCmd := exec.Command(dockerBinary, "pull", "busybox:latest") out, exitCode, err := runCommandWithOutput(pullCmd) errorOut(err, t, fmt.Sprintf("%s %s", out, err)) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index b9737feeea..545ad371ee 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -5,6 +5,7 @@ import ( "os" "os/exec" "path/filepath" + "reflect" "regexp" "sort" "strings" @@ -251,13 +252,13 @@ func TestDockerRunWorkingDirectory(t *testing.T) { // pinging Google's DNS resolver should fail when we disable the networking func TestDockerRunWithoutNetworking(t *testing.T) { - runCmd := exec.Command(dockerBinary, "run", "--networking=false", "busybox", "ping", "-c", "1", "8.8.8.8") + runCmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ping", "-c", "1", "8.8.8.8") out, _, exitCode, err := runCommandWithStdoutStderr(runCmd) if err != nil && exitCode != 1 { t.Fatal(out, err) } if exitCode != 1 { - t.Errorf("--networking=false should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + t.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") } runCmd = exec.Command(dockerBinary, "run", "-n=false", "busybox", "ping", "-c", "1", "8.8.8.8") @@ -271,7 +272,7 @@ func TestDockerRunWithoutNetworking(t *testing.T) { deleteAllContainers() - logDone("run - disable networking with --networking=false") + logDone("run 
- disable networking with --net=none") logDone("run - disable networking with -n=false") } @@ -438,7 +439,7 @@ func TestCreateVolume(t *testing.T) { deleteAllContainers() - logDone("run - create docker mangaed volume") + logDone("run - create docker managed volume") } // Test that creating a volume with a symlink in its path works correctly. Test for #5152. @@ -544,6 +545,51 @@ func TestUserByID(t *testing.T) { logDone("run - user by id") } +func TestUserByIDBig(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-u", "2147483648", "busybox", "id") + + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal("No error, but must be.", out) + } + if !strings.Contains(out, "Uids and gids must be in range") { + t.Fatalf("expected error about uids range, got %s", out) + } + deleteAllContainers() + + logDone("run - user by id, id too big") +} + +func TestUserByIDNegative(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-u", "-1", "busybox", "id") + + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal("No error, but must be.", out) + } + if !strings.Contains(out, "Uids and gids must be in range") { + t.Fatalf("expected error about uids range, got %s", out) + } + deleteAllContainers() + + logDone("run - user by id, id negative") +} + +func TestUserByIDZero(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-u", "0", "busybox", "id") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") { + t.Fatalf("expected daemon user got %s", out) + } + deleteAllContainers() + + logDone("run - user by id, zero uid") +} + func TestUserNotFound(t *testing.T) { cmd := exec.Command(dockerBinary, "run", "-u", "notme", "busybox", "id") @@ -633,7 +679,7 @@ func TestContainerNetwork(t *testing.T) { // Issue #4681 func TestLoopbackWhenNetworkDisabled(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "--networking=false", "busybox", "ping", "-c", "1", "127.0.0.1") + cmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } @@ -644,18 +690,29 @@ func TestLoopbackWhenNetworkDisabled(t *testing.T) { } func TestLoopbackOnlyExistsWhenNetworkingDisabled(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "--networking=false", "busybox", "ip", "a", "show", "up") + cmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } - interfaces := regexp.MustCompile(`(?m)^[0-9]+: [a-zA-Z0-9]+`).FindAllString(out, -1) - if len(interfaces) != 1 { - t.Fatalf("Wrong interface count in test container: expected [*: lo], got %s", interfaces) + var ( + count = 0 + parts = strings.Split(out, "\n") + ) + + for _, l := range parts { + if l != "" { + count++ + } } - if !strings.HasSuffix(interfaces[0], ": lo") { - t.Fatalf("Wrong interface in test container: expected [*: lo], got %s", interfaces) + + if count != 1 { + t.Fatalf("Wrong interface count in container %d", count) + } + + if !strings.HasPrefix(out, "1: lo") { + t.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out) } deleteAllContainers() @@ -768,3 +825,63 @@ func TestProcWritableInPrivilegedContainers(t *testing.T) { logDone("run - proc writable in privileged container") } + +func TestRunWithCpuset(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cpuset", "0", 
"busybox", "true") + if code, err := runCommand(cmd); err != nil || code != 0 { + t.Fatalf("container should run successfuly with cpuset of 0: %s", err) + } + + deleteAllContainers() + + logDone("run - cpuset 0") +} + +func TestDeviceNumbers(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "ls -l /dev/null") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + deviceLineFields := strings.Fields(out) + deviceLineFields[6] = "" + deviceLineFields[7] = "" + deviceLineFields[8] = "" + expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"} + + if !(reflect.DeepEqual(deviceLineFields, expected)) { + t.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out) + } + deleteAllContainers() + + logDone("run - test device numbers") +} + +func TestThatCharacterDevicesActLikeCharacterDevices(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual[0] == '0' { + t.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", actual) + } + deleteAllContainers() + + logDone("run - test that character devices work.") +} + +func TestRunUnprivilegedWithChroot(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "chroot", "/", "true") + + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + + logDone("run - unprivileged with chroot") +} diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go index d75b7db385..ef51f64644 100644 --- a/integration-cli/docker_cli_tag_test.go +++ b/integration-cli/docker_cli_tag_test.go @@ -27,7 +27,7 @@ func TestTagUnprefixedRepoByName(t *testing.T) { // tagging an image by ID in a new unprefixed repo should work func TestTagUnprefixedRepoByID(t *testing.T) { - getIDCmd := exec.Command(dockerBinary, "inspect", "-f", "{{.id}}", "busybox") + getIDCmd := exec.Command(dockerBinary, "inspect", "-f", "{{.Id}}", "busybox") out, _, err := runCommandWithOutput(getIDCmd) errorOut(err, t, fmt.Sprintf("failed to get the image ID of busybox: %v", err)) diff --git a/integration-cli/docker_cli_version_test.go b/integration-cli/docker_cli_version_test.go index f18d5bede6..7f1838e5d9 100644 --- a/integration-cli/docker_cli_version_test.go +++ b/integration-cli/docker_cli_version_test.go @@ -24,9 +24,8 @@ func TestVersionEnsureSucceeds(t *testing.T) { "Git commit (client):", "Server version:", "Server API version:", - "Git commit (server):", "Go version (server):", - "Last stable version:", + "Git commit (server):", } for _, linePrefix := range stringsToCheck { diff --git a/integration-cli/docker_test_vars.go b/integration-cli/docker_test_vars.go index f8bd5c116b..78e0685a9d 100644 --- a/integration-cli/docker_test_vars.go +++ b/integration-cli/docker_test_vars.go @@ -1,7 +1,9 @@ package main import ( + "fmt" "os" + "os/exec" ) // the docker binary to use @@ -18,6 +20,15 @@ var workingDirectory string func init() { if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" { dockerBinary = dockerBin + } else { + whichCmd := exec.Command("which", "docker") + out, _, err := runCommandWithOutput(whichCmd) + if err == nil { + dockerBinary = stripTrailingCharacters(out) + } else { + 
fmt.Printf("ERROR: couldn't resolve full path to the Docker binary") + os.Exit(1) + } } if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" { registryImageName = registryImage diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index 6da86c9753..c1e306f2ee 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -2,7 +2,13 @@ package main import ( "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" "os/exec" + "path" + "strconv" "strings" "testing" ) @@ -61,3 +67,155 @@ func cmd(t *testing.T, args ...string) (string, int, error) { errorOut(err, t, fmt.Sprintf("'%s' failed with errors: %v (%v)", strings.Join(args, " "), err, out)) return out, status, err } + +func findContainerIp(t *testing.T, id string) string { + cmd := exec.Command(dockerBinary, "inspect", "--format='{{ .NetworkSettings.IPAddress }}'", id) + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + return strings.Trim(out, " \r\n'") +} + +func getContainerCount() (int, error) { + const containers = "Containers:" + + cmd := exec.Command(dockerBinary, "info") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + return 0, err + } + + lines := strings.Split(out, "\n") + for _, line := range lines { + if strings.Contains(line, containers) { + output := stripTrailingCharacters(line) + output = strings.TrimLeft(output, containers) + output = strings.Trim(output, " ") + containerCount, err := strconv.Atoi(output) + if err != nil { + return 0, err + } + return containerCount, nil + } + } + return 0, fmt.Errorf("couldn't find the Container count in the output") +} + +type FakeContext struct { + Dir string +} + +func (f *FakeContext) Add(file, content string) error { + filepath := path.Join(f.Dir, file) + dirpath := path.Dir(filepath) + if dirpath != "." 
{ + if err := os.MkdirAll(dirpath, 0755); err != nil { + return err + } + } + return ioutil.WriteFile(filepath, []byte(content), 0644) +} + +func (f *FakeContext) Delete(file string) error { + filepath := path.Join(f.Dir, file) + return os.RemoveAll(filepath) +} + +func (f *FakeContext) Close() error { + return os.RemoveAll(f.Dir) +} + +func fakeContext(dockerfile string, files map[string]string) (*FakeContext, error) { + tmp, err := ioutil.TempDir("", "fake-context") + if err != nil { + return nil, err + } + ctx := &FakeContext{tmp} + for file, content := range files { + if err := ctx.Add(file, content); err != nil { + ctx.Close() + return nil, err + } + } + if err := ctx.Add("Dockerfile", dockerfile); err != nil { + ctx.Close() + return nil, err + } + return ctx, nil +} + +type FakeStorage struct { + *FakeContext + *httptest.Server +} + +func (f *FakeStorage) Close() error { + f.Server.Close() + return f.FakeContext.Close() +} + +func fakeStorage(files map[string]string) (*FakeStorage, error) { + tmp, err := ioutil.TempDir("", "fake-storage") + if err != nil { + return nil, err + } + ctx := &FakeContext{tmp} + for file, content := range files { + if err := ctx.Add(file, content); err != nil { + ctx.Close() + return nil, err + } + } + handler := http.FileServer(http.Dir(ctx.Dir)) + server := httptest.NewServer(handler) + return &FakeStorage{ + FakeContext: ctx, + Server: server, + }, nil +} + +func inspectField(name, field string) (string, error) { + format := fmt.Sprintf("{{.%s}}", field) + inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name) + out, exitCode, err := runCommandWithOutput(inspectCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to inspect %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func getIDByName(name string) (string, error) { + return inspectField(name, "Id") +} + +func buildImage(name, dockerfile string, useCache bool) (string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, "-") + buildCmd := exec.Command(dockerBinary, args...) + buildCmd.Stdin = strings.NewReader(dockerfile) + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to build the image: %s", out) + } + return getIDByName(name) +} + +func buildImageFromContext(name string, ctx *FakeContext, useCache bool) (string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, ".") + buildCmd := exec.Command(dockerBinary, args...) 
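+ // Run the build from inside the fake context directory: the trailing "." argument then resolves to ctx.Dir, so the Dockerfile and any ADDed files come from the temporary context.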
+ buildCmd.Dir = ctx.Dir + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to build the image: %s", out) + } + return getIDByName(name) +} diff --git a/integration/api_test.go b/integration/api_test.go index 04611dfe3d..969e0fbaf2 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -536,7 +536,6 @@ func TestGetContainersByName(t *testing.T) { func TestPostCommit(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) // Create a container and remove a file containerID := createTestContainer(eng, @@ -567,7 +566,7 @@ func TestPostCommit(t *testing.T) { if err := env.Decode(r.Body); err != nil { t.Fatal(err) } - if _, err := srv.ImageInspect(env.Get("Id")); err != nil { + if err := eng.Job("image_inspect", env.Get("Id")).Run(); err != nil { t.Fatalf("The image has not been committed") } } diff --git a/integration/buildfile_test.go b/integration/buildfile_test.go index b87fa116eb..8c02cf043c 100644 --- a/integration/buildfile_test.go +++ b/integration/buildfile_test.go @@ -1,19 +1,22 @@ package docker import ( + "bytes" + "encoding/json" "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/image" - "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/server" - "github.com/dotcloud/docker/utils" "io/ioutil" "net" "net/http" "net/http/httptest" "strings" "testing" + + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/server" + "github.com/dotcloud/docker/utils" ) // A testContextTemplate describes a build context and how to test it @@ -394,153 +397,21 @@ func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, u } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := server.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) + buildfile := server.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) id, err := buildfile.Build(context.Archive(dockerfile, t)) if err != nil { return nil, err } - return srv.ImageInspect(id) -} - -func TestVolume(t *testing.T) { - img, err := buildImage(testContextTemplate{` - from {IMAGE} - volume /test - cmd Hello world - `, nil, nil}, t, nil, true) - if err != nil { - t.Fatal(err) - } - - if len(img.Config.Volumes) == 0 { - t.Fail() - } - for key := range img.Config.Volumes { - if key != "/test" { - t.Fail() - } - } -} - -func TestBuildMaintainer(t *testing.T) { - img, err := buildImage(testContextTemplate{` - from {IMAGE} - maintainer dockerio - `, nil, nil}, t, nil, true) - if err != nil { - t.Fatal(err) - } - - if img.Author != "dockerio" { - t.Fail() - } -} - -func TestBuildUser(t *testing.T) { - img, err := buildImage(testContextTemplate{` - from {IMAGE} - user dockerio - `, nil, nil}, t, nil, true) - if err != nil { - t.Fatal(err) - } - - if img.Config.User != "dockerio" { - t.Fail() - } -} - -func TestBuildRelativeWorkdir(t *testing.T) { - img, err := buildImage(testContextTemplate{` - FROM {IMAGE} - RUN [ "$PWD" = '/' ] - WORKDIR test1 - RUN [ "$PWD" = '/test1' ] - WORKDIR /test2 - RUN [ "$PWD" = '/test2' ] - WORKDIR test3 - RUN [ "$PWD" = '/test2/test3' ] - `, nil, nil}, t, nil, true) - if err != nil { - 
t.Fatal(err) - } - if img.Config.WorkingDir != "/test2/test3" { - t.Fatalf("Expected workdir to be '/test2/test3', received '%s'", img.Config.WorkingDir) - } -} - -func TestBuildEnv(t *testing.T) { - img, err := buildImage(testContextTemplate{` - from {IMAGE} - env port 4243 - `, - nil, nil}, t, nil, true) - if err != nil { - t.Fatal(err) - } - - hasEnv := false - for _, envVar := range img.Config.Env { - if envVar == "port=4243" { - hasEnv = true - break - } - } - if !hasEnv { - t.Fail() - } -} - -func TestBuildCmd(t *testing.T) { - img, err := buildImage(testContextTemplate{` - from {IMAGE} - cmd ["/bin/echo", "Hello World"] - `, - nil, nil}, t, nil, true) - if err != nil { - t.Fatal(err) - } - - if img.Config.Cmd[0] != "/bin/echo" { - t.Log(img.Config.Cmd[0]) - t.Fail() - } - if img.Config.Cmd[1] != "Hello World" { - t.Log(img.Config.Cmd[1]) - t.Fail() - } -} - -func TestBuildExpose(t *testing.T) { - img, err := buildImage(testContextTemplate{` - from {IMAGE} - expose 4243 - `, - nil, nil}, t, nil, true) - if err != nil { - t.Fatal(err) - } - - if _, exists := img.Config.ExposedPorts[nat.NewPort("tcp", "4243")]; !exists { - t.Fail() - } -} - -func TestBuildEntrypoint(t *testing.T) { - img, err := buildImage(testContextTemplate{` - from {IMAGE} - entrypoint ["/bin/echo"] - `, - nil, nil}, t, nil, true) - if err != nil { - t.Fatal(err) - } - - if img.Config.Entrypoint[0] != "/bin/echo" { - t.Log(img.Config.Entrypoint[0]) - t.Fail() + job := eng.Job("image_inspect", id) + buffer := bytes.NewBuffer(nil) + image := &image.Image{} + job.Stdout.Add(buffer) + if err := job.Run(); err != nil { + return nil, err } + err = json.NewDecoder(buffer).Decode(image) + return image, err } // testing #1405 - config.Cmd does not get cleaned up if @@ -574,226 +445,6 @@ func TestBuildEntrypointRunCleanup(t *testing.T) { } } -func checkCacheBehavior(t *testing.T, template testContextTemplate, expectHit bool) (imageId string) { - eng := NewTestEngine(t) - defer nuke(mkDaemonFromEngine(eng, t)) - - img, err := buildImage(template, t, eng, true) - if err != nil { - t.Fatal(err) - } - - imageId = img.ID - - img, err = buildImage(template, t, eng, expectHit) - if err != nil { - t.Fatal(err) - } - - if hit := imageId == img.ID; hit != expectHit { - t.Fatalf("Cache misbehavior, got hit=%t, expected hit=%t: (first: %s, second %s)", hit, expectHit, imageId, img.ID) - } - return -} - -func checkCacheBehaviorFromEngime(t *testing.T, template testContextTemplate, expectHit bool, eng *engine.Engine) (imageId string) { - img, err := buildImage(template, t, eng, true) - if err != nil { - t.Fatal(err) - } - - imageId = img.ID - - img, err = buildImage(template, t, eng, expectHit) - if err != nil { - t.Fatal(err) - } - - if hit := imageId == img.ID; hit != expectHit { - t.Fatalf("Cache misbehavior, got hit=%t, expected hit=%t: (first: %s, second %s)", hit, expectHit, imageId, img.ID) - } - return -} - -func TestBuildImageWithCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - `, - nil, nil} - checkCacheBehavior(t, template, true) -} - -func TestBuildExposeWithCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - expose 80 - run echo hello - `, - nil, nil} - checkCacheBehavior(t, template, true) -} - -func TestBuildImageWithoutCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - `, - nil, nil} - checkCacheBehavior(t, template, false) -} - -func TestBuildADDLocalFileWithCache(t *testing.T) { - 
template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - run echo "first" - add foo /usr/lib/bla/bar - run [ "$(cat /usr/lib/bla/bar)" = "hello" ] - run echo "second" - add . /src/ - run [ "$(cat /src/foo)" = "hello" ] - `, - [][2]string{ - {"foo", "hello"}, - }, - nil} - eng := NewTestEngine(t) - defer nuke(mkDaemonFromEngine(eng, t)) - - id1 := checkCacheBehaviorFromEngime(t, template, true, eng) - template.files = append(template.files, [2]string{"bar", "hello2"}) - id2 := checkCacheBehaviorFromEngime(t, template, true, eng) - if id1 == id2 { - t.Fatal("The cache should have been invalided but hasn't.") - } - id3 := checkCacheBehaviorFromEngime(t, template, true, eng) - if id2 != id3 { - t.Fatal("The cache should have been used but hasn't.") - } - template.files[1][1] = "hello3" - id4 := checkCacheBehaviorFromEngime(t, template, true, eng) - if id3 == id4 { - t.Fatal("The cache should have been invalided but hasn't.") - } - template.dockerfile += ` - add ./bar /src2/ - run ls /src2/bar - ` - id5 := checkCacheBehaviorFromEngime(t, template, true, eng) - if id4 == id5 { - t.Fatal("The cache should have been invalided but hasn't.") - } - template.files[1][1] = "hello4" - id6 := checkCacheBehaviorFromEngime(t, template, true, eng) - if id5 == id6 { - t.Fatal("The cache should have been invalided but hasn't.") - } - - template.dockerfile += ` - add bar /src2/bar2 - add /bar /src2/bar3 - run ls /src2/bar2 /src2/bar3 - ` - id7 := checkCacheBehaviorFromEngime(t, template, true, eng) - if id6 == id7 { - t.Fatal("The cache should have been invalided but hasn't.") - } - template.files[1][1] = "hello5" - id8 := checkCacheBehaviorFromEngime(t, template, true, eng) - if id7 == id8 { - t.Fatal("The cache should have been invalided but hasn't.") - } -} - -func TestBuildADDLocalFileWithoutCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - run echo "first" - add foo /usr/lib/bla/bar - run echo "second" - `, - [][2]string{{"foo", "hello"}}, - nil} - checkCacheBehavior(t, template, false) -} - -func TestBuildADDCurrentDirectoryWithCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - add . /usr/lib/bla - `, - nil, nil} - checkCacheBehavior(t, template, true) -} - -func TestBuildADDCurrentDirectoryWithoutCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - add . 
/usr/lib/bla - `, - nil, nil} - checkCacheBehavior(t, template, false) -} - -func TestBuildADDRemoteFileWithCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - run echo "first" - add http://{SERVERADDR}/baz /usr/lib/baz/quux - run echo "second" - `, - nil, - [][2]string{{"/baz", "world!"}}} - checkCacheBehavior(t, template, true) -} - -func TestBuildADDRemoteFileWithoutCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - run echo "first" - add http://{SERVERADDR}/baz /usr/lib/baz/quux - run echo "second" - `, - nil, - [][2]string{{"/baz", "world!"}}} - checkCacheBehavior(t, template, false) -} - -func TestBuildADDLocalAndRemoteFilesWithCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - run echo "first" - add foo /usr/lib/bla/bar - add http://{SERVERADDR}/baz /usr/lib/baz/quux - run echo "second" - `, - [][2]string{{"foo", "hello"}}, - [][2]string{{"/baz", "world!"}}} - checkCacheBehavior(t, template, true) -} - -func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - run echo "first" - add foo /usr/lib/bla/bar - add http://{SERVERADDR}/baz /usr/lib/baz/quux - run echo "second" - `, - [][2]string{{"foo", "hello"}}, - [][2]string{{"/baz", "world!"}}} - checkCacheBehavior(t, template, false) -} - func TestForbiddenContextPath(t *testing.T) { eng := NewTestEngine(t) defer nuke(mkDaemonFromEngine(eng, t)) @@ -828,7 +479,7 @@ func TestForbiddenContextPath(t *testing.T) { } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := server.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) + buildfile := server.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) _, err = buildfile.Build(context.Archive(dockerfile, t)) if err == nil { @@ -874,7 +525,7 @@ func TestBuildADDFileNotFound(t *testing.T) { } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := server.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) + buildfile := server.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) _, err = buildfile.Build(context.Archive(dockerfile, t)) if err == nil { @@ -894,7 +545,7 @@ func TestBuildInheritance(t *testing.T) { img, err := buildImage(testContextTemplate{` from {IMAGE} - expose 4243 + expose 2375 `, nil, nil}, t, eng, true) @@ -918,7 +569,7 @@ func TestBuildInheritance(t *testing.T) { } // from parent - if _, exists := img.Config.ExposedPorts[nat.NewPort("tcp", "4243")]; !exists { + if _, exists := img.Config.ExposedPorts[nat.NewPort("tcp", "2375")]; !exists { t.Fail() } } diff --git a/integration/commands_test.go b/integration/commands_test.go index 5b967b68cc..4ad225bb43 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -3,12 +3,6 @@ package docker import ( "bufio" "fmt" - "github.com/dotcloud/docker/api/client" - "github.com/dotcloud/docker/daemon" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/image" - "github.com/dotcloud/docker/pkg/term" - "github.com/dotcloud/docker/utils" "io" "io/ioutil" "os" @@ -19,6 +13,13 @@ 
import ( "syscall" "testing" "time" + + "github.com/dotcloud/docker/api/client" + "github.com/dotcloud/docker/daemon" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/pkg/term" + "github.com/dotcloud/docker/utils" ) func closeWrap(args ...io.Closer) error { @@ -1051,11 +1052,12 @@ func TestContainerOrphaning(t *testing.T) { if err := cli.CmdBuild("-t", image, tmpDir); err != nil { t.Fatal(err) } - img, err := srv.ImageInspect(image) - if err != nil { + job := globalEngine.Job("image_get", image) + info, _ := job.Stdout.AddEnv() + if err := job.Run(); err != nil { t.Fatal(err) } - return img.ID + return info.Get("Id") } // build an image diff --git a/integration/graph_test.go b/integration/graph_test.go index c29055edfc..dc056f7e1c 100644 --- a/integration/graph_test.go +++ b/integration/graph_test.go @@ -293,7 +293,7 @@ func tempGraph(t *testing.T) (*graph.Graph, graphdriver.Driver) { if err != nil { t.Fatal(err) } - driver, err := graphdriver.New(tmp) + driver, err := graphdriver.New(tmp, nil) if err != nil { t.Fatal(err) } diff --git a/integration/runtime_test.go b/integration/runtime_test.go index c84ea5bed2..96df15be60 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -3,6 +3,19 @@ package docker import ( "bytes" "fmt" + "io" + "log" + "net" + "net/url" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "testing" + "time" + "github.com/dotcloud/docker/daemon" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/image" @@ -10,18 +23,6 @@ import ( "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/sysinit" "github.com/dotcloud/docker/utils" - "io" - "log" - "net" - "net/url" - "os" - "path/filepath" - goruntime "runtime" - "strconv" - "strings" - "syscall" - "testing" - "time" ) const ( @@ -127,15 +128,15 @@ func init() { spawnGlobalDaemon() spawnLegitHttpsDaemon() spawnRogueHttpsDaemon() - startFds, startGoroutines = utils.GetTotalUsedFds(), goruntime.NumGoroutine() + startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine() } func setupBaseImage() { eng := newTestEngine(log.New(os.Stderr, "", 0), false, unitTestStoreBase) - job := eng.Job("inspect", unitTestImageName, "image") + job := eng.Job("image_inspect", unitTestImageName) img, _ := job.Stdout.AddEnv() // If the unit test is not found, try to download it. 
- if err := job.Run(); err != nil || img.Get("id") != unitTestImageID { + if err := job.Run(); err != nil || img.Get("Id") != unitTestImageID { // Retrieve the Image job = eng.Job("pull", unitTestImageName) job.Stdout.Add(utils.NopWriteCloser(os.Stdout)) @@ -421,13 +422,14 @@ func TestGet(t *testing.T) { func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daemon.Container, string) { var ( - err error - id string - strPort string - eng = NewTestEngine(t) - daemon = mkDaemonFromEngine(eng, t) - port = 5554 - p nat.Port + err error + id string + outputBuffer = bytes.NewBuffer(nil) + strPort string + eng = NewTestEngine(t) + daemon = mkDaemonFromEngine(eng, t) + port = 5554 + p nat.Port ) defer func() { if err != nil { @@ -455,10 +457,11 @@ func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daem jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd}) jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)}) jobCreate.SetenvJson("ExposedPorts", ep) - jobCreate.Stdout.AddString(&id) + jobCreate.Stdout.Add(outputBuffer) if err := jobCreate.Run(); err != nil { t.Fatal(err) } + id = engine.Tail(outputBuffer, 1) // FIXME: this relies on the undocumented behavior of daemon.Create // which will return a nil error AND container if the exposed ports // are invalid. That behavior should be fixed! @@ -720,12 +723,12 @@ func TestContainerNameValidation(t *testing.T) { t.Fatal(err) } - var shortID string + var outputBuffer = bytes.NewBuffer(nil) job := eng.Job("create", test.Name) if err := job.ImportEnv(config); err != nil { t.Fatal(err) } - job.Stdout.AddString(&shortID) + job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { if !test.Valid { continue @@ -733,7 +736,7 @@ func TestContainerNameValidation(t *testing.T) { t.Fatal(err) } - container := daemon.Get(shortID) + container := daemon.Get(engine.Tail(outputBuffer, 1)) if container.Name != "/"+test.Name { t.Fatalf("Expect /%s got %s", test.Name, container.Name) diff --git a/integration/server_test.go b/integration/server_test.go index 226247556d..151490cdc6 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -1,12 +1,14 @@ package docker import ( - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/server" + "bytes" "strings" "testing" "time" + + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/server" ) func TestCreateNumberHostname(t *testing.T) { @@ -70,22 +72,22 @@ func TestMergeConfigOnCommit(t *testing.T) { job.Setenv("repo", "testrepo") job.Setenv("tag", "testtag") job.SetenvJson("config", config) - var newId string - job.Stdout.AddString(&newId) + var outputBuffer = bytes.NewBuffer(nil) + job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { t.Error(err) } - container2, _, _ := mkContainer(runtime, []string{newId}, t) + container2, _, _ := mkContainer(runtime, []string{engine.Tail(outputBuffer, 1)}, t) defer runtime.Destroy(container2) - job = eng.Job("inspect", container1.Name, "container") + job = eng.Job("container_inspect", container1.Name) baseContainer, _ := job.Stdout.AddEnv() if err := job.Run(); err != nil { t.Error(err) } - job = eng.Job("inspect", container2.Name, "container") + job = eng.Job("container_inspect", container2.Name) commitContainer, _ := job.Stdout.AddEnv() if err := job.Run(); err != nil { t.Error(err) @@ -168,8 +170,6 @@ func TestRestartKillWait(t *testing.T) { setTimeout(t, "Waiting on 
stopped container timedout", 5*time.Second, func() { job = srv.Eng.Job("wait", outs.Data[0].Get("Id")) - var statusStr string - job.Stdout.AddString(&statusStr) if err := job.Run(); err != nil { t.Fatal(err) } @@ -266,8 +266,6 @@ func TestRunWithTooLowMemoryLimit(t *testing.T) { job.Setenv("Memory", "524287") job.Setenv("CpuShares", "1000") job.SetenvList("Cmd", []string{"/bin/cat"}) - var id string - job.Stdout.AddString(&id) if err := job.Run(); err == nil { t.Errorf("Memory limit is smaller than the allowed limit. Container creation should've failed!") } @@ -302,13 +300,13 @@ func TestRmi(t *testing.T) { job = eng.Job("commit", containerID) job.Setenv("repo", "test") - var imageID string - job.Stdout.AddString(&imageID) + var outputBuffer = bytes.NewBuffer(nil) + job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { t.Fatal(err) } - if err := eng.Job("tag", imageID, "test", "0.1").Run(); err != nil { + if err := eng.Job("tag", engine.Tail(outputBuffer, 1), "test", "0.1").Run(); err != nil { t.Fatal(err) } @@ -339,7 +337,7 @@ func TestRmi(t *testing.T) { t.Fatalf("Expected 2 new images, found %d.", images.Len()-initialImages.Len()) } - if err = srv.DeleteImage(imageID, engine.NewTable("", 0), true, false, false); err != nil { + if err = srv.DeleteImage(engine.Tail(outputBuffer, 1), engine.NewTable("", 0), true, false, false); err != nil { t.Fatal(err) } @@ -400,28 +398,6 @@ func TestImagesFilter(t *testing.T) { } } -// FIXE: 'insert' is deprecated and should be removed in a future version. -func TestImageInsert(t *testing.T) { - eng := NewTestEngine(t) - defer mkDaemonFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) - - // bad image name fails - if err := srv.Eng.Job("insert", "foo", "https://www.docker.io/static/img/docker-top-logo.png", "/foo").Run(); err == nil { - t.Fatal("expected an error and got none") - } - - // bad url fails - if err := srv.Eng.Job("insert", unitTestImageID, "http://bad_host_name_that_will_totally_fail.com/", "/foo").Run(); err == nil { - t.Fatal("expected an error and got none") - } - - // success returns nil - if err := srv.Eng.Job("insert", unitTestImageID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo").Run(); err != nil { - t.Fatalf("expected no error, but got %v", err) - } -} - func TestListContainers(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) diff --git a/integration/utils_test.go b/integration/utils_test.go index 6901662ce6..d8101dfb1d 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -3,7 +3,6 @@ package docker import ( "bytes" "fmt" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "net/http" @@ -14,6 +13,8 @@ import ( "testing" "time" + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "github.com/dotcloud/docker/builtins" "github.com/dotcloud/docker/daemon" "github.com/dotcloud/docker/engine" @@ -42,11 +43,12 @@ func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f ut if err := job.ImportEnv(config); err != nil { f.Fatal(err) } - job.Stdout.AddString(&shortId) + var outputBuffer = bytes.NewBuffer(nil) + job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { f.Fatal(err) } - return + return engine.Tail(outputBuffer, 1) } func createTestContainer(eng *engine.Engine, config *runconfig.Config, f utils.Fataler) (shortId string) { diff --git a/nat/nat.go b/nat/nat.go index 7aad775d70..31633dd544 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -5,9 +5,10 @@ 
package nat import ( "fmt" - "github.com/dotcloud/docker/utils" "strconv" "strings" + + "github.com/dotcloud/docker/utils" ) const ( @@ -72,6 +73,15 @@ func SplitProtoPort(rawPort string) (string, string) { return parts[1], parts[0] } +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp"} { + if availableProto == proto { + return true + } + } + return false +} + // We will receive port specs in the format of ip:public:private/proto and these need to be // parsed in the internal types func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { @@ -113,6 +123,9 @@ func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, if _, err := strconv.ParseUint(hostPort, 10, 16); hostPort != "" && err != nil { return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) } + if !validateProto(proto) { + return nil, nil, fmt.Errorf("Invalid proto: %s", proto) + } port := NewPort(proto, containerPort) if _, exists := exposedPorts[port]; !exists { diff --git a/pkg/apparmor/setup.go b/pkg/apparmor/setup.go index ef6333a01a..8ed5437470 100644 --- a/pkg/apparmor/setup.go +++ b/pkg/apparmor/setup.go @@ -2,7 +2,6 @@ package apparmor import ( "fmt" - "io" "os" "os/exec" "path" @@ -12,42 +11,11 @@ const ( DefaultProfilePath = "/etc/apparmor.d/docker" ) -func InstallDefaultProfile(backupPath string) error { +func InstallDefaultProfile() error { if !IsEnabled() { return nil } - // If the profile already exists, check if we already have a backup - // if not, do the backup and override it. (docker 0.10 upgrade changed the apparmor profile) - // see gh#5049, apparmor blocks signals in ubuntu 14.04 - if _, err := os.Stat(DefaultProfilePath); err == nil { - if _, err := os.Stat(backupPath); err == nil { - // If both the profile and the backup are present, do nothing - return nil - } - // Make sure the directory exists - if err := os.MkdirAll(path.Dir(backupPath), 0755); err != nil { - return err - } - - // Create the backup file - f, err := os.Create(backupPath) - if err != nil { - return err - } - defer f.Close() - - src, err := os.Open(DefaultProfilePath) - if err != nil { - return err - } - defer src.Close() - - if _, err := io.Copy(f, src); err != nil { - return err - } - } - // Make sure /etc/apparmor.d exists if err := os.MkdirAll(path.Dir(DefaultProfilePath), 0755); err != nil { return err diff --git a/pkg/beam/beam.go b/pkg/beam/beam.go index b1e4667a3f..2e8895a153 100644 --- a/pkg/beam/beam.go +++ b/pkg/beam/beam.go @@ -29,17 +29,48 @@ type ReceiveSender interface { Sender } -func SendPipe(dst Sender, data []byte) (*os.File, error) { +const ( + R = iota + W +) + +func sendPipe(dst Sender, data []byte, mode int) (*os.File, error) { r, w, err := os.Pipe() if err != nil { return nil, err } - if err := dst.Send(data, r); err != nil { - r.Close() - w.Close() + var ( + remote *os.File + local *os.File + ) + if mode == R { + remote = r + local = w + } else if mode == W { + remote = w + local = r + } + if err := dst.Send(data, remote); err != nil { + local.Close() + remote.Close() return nil, err } - return w, nil + return local, nil + +} + +// SendRPipe creates a pipe and sends its *read* end attached in a beam message +// to `dst`, with `data` as the message payload. +// It returns the *write* end of the pipe, or an error.
+func SendRPipe(dst Sender, data []byte) (*os.File, error) { + return sendPipe(dst, data, R) +} + +// SendWPipe creates a pipe and sends its *write* end attached in a beam message +// to `dst`, with `data` as the message payload. +// It returns the *read* end of the pipe, or an error. +func SendWPipe(dst Sender, data []byte) (*os.File, error) { + return sendPipe(dst, data, W) } func SendConn(dst Sender, data []byte) (conn *UnixConn, err error) { diff --git a/pkg/beam/data/message.go b/pkg/beam/data/message.go index 193fb7b241..0ebe90295a 100644 --- a/pkg/beam/data/message.go +++ b/pkg/beam/data/message.go @@ -72,6 +72,16 @@ func (m Message) Get(k string) []string { return v } +// GetOne returns the last value added at the key k, +// or an empty string if there is no value. +func (m Message) GetOne(k string) string { + var v string + if vals := m.Get(k); len(vals) > 0 { + v = vals[len(vals)-1] + } + return v +} + func (m Message) Pretty() string { data, err := Decode(string(m)) if err != nil { diff --git a/pkg/beam/data/message_test.go b/pkg/beam/data/message_test.go index 7685769069..7224f33d11 100644 --- a/pkg/beam/data/message_test.go +++ b/pkg/beam/data/message_test.go @@ -51,3 +51,11 @@ func TestSetDelMessage(t *testing.T) { t.Fatalf("'%v' != '%v'", output, expectedOutput) } } + +func TestGetOne(t *testing.T) { + m := Empty().Set("shadok words", "ga", "bu", "zo", "meu") + val := m.GetOne("shadok words") + if val != "meu" { + t.Fatalf("%#v", val) + } +} diff --git a/pkg/beam/examples/beamsh/beamsh.go b/pkg/beam/examples/beamsh/beamsh.go index 3f258de332..808f038c68 100644 --- a/pkg/beam/examples/beamsh/beamsh.go +++ b/pkg/beam/examples/beamsh/beamsh.go @@ -257,12 +257,12 @@ func Handlers(sink beam.Sender) (*beam.UnixConn, error) { if handler == nil { return } - stdout, err := beam.SendPipe(conn, data.Empty().Set("cmd", "log", "stdout").Set("fromcmd", cmd...).Bytes()) + stdout, err := beam.SendRPipe(conn, data.Empty().Set("cmd", "log", "stdout").Set("fromcmd", cmd...).Bytes()) if err != nil { return } defer stdout.Close() - stderr, err := beam.SendPipe(conn, data.Empty().Set("cmd", "log", "stderr").Set("fromcmd", cmd...).Bytes()) + stderr, err := beam.SendRPipe(conn, data.Empty().Set("cmd", "log", "stderr").Set("fromcmd", cmd...).Bytes()) if err != nil { return } diff --git a/pkg/beam/examples/beamsh/builtins.go b/pkg/beam/examples/beamsh/builtins.go index cc94d2b5fb..3242237cc1 100644 --- a/pkg/beam/examples/beamsh/builtins.go +++ b/pkg/beam/examples/beamsh/builtins.go @@ -272,7 +272,7 @@ func CmdPrint(args []string, stdout, stderr io.Writer, in beam.Receiver, out bea } // Skip commands if a != nil && data.Message(payload).Get("cmd") == nil { - dup, err := beam.SendPipe(out, payload) + dup, err := beam.SendRPipe(out, payload) if err != nil { a.Close() return diff --git a/pkg/beam/router.go b/pkg/beam/router.go index fc41a8991b..15910e95b1 100644 --- a/pkg/beam/router.go +++ b/pkg/beam/router.go @@ -78,7 +78,7 @@ func (route *Route) Tee(dst Sender) *Route { return inner(payload, attachment) } // Setup the tee - w, err := SendPipe(dst, payload) + w, err := SendRPipe(dst, payload) if err != nil { return err } diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go deleted file mode 100644 index 0f93320725..0000000000 --- a/pkg/cgroups/cgroups.go +++ /dev/null @@ -1,30 +0,0 @@ -package cgroups - -import ( - "errors" -) - -var ( - ErrNotFound = errors.New("mountpoint not found") -) - -type Cgroup struct { - Name string `json:"name,omitempty"` - Parent string `json:"parent,omitempty"` -
DeviceAccess bool `json:"device_access,omitempty"` // name of parent cgroup or slice - Memory int64 `json:"memory,omitempty"` // Memory limit (in bytes) - MemoryReservation int64 `json:"memory_reservation,omitempty"` // Memory reservation or soft_limit (in bytes) - MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap - CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. other containers) - CpuQuota int64 `json:"cpu_quota,omitempty"` // CPU hardcap limit (in usecs). Allowed cpu time in a given period. - CpuPeriod int64 `json:"cpu_period,omitempty"` // CPU period to be used for hardcapping (in usecs). 0 to use system default. - CpusetCpus string `json:"cpuset_cpus,omitempty"` // CPU to use - Freezer string `json:"freezer,omitempty"` // set the freeze value for the process - - Slice string `json:"slice,omitempty"` // Parent slice to use for systemd -} - -type ActiveCgroup interface { - Cleanup() error -} diff --git a/pkg/cgroups/fs/blkio.go b/pkg/cgroups/fs/blkio.go deleted file mode 100644 index 79e14fa2dc..0000000000 --- a/pkg/cgroups/fs/blkio.go +++ /dev/null @@ -1,121 +0,0 @@ -package fs - -import ( - "bufio" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/dotcloud/docker/pkg/cgroups" -) - -type blkioGroup struct { -} - -func (s *blkioGroup) Set(d *data) error { - // we just want to join this group even though we don't set anything - if _, err := d.join("blkio"); err != nil && err != cgroups.ErrNotFound { - return err - } - return nil -} - -func (s *blkioGroup) Remove(d *data) error { - return removePath(d.path("blkio")) -} - -/* -examples: - - blkio.sectors - 8:0 6792 - - blkio.io_service_bytes - 8:0 Read 1282048 - 8:0 Write 2195456 - 8:0 Sync 2195456 - 8:0 Async 1282048 - 8:0 Total 3477504 - Total 3477504 - - blkio.io_serviced - 8:0 Read 124 - 8:0 Write 104 - 8:0 Sync 104 - 8:0 Async 124 - 8:0 Total 228 - Total 228 - - blkio.io_queued - 8:0 Read 0 - 8:0 Write 0 - 8:0 Sync 0 - 8:0 Async 0 - 8:0 Total 0 - Total 0 -*/ -func (s *blkioGroup) Stats(d *data) (map[string]float64, error) { - var ( - paramData = make(map[string]float64) - params = []string{ - "io_service_bytes_recursive", - "io_serviced_recursive", - "io_queued_recursive", - } - ) - - path, err := d.path("blkio") - if err != nil { - return nil, err - } - - k, v, err := s.getSectors(path) - if err != nil { - return nil, err - } - paramData[fmt.Sprintf("blkio.sectors_recursive:%s", k)] = v - - for _, param := range params { - f, err := os.Open(filepath.Join(path, fmt.Sprintf("blkio.%s", param))) - if err != nil { - return nil, err - } - defer f.Close() - - sc := bufio.NewScanner(f) - for sc.Scan() { - // format: dev type amount - fields := strings.Fields(sc.Text()) - switch len(fields) { - case 3: - v, err := strconv.ParseFloat(fields[2], 64) - if err != nil { - return nil, err - } - paramData[fmt.Sprintf("%s:%s:%s", param, fields[0], fields[1])] = v - case 2: - // this is the total line, skip - default: - return nil, ErrNotValidFormat - } - } - } - return paramData, nil -} - -func (s *blkioGroup) getSectors(path string) (string, float64, error) { - f, err := os.Open(filepath.Join(path, "blkio.sectors_recursive")) - if err != nil { - return "", 0, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return "", 0, err - } - return getCgroupParamKeyValue(string(data)) -} diff --git a/pkg/cgroups/fs/cpu_test.go b/pkg/cgroups/fs/cpu_test.go deleted file mode 100644 index 
698ae921d8..0000000000 --- a/pkg/cgroups/fs/cpu_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package fs - -import ( - "testing" -) - -func TestCpuStats(t *testing.T) { - helper := NewCgroupTestUtil("cpu", t) - defer helper.cleanup() - cpuStatContent := `nr_periods 2000 - nr_throttled 200 - throttled_time 42424242424` - helper.writeFileContents(map[string]string{ - "cpu.stat": cpuStatContent, - }) - - cpu := &cpuGroup{} - stats, err := cpu.Stats(helper.CgroupData) - if err != nil { - t.Fatal(err) - } - - expected_stats := map[string]float64{ - "nr_periods": 2000.0, - "nr_throttled": 200.0, - "throttled_time": 42424242424.0, - } - expectStats(t, expected_stats, stats) -} - -func TestNoCpuStatFile(t *testing.T) { - helper := NewCgroupTestUtil("cpu", t) - defer helper.cleanup() - - cpu := &cpuGroup{} - _, err := cpu.Stats(helper.CgroupData) - if err == nil { - t.Fatal("Expected to fail, but did not.") - } -} - -func TestInvalidCpuStat(t *testing.T) { - helper := NewCgroupTestUtil("cpu", t) - defer helper.cleanup() - cpuStatContent := `nr_periods 2000 - nr_throttled 200 - throttled_time fortytwo` - helper.writeFileContents(map[string]string{ - "cpu.stat": cpuStatContent, - }) - - cpu := &cpuGroup{} - _, err := cpu.Stats(helper.CgroupData) - if err == nil { - t.Fatal("Expected failed stat parsing.") - } -} diff --git a/pkg/cgroups/fs/cpuacct.go b/pkg/cgroups/fs/cpuacct.go deleted file mode 100644 index 892b5ab6b1..0000000000 --- a/pkg/cgroups/fs/cpuacct.go +++ /dev/null @@ -1,143 +0,0 @@ -package fs - -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "time" - - "github.com/dotcloud/docker/pkg/cgroups" - "github.com/dotcloud/docker/pkg/system" -) - -var ( - cpuCount = float64(runtime.NumCPU()) - clockTicks = float64(system.GetClockTicks()) -) - -type cpuacctGroup struct { -} - -func (s *cpuacctGroup) Set(d *data) error { - // we just want to join this group even though we don't set anything - if _, err := d.join("cpuacct"); err != nil && err != cgroups.ErrNotFound { - return err - } - return nil -} - -func (s *cpuacctGroup) Remove(d *data) error { - return removePath(d.path("cpuacct")) -} - -func (s *cpuacctGroup) Stats(d *data) (map[string]float64, error) { - var ( - startCpu, lastCpu, startSystem, lastSystem, startUsage, lastUsage float64 - percentage float64 - paramData = make(map[string]float64) - ) - path, err := d.path("cpuacct") - if startCpu, err = s.getCpuUsage(d, path); err != nil { - return nil, err - } - if startSystem, err = s.getSystemCpuUsage(d); err != nil { - return nil, err - } - startUsageTime := time.Now() - if startUsage, err = getCgroupParamFloat64(path, "cpuacct.usage"); err != nil { - return nil, err - } - // sample for 100ms - time.Sleep(100 * time.Millisecond) - if lastCpu, err = s.getCpuUsage(d, path); err != nil { - return nil, err - } - if lastSystem, err = s.getSystemCpuUsage(d); err != nil { - return nil, err - } - usageSampleDuration := time.Since(startUsageTime) - if lastUsage, err = getCgroupParamFloat64(path, "cpuacct.usage"); err != nil { - return nil, err - } - - var ( - deltaProc = lastCpu - startCpu - deltaSystem = lastSystem - startSystem - deltaUsage = lastUsage - startUsage - ) - if deltaSystem > 0.0 { - percentage = ((deltaProc / deltaSystem) * clockTicks) * cpuCount - } - // NOTE: a percentage over 100% is valid for POSIX because that means the - // processes is using multiple cores - paramData["percentage"] = percentage - - // Delta usage is in nanoseconds of CPU time so get the usage (in cores) over the sample time. 
- paramData["usage"] = deltaUsage / float64(usageSampleDuration.Nanoseconds()) - return paramData, nil -} - -func (s *cpuacctGroup) getProcStarttime(d *data) (float64, error) { - rawStart, err := system.GetProcessStartTime(d.pid) - if err != nil { - return 0, err - } - return strconv.ParseFloat(rawStart, 64) -} - -func (s *cpuacctGroup) getSystemCpuUsage(d *data) (float64, error) { - - f, err := os.Open("/proc/stat") - if err != nil { - return 0, err - } - defer f.Close() - - sc := bufio.NewScanner(f) - for sc.Scan() { - parts := strings.Fields(sc.Text()) - switch parts[0] { - case "cpu": - if len(parts) < 8 { - return 0, fmt.Errorf("invalid number of cpu fields") - } - - var total float64 - for _, i := range parts[1:8] { - v, err := strconv.ParseFloat(i, 64) - if err != nil { - return 0.0, fmt.Errorf("Unable to convert value %s to float: %s", i, err) - } - total += v - } - return total, nil - default: - continue - } - } - return 0, fmt.Errorf("invalid stat format") -} - -func (s *cpuacctGroup) getCpuUsage(d *data, path string) (float64, error) { - cpuTotal := 0.0 - f, err := os.Open(filepath.Join(path, "cpuacct.stat")) - if err != nil { - return 0.0, err - } - defer f.Close() - - sc := bufio.NewScanner(f) - for sc.Scan() { - _, v, err := getCgroupParamKeyValue(sc.Text()) - if err != nil { - return 0.0, err - } - // set the raw data in map - cpuTotal += v - } - return cpuTotal, nil -} diff --git a/pkg/cgroups/fs/cpuset.go b/pkg/cgroups/fs/cpuset.go deleted file mode 100644 index 8a13c56cea..0000000000 --- a/pkg/cgroups/fs/cpuset.go +++ /dev/null @@ -1,36 +0,0 @@ -package fs - -import ( - "os" -) - -type cpusetGroup struct { -} - -func (s *cpusetGroup) Set(d *data) error { - // we don't want to join this cgroup unless it is specified - if d.c.CpusetCpus != "" { - dir, err := d.join("cpuset") - if err != nil && d.c.CpusetCpus != "" { - return err - } - defer func() { - if err != nil { - os.RemoveAll(dir) - } - }() - - if err := writeFile(dir, "cpuset.cpus", d.c.CpusetCpus); err != nil { - return err - } - } - return nil -} - -func (s *cpusetGroup) Remove(d *data) error { - return removePath(d.path("cpuset")) -} - -func (s *cpusetGroup) Stats(d *data) (map[string]float64, error) { - return nil, ErrNotSupportStat -} diff --git a/pkg/cgroups/fs/devices.go b/pkg/cgroups/fs/devices.go deleted file mode 100644 index a2f91eda14..0000000000 --- a/pkg/cgroups/fs/devices.go +++ /dev/null @@ -1,69 +0,0 @@ -package fs - -import ( - "os" -) - -type devicesGroup struct { -} - -func (s *devicesGroup) Set(d *data) error { - dir, err := d.join("devices") - if err != nil { - return err - } - defer func() { - if err != nil { - os.RemoveAll(dir) - } - }() - - if !d.c.DeviceAccess { - if err := writeFile(dir, "devices.deny", "a"); err != nil { - return err - } - - allow := []string{ - // allow mknod for any device - "c *:* m", - "b *:* m", - - // /dev/null, zero, full - "c 1:3 rwm", - "c 1:5 rwm", - "c 1:7 rwm", - - // consoles - "c 5:1 rwm", - "c 5:0 rwm", - "c 4:0 rwm", - "c 4:1 rwm", - - // /dev/urandom,/dev/random - "c 1:9 rwm", - "c 1:8 rwm", - - // /dev/pts/ - pts namespaces are "coming soon" - "c 136:* rwm", - "c 5:2 rwm", - - // tuntap - "c 10:200 rwm", - } - - for _, val := range allow { - if err := writeFile(dir, "devices.allow", val); err != nil { - return err - } - } - } - return nil -} - -func (s *devicesGroup) Remove(d *data) error { - return removePath(d.path("devices")) -} - -func (s *devicesGroup) Stats(d *data) (map[string]float64, error) { - return nil, ErrNotSupportStat -} diff --git 
a/pkg/cgroups/fs/freezer.go b/pkg/cgroups/fs/freezer.go deleted file mode 100644 index 70cfcdde72..0000000000 --- a/pkg/cgroups/fs/freezer.go +++ /dev/null @@ -1,72 +0,0 @@ -package fs - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/dotcloud/docker/pkg/cgroups" -) - -type freezerGroup struct { -} - -func (s *freezerGroup) Set(d *data) error { - dir, err := d.join("freezer") - if err != nil { - if err != cgroups.ErrNotFound { - return err - } - return nil - } - - if d.c.Freezer != "" { - if err := writeFile(dir, "freezer.state", d.c.Freezer); err != nil { - return err - } - } - return nil -} - -func (s *freezerGroup) Remove(d *data) error { - return removePath(d.path("freezer")) -} - -func (s *freezerGroup) Stats(d *data) (map[string]float64, error) { - var ( - paramData = make(map[string]float64) - params = []string{ - "parent_freezing", - "self_freezing", - // comment out right now because this is string "state", - } - ) - - path, err := d.path("freezer") - if err != nil { - return nil, err - } - - for _, param := range params { - f, err := os.Open(filepath.Join(path, fmt.Sprintf("freezer.%s", param))) - if err != nil { - return nil, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - v, err := strconv.ParseFloat(strings.TrimSuffix(string(data), "\n"), 64) - if err != nil { - return nil, err - } - paramData[param] = v - } - return paramData, nil -} diff --git a/pkg/cgroups/systemd/apply_nosystemd.go b/pkg/cgroups/systemd/apply_nosystemd.go deleted file mode 100644 index 4faa749745..0000000000 --- a/pkg/cgroups/systemd/apply_nosystemd.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux - -package systemd - -import ( - "fmt" - "github.com/dotcloud/docker/pkg/cgroups" -) - -func UseSystemd() bool { - return false -} - -func Apply(c *Cgroup, pid int) (cgroups.ActiveCgroup, error) { - return nil, fmt.Errorf("Systemd not supported") -} diff --git a/pkg/collections/orderedintset.go b/pkg/collections/orderedintset.go deleted file mode 100644 index 23abab04d3..0000000000 --- a/pkg/collections/orderedintset.go +++ /dev/null @@ -1,96 +0,0 @@ -package collections - -import ( - "sync" -) - -// OrderedIntSet is a thread-safe sorted set and a stack. -type OrderedIntSet struct { - sync.RWMutex - set []int -} - -// NewOrderedSet returns an initialized OrderedSet -func NewOrderedIntSet() *OrderedIntSet { - return &OrderedIntSet{} -} - -// Push takes a string and adds it to the set. If the elem aready exists, it has no effect. -func (s *OrderedIntSet) Push(elem int) { - s.RLock() - for _, e := range s.set { - if e == elem { - s.RUnlock() - return - } - } - s.RUnlock() - - s.Lock() - - // Make sure the list is always sorted - for i, e := range s.set { - if elem < e { - s.set = append(s.set[:i], append([]int{elem}, s.set[i:]...)...) - s.Unlock() - return - } - } - // If we reach here, then elem is the biggest elem of the list. - s.set = append(s.set, elem) - s.Unlock() -} - -// Pop is an alias to PopFront() -func (s *OrderedIntSet) Pop() int { - return s.PopFront() -} - -// Pop returns the first elemen from the list and removes it. -// If the list is empty, it returns 0 -func (s *OrderedIntSet) PopFront() int { - s.RLock() - - for i, e := range s.set { - ret := e - s.RUnlock() - s.Lock() - s.set = append(s.set[:i], s.set[i+1:]...) - s.Unlock() - return ret - } - s.RUnlock() - - return 0 -} - -// PullBack retrieve the last element of the list. -// The element is not removed. 
-// If the list is empty, an empty element is returned. -func (s *OrderedIntSet) PullBack() int { - if len(s.set) == 0 { - return 0 - } - return s.set[len(s.set)-1] -} - -// Exists checks if the given element present in the list. -func (s *OrderedIntSet) Exists(elem int) bool { - for _, e := range s.set { - if e == elem { - return true - } - } - return false -} - -// Remove removes an element from the list. -// If the element is not found, it has no effect. -func (s *OrderedIntSet) Remove(elem int) { - for i, e := range s.set { - if e == elem { - s.set = append(s.set[:i], s.set[i+1:]...) - return - } - } -} diff --git a/pkg/graphdb/conn_sqlite3.go b/pkg/graphdb/conn_sqlite3.go index 33355ae4dc..b6a8027a81 100644 --- a/pkg/graphdb/conn_sqlite3.go +++ b/pkg/graphdb/conn_sqlite3.go @@ -3,23 +3,32 @@ package graphdb import ( - _ "code.google.com/p/gosqlite/sqlite3" // registers sqlite "database/sql" "os" + + _ "code.google.com/p/gosqlite/sqlite3" // registers sqlite ) func NewSqliteConn(root string) (*Database, error) { initDatabase := false - if _, err := os.Stat(root); err != nil { + + stat, err := os.Stat(root) + if err != nil { if os.IsNotExist(err) { initDatabase = true } else { return nil, err } } + + if stat != nil && stat.Size() == 0 { + initDatabase = true + } + conn, err := sql.Open("sqlite3", root) if err != nil { return nil, err } + return NewDatabase(conn, initDatabase) } diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go index 4cdd67ef7c..748b69a444 100644 --- a/pkg/iptables/iptables.go +++ b/pkg/iptables/iptables.go @@ -20,6 +20,7 @@ const ( var ( ErrIptablesNotFound = errors.New("Iptables not found") nat = []string{"-t", "nat"} + supportsXlock = false ) type Chain struct { @@ -27,6 +28,10 @@ type Chain struct { Bridge string } +func init() { + supportsXlock = exec.Command("iptables", "--wait", "-L", "-n").Run() == nil +} + func NewChain(name, bridge string) (*Chain, error) { if output, err := Raw("-t", "nat", "-N", name); err != nil { return nil, err @@ -147,12 +152,24 @@ func Raw(args ...string) ([]byte, error) { if err != nil { return nil, ErrIptablesNotFound } - if os.Getenv("DEBUG") != "" { - fmt.Printf("[DEBUG] [iptables]: %s, %v\n", path, args) + + if supportsXlock { + args = append([]string{"--wait"}, args...) } + + if os.Getenv("DEBUG") != "" { + fmt.Fprintf(os.Stderr, "[debug] %s, %v\n", path, args) + } + output, err := exec.Command(path, args...).CombinedOutput() if err != nil { return nil, fmt.Errorf("iptables failed: iptables %v: %s (%s)", strings.Join(args, " "), output, err) } + + // ignore iptables' message about xtables lock + if strings.Contains(string(output), "waiting for it to exit") { + output = []byte("") + } + return output, err } diff --git a/pkg/libcontainer/cgroups/cgroups.go b/pkg/libcontainer/cgroups/cgroups.go new file mode 100644 index 0000000000..85607b548a --- /dev/null +++ b/pkg/libcontainer/cgroups/cgroups.go @@ -0,0 +1,40 @@ +package cgroups + +import ( + "errors" + + "github.com/dotcloud/docker/pkg/libcontainer/devices" +) + +var ( + ErrNotFound = errors.New("mountpoint not found") +) + +type FreezerState string + +const ( + Undefined FreezerState = "" + Frozen FreezerState = "FROZEN" + Thawed FreezerState = "THAWED" +) + +type Cgroup struct { + Name string `json:"name,omitempty"` + Parent string `json:"parent,omitempty"` // name of parent cgroup or slice + + AllowAllDevices bool `json:"allow_all_devices,omitempty"` // If this is true, allow access to any kind of device within the container.
If false, allow access only to devices explicitly listed in the allowed_devices list. + AllowedDevices []*devices.Device `json:"allowed_devices,omitempty"` + Memory int64 `json:"memory,omitempty"` // Memory limit (in bytes) + MemoryReservation int64 `json:"memory_reservation,omitempty"` // Memory reservation or soft_limit (in bytes) + MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap + CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. other containers) + CpuQuota int64 `json:"cpu_quota,omitempty"` // CPU hardcap limit (in usecs). Allowed cpu time in a given period. + CpuPeriod int64 `json:"cpu_period,omitempty"` // CPU period to be used for hardcapping (in usecs). 0 to use system default. + CpusetCpus string `json:"cpuset_cpus,omitempty"` // CPU to use + Freezer FreezerState `json:"freezer,omitempty"` // set the freeze value for the process + Slice string `json:"slice,omitempty"` // Parent slice to use for systemd +} + +type ActiveCgroup interface { + Cleanup() error +} diff --git a/pkg/cgroups/cgroups_test.go b/pkg/libcontainer/cgroups/cgroups_test.go similarity index 100% rename from pkg/cgroups/cgroups_test.go rename to pkg/libcontainer/cgroups/cgroups_test.go diff --git a/pkg/cgroups/fs/apply_raw.go b/pkg/libcontainer/cgroups/fs/apply_raw.go similarity index 60% rename from pkg/cgroups/fs/apply_raw.go rename to pkg/libcontainer/cgroups/fs/apply_raw.go index 5f9fc826b3..2231ab9a38 100644 --- a/pkg/cgroups/fs/apply_raw.go +++ b/pkg/libcontainer/cgroups/fs/apply_raw.go @@ -7,7 +7,7 @@ import ( "path/filepath" "strconv" - "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) var ( @@ -26,7 +26,7 @@ var ( type subsystem interface { Set(*data) error Remove(*data) error - Stats(*data) (map[string]float64, error) + GetStats(*data, *cgroups.Stats) error } type data struct { @@ -37,44 +37,69 @@ type data struct { } func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { - // We have two implementation of cgroups support, one is based on - // systemd and the dbus api, and one is based on raw cgroup fs operations - // following the pre-single-writer model docs at: - // http://www.freedesktop.org/wiki/Software/systemd/PaxControlGroups/ - // - // we can pick any subsystem to find the root - - cgroupRoot, err := cgroups.FindCgroupMountpoint("cpu") + d, err := getCgroupData(c, pid) if err != nil { return nil, err } - cgroupRoot = filepath.Dir(cgroupRoot) - if _, err := os.Stat(cgroupRoot); err != nil { - return nil, fmt.Errorf("cgroups fs not found") - } - - cgroup := c.Name - if c.Parent != "" { - cgroup = filepath.Join(c.Parent, cgroup) - } - - d := &data{ - root: cgroupRoot, - cgroup: cgroup, - c: c, - pid: pid, - } for _, sys := range subsystems { if err := sys.Set(d); err != nil { d.Cleanup() return nil, err } } + return d, nil } -func GetStats(c *cgroups.Cgroup, subsystem string, pid int) (map[string]float64, error) { +func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) { + stats := cgroups.NewStats() + + d, err := getCgroupData(c, 0) + if err != nil { + return nil, err + } + + for _, sys := range subsystems { + if err := sys.GetStats(d, stats); err != nil { + return nil, err + } + } + + return stats, nil +} + +// Freeze toggles the container's freezer cgroup depending on the state +// provided +func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { + d, err := getCgroupData(c, 0) + if err != nil { + return err + } + + c.Freezer 
= state + + freezer := subsystems["freezer"] + + return freezer.Set(d) +} + +func GetPids(c *cgroups.Cgroup) ([]int, error) { + d, err := getCgroupData(c, 0) + if err != nil { + return nil, err + } + + dir, err := d.path("devices") + if err != nil { + return nil, err + } + + return cgroups.ReadProcsFile(dir) +} + +func getCgroupData(c *cgroups.Cgroup, pid int) (*data, error) { + // we can pick any subsystem to find the root cgroupRoot, err := cgroups.FindCgroupMountpoint("cpu") if err != nil { return nil, err @@ -90,25 +115,28 @@ func GetStats(c *cgroups.Cgroup, subsystem string, pid int) (map[string]float64, cgroup = filepath.Join(c.Parent, cgroup) } - d := &data{ + return &data{ root: cgroupRoot, cgroup: cgroup, c: c, pid: pid, - } - sys, exists := subsystems[subsystem] - if !exists { - return nil, fmt.Errorf("subsystem %s does not exist", subsystem) - } - return sys.Stats(d) + }, nil } -func (raw *data) path(subsystem string) (string, error) { +func (raw *data) parent(subsystem string) (string, error) { initPath, err := cgroups.GetInitCgroupDir(subsystem) if err != nil { return "", err } - return filepath.Join(raw.root, subsystem, initPath, raw.cgroup), nil + return filepath.Join(raw.root, subsystem, initPath), nil +} + +func (raw *data) path(subsystem string) (string, error) { + parent, err := raw.parent(subsystem) + if err != nil { + return "", err + } + return filepath.Join(parent, raw.cgroup), nil } func (raw *data) join(subsystem string) (string, error) { @@ -136,6 +164,11 @@ func writeFile(dir, file, data string) error { return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) } +func readFile(dir, file string) (string, error) { + data, err := ioutil.ReadFile(filepath.Join(dir, file)) + return string(data), err +} + func removePath(p string, err error) error { if err != nil { return err diff --git a/pkg/libcontainer/cgroups/fs/blkio.go b/pkg/libcontainer/cgroups/fs/blkio.go new file mode 100644 index 0000000000..0c7a4e7b39 --- /dev/null +++ b/pkg/libcontainer/cgroups/fs/blkio.go @@ -0,0 +1,142 @@ +package fs + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" +) + +type blkioGroup struct { +} + +func (s *blkioGroup) Set(d *data) error { + // we just want to join this group even though we don't set anything + if _, err := d.join("blkio"); err != nil && err != cgroups.ErrNotFound { + return err + } + return nil +} + +func (s *blkioGroup) Remove(d *data) error { + return removePath(d.path("blkio")) +} + +/* +examples: + + blkio.sectors + 8:0 6792 + + blkio.io_service_bytes + 8:0 Read 1282048 + 8:0 Write 2195456 + 8:0 Sync 2195456 + 8:0 Async 1282048 + 8:0 Total 3477504 + Total 3477504 + + blkio.io_serviced + 8:0 Read 124 + 8:0 Write 104 + 8:0 Sync 104 + 8:0 Async 124 + 8:0 Total 228 + Total 228 + + blkio.io_queued + 8:0 Read 0 + 8:0 Write 0 + 8:0 Sync 0 + 8:0 Async 0 + 8:0 Total 0 + Total 0 +*/ + +func splitBlkioStatLine(r rune) bool { + return r == ' ' || r == ':' +} + +func getBlkioStat(path string) ([]cgroups.BlkioStatEntry, error) { + var blkioStats []cgroups.BlkioStatEntry + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + // format: dev type amount + fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine) + if len(fields) < 3 { + if len(fields) == 2 && fields[0] == "Total" { + // skip total line + continue + } else { + return nil, fmt.Errorf("Invalid line found while parsing %s: %s", path, 
sc.Text()) + } + } + + v, err := strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, err + } + major := v + + v, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + minor := v + + op := "" + valueField := 2 + if len(fields) == 4 { + op = fields[2] + valueField = 3 + } + v, err = strconv.ParseUint(fields[valueField], 10, 64) + if err != nil { + return nil, err + } + blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v}) + } + + return blkioStats, nil +} + +func (s *blkioGroup) GetStats(d *data, stats *cgroups.Stats) error { + var blkioStats []cgroups.BlkioStatEntry + var err error + path, err := d.path("blkio") + if err != nil { + return err + } + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil { + return err + } + stats.BlkioStats.SectorsRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil { + return err + } + stats.BlkioStats.IoServiceBytesRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil { + return err + } + stats.BlkioStats.IoServicedRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil { + return err + } + stats.BlkioStats.IoQueuedRecursive = blkioStats + + return nil +} diff --git a/pkg/cgroups/fs/blkio_test.go b/pkg/libcontainer/cgroups/fs/blkio_test.go similarity index 63% rename from pkg/cgroups/fs/blkio_test.go rename to pkg/libcontainer/cgroups/fs/blkio_test.go index 5279ac437b..d91a6479a9 100644 --- a/pkg/cgroups/fs/blkio_test.go +++ b/pkg/libcontainer/cgroups/fs/blkio_test.go @@ -2,14 +2,16 @@ package fs import ( "testing" + + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) const ( sectorsRecursiveContents = `8:0 1024` serviceBytesRecursiveContents = `8:0 Read 100 -8:0 Write 400 -8:0 Sync 200 -8:0 Async 300 +8:0 Write 200 +8:0 Sync 300 +8:0 Async 500 8:0 Total 500 Total 500` servicedRecursiveContents = `8:0 Read 10 @@ -26,6 +28,12 @@ Total 50` Total 5` ) +var actualStats = *cgroups.NewStats() + +func appendBlkioStatEntry(blkioStatEntries *[]cgroups.BlkioStatEntry, major, minor, value uint64, op string) { + *blkioStatEntries = append(*blkioStatEntries, cgroups.BlkioStatEntry{Major: major, Minor: minor, Value: value, Op: op}) +} + func TestBlkioStats(t *testing.T) { helper := NewCgroupTestUtil("blkio", t) defer helper.cleanup() @@ -37,37 +45,34 @@ func TestBlkioStats(t *testing.T) { }) blkio := &blkioGroup{} - stats, err := blkio.Stats(helper.CgroupData) + err := blkio.GetStats(helper.CgroupData, &actualStats) if err != nil { t.Fatal(err) } // Verify expected stats. - expectedStats := map[string]float64{ - "blkio.sectors_recursive:8:0": 1024.0, + expectedStats := cgroups.BlkioStats{} + appendBlkioStatEntry(&expectedStats.SectorsRecursive, 8, 0, 1024, "") - // Serviced bytes. 
- "io_service_bytes_recursive:8:0:Read": 100.0, - "io_service_bytes_recursive:8:0:Write": 400.0, - "io_service_bytes_recursive:8:0:Sync": 200.0, - "io_service_bytes_recursive:8:0:Async": 300.0, - "io_service_bytes_recursive:8:0:Total": 500.0, + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 100, "Read") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 200, "Write") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 300, "Sync") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Async") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Total") - // Serviced requests. - "io_serviced_recursive:8:0:Read": 10.0, - "io_serviced_recursive:8:0:Write": 40.0, - "io_serviced_recursive:8:0:Sync": 20.0, - "io_serviced_recursive:8:0:Async": 30.0, - "io_serviced_recursive:8:0:Total": 50.0, + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 10, "Read") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 40, "Write") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 20, "Sync") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 30, "Async") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 50, "Total") - // Queued requests. - "io_queued_recursive:8:0:Read": 1.0, - "io_queued_recursive:8:0:Write": 4.0, - "io_queued_recursive:8:0:Sync": 2.0, - "io_queued_recursive:8:0:Async": 3.0, - "io_queued_recursive:8:0:Total": 5.0, - } - expectStats(t, expectedStats, stats) + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 1, "Read") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 4, "Write") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 2, "Sync") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Async") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Total") + + expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats) } func TestBlkioStatsNoSectorsFile(t *testing.T) { @@ -80,7 +85,7 @@ func TestBlkioStatsNoSectorsFile(t *testing.T) { }) blkio := &blkioGroup{} - _, err := blkio.Stats(helper.CgroupData) + err := blkio.GetStats(helper.CgroupData, &actualStats) if err == nil { t.Fatal("Expected to fail, but did not") } @@ -96,7 +101,7 @@ func TestBlkioStatsNoServiceBytesFile(t *testing.T) { }) blkio := &blkioGroup{} - _, err := blkio.Stats(helper.CgroupData) + err := blkio.GetStats(helper.CgroupData, &actualStats) if err == nil { t.Fatal("Expected to fail, but did not") } @@ -112,7 +117,7 @@ func TestBlkioStatsNoServicedFile(t *testing.T) { }) blkio := &blkioGroup{} - _, err := blkio.Stats(helper.CgroupData) + err := blkio.GetStats(helper.CgroupData, &actualStats) if err == nil { t.Fatal("Expected to fail, but did not") } @@ -128,7 +133,7 @@ func TestBlkioStatsNoQueuedFile(t *testing.T) { }) blkio := &blkioGroup{} - _, err := blkio.Stats(helper.CgroupData) + err := blkio.GetStats(helper.CgroupData, &actualStats) if err == nil { t.Fatal("Expected to fail, but did not") } @@ -145,7 +150,7 @@ func TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) { }) blkio := &blkioGroup{} - _, err := blkio.Stats(helper.CgroupData) + err := blkio.GetStats(helper.CgroupData, &actualStats) if err == nil { t.Fatal("Expected to fail, but did not") } @@ -162,7 +167,7 @@ func TestBlkioStatsUnexpectedFieldType(t *testing.T) { }) blkio := &blkioGroup{} - _, err := blkio.Stats(helper.CgroupData) + err := blkio.GetStats(helper.CgroupData, &actualStats) if err == nil { t.Fatal("Expected 
to fail, but did not") } diff --git a/pkg/cgroups/fs/cpu.go b/pkg/libcontainer/cgroups/fs/cpu.go similarity index 67% rename from pkg/cgroups/fs/cpu.go rename to pkg/libcontainer/cgroups/fs/cpu.go index 6a7f66c72d..2e9d588f4f 100644 --- a/pkg/cgroups/fs/cpu.go +++ b/pkg/libcontainer/cgroups/fs/cpu.go @@ -5,6 +5,9 @@ import ( "os" "path/filepath" "strconv" + "syscall" + + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) type cpuGroup struct { @@ -39,16 +42,18 @@ func (s *cpuGroup) Remove(d *data) error { return removePath(d.path("cpu")) } -func (s *cpuGroup) Stats(d *data) (map[string]float64, error) { - paramData := make(map[string]float64) +func (s *cpuGroup) GetStats(d *data, stats *cgroups.Stats) error { path, err := d.path("cpu") if err != nil { - return nil, err + return err } f, err := os.Open(filepath.Join(path, "cpu.stat")) if err != nil { - return nil, err + if pathErr, ok := err.(*os.PathError); ok && pathErr.Err == syscall.ENOENT { + return nil + } + return err } defer f.Close() @@ -56,9 +61,18 @@ func (s *cpuGroup) Stats(d *data) (map[string]float64, error) { for sc.Scan() { t, v, err := getCgroupParamKeyValue(sc.Text()) if err != nil { - return nil, err + return err + } + switch t { + case "nr_periods": + stats.CpuStats.ThrottlingData.Periods = v + + case "nr_throttled": + stats.CpuStats.ThrottlingData.ThrottledPeriods = v + + case "throttled_time": + stats.CpuStats.ThrottlingData.ThrottledTime = v } - paramData[t] = v } - return paramData, nil + return nil } diff --git a/pkg/libcontainer/cgroups/fs/cpu_test.go b/pkg/libcontainer/cgroups/fs/cpu_test.go new file mode 100644 index 0000000000..c5bfc50409 --- /dev/null +++ b/pkg/libcontainer/cgroups/fs/cpu_test.go @@ -0,0 +1,66 @@ +package fs + +import ( + "fmt" + "testing" + + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" +) + +func TestCpuStats(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + + const ( + kNrPeriods = 2000 + kNrThrottled = 200 + kThrottledTime = uint64(18446744073709551615) + ) + + cpuStatContent := fmt.Sprintf("nr_periods %d\n nr_throttled %d\n throttled_time %d\n", + kNrPeriods, kNrThrottled, kThrottledTime) + helper.writeFileContents(map[string]string{ + "cpu.stat": cpuStatContent, + }) + + cpu := &cpuGroup{} + err := cpu.GetStats(helper.CgroupData, &actualStats) + if err != nil { + t.Fatal(err) + } + + expectedStats := cgroups.ThrottlingData{ + Periods: kNrPeriods, + ThrottledPeriods: kNrThrottled, + ThrottledTime: kThrottledTime} + + expectThrottlingDataEquals(t, expectedStats, actualStats.CpuStats.ThrottlingData) +} + +func TestNoCpuStatFile(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + + cpu := &cpuGroup{} + err := cpu.GetStats(helper.CgroupData, &actualStats) + if err != nil { + t.Fatal("Expected not to fail, but did") + } +} + +func TestInvalidCpuStat(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + cpuStatContent := `nr_periods 2000 + nr_throttled 200 + throttled_time fortytwo` + helper.writeFileContents(map[string]string{ + "cpu.stat": cpuStatContent, + }) + + cpu := &cpuGroup{} + err := cpu.GetStats(helper.CgroupData, &actualStats) + if err == nil { + t.Fatal("Expected failed stat parsing.") + } +} diff --git a/pkg/libcontainer/cgroups/fs/cpuacct.go b/pkg/libcontainer/cgroups/fs/cpuacct.go new file mode 100644 index 0000000000..5ea01dc94a --- /dev/null +++ b/pkg/libcontainer/cgroups/fs/cpuacct.go @@ -0,0 +1,162 @@ +package fs + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + 
"path/filepath" + "runtime" + "strconv" + "strings" + "time" + + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" + "github.com/dotcloud/docker/pkg/system" +) + +var ( + cpuCount = uint64(runtime.NumCPU()) + clockTicks = uint64(system.GetClockTicks()) +) + +const nanosecondsInSecond = 1000000000 + +type cpuacctGroup struct { +} + +func (s *cpuacctGroup) Set(d *data) error { + // we just want to join this group even though we don't set anything + if _, err := d.join("cpuacct"); err != nil && err != cgroups.ErrNotFound { + return err + } + return nil +} + +func (s *cpuacctGroup) Remove(d *data) error { + return removePath(d.path("cpuacct")) +} + +func (s *cpuacctGroup) GetStats(d *data, stats *cgroups.Stats) error { + var ( + startCpu, lastCpu, startSystem, lastSystem, startUsage, lastUsage, kernelModeUsage, userModeUsage, percentage uint64 + ) + path, err := d.path("cpuacct") + if kernelModeUsage, userModeUsage, err = s.getCpuUsage(d, path); err != nil { + return err + } + startCpu = kernelModeUsage + userModeUsage + if startSystem, err = s.getSystemCpuUsage(d); err != nil { + return err + } + startUsageTime := time.Now() + if startUsage, err = getCgroupParamInt(path, "cpuacct.usage"); err != nil { + return err + } + // sample for 100ms + time.Sleep(100 * time.Millisecond) + if kernelModeUsage, userModeUsage, err = s.getCpuUsage(d, path); err != nil { + return err + } + lastCpu = kernelModeUsage + userModeUsage + if lastSystem, err = s.getSystemCpuUsage(d); err != nil { + return err + } + usageSampleDuration := time.Since(startUsageTime) + if lastUsage, err = getCgroupParamInt(path, "cpuacct.usage"); err != nil { + return err + } + + var ( + deltaProc = lastCpu - startCpu + deltaSystem = lastSystem - startSystem + deltaUsage = lastUsage - startUsage + ) + if deltaSystem > 0.0 { + percentage = ((deltaProc / deltaSystem) * clockTicks) * cpuCount + } + // NOTE: a percentage over 100% is valid for POSIX because that means the + // processes is using multiple cores + stats.CpuStats.CpuUsage.PercentUsage = percentage + // Delta usage is in nanoseconds of CPU time so get the usage (in cores) over the sample time. + stats.CpuStats.CpuUsage.CurrentUsage = deltaUsage / uint64(usageSampleDuration.Nanoseconds()) + percpuUsage, err := s.getPercpuUsage(path) + if err != nil { + return err + } + stats.CpuStats.CpuUsage.PercpuUsage = percpuUsage + stats.CpuStats.CpuUsage.UsageInKernelmode = (kernelModeUsage * nanosecondsInSecond) / clockTicks + stats.CpuStats.CpuUsage.UsageInUsermode = (userModeUsage * nanosecondsInSecond) / clockTicks + return nil +} + +// TODO(vmarmol): Use cgroups stats. 
+func (s *cpuacctGroup) getSystemCpuUsage(d *data) (uint64, error) { + + f, err := os.Open("/proc/stat") + if err != nil { + return 0, err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + parts := strings.Fields(sc.Text()) + switch parts[0] { + case "cpu": + if len(parts) < 8 { + return 0, fmt.Errorf("invalid number of cpu fields") + } + + var total uint64 + for _, i := range parts[1:8] { + v, err := strconv.ParseUint(i, 10, 64) + if err != nil { + return 0, fmt.Errorf("Unable to convert value %s to uint64: %s", i, err) + } + total += v + } + return total, nil + default: + continue + } + } + return 0, fmt.Errorf("invalid stat format") +} + +func (s *cpuacctGroup) getCpuUsage(d *data, path string) (uint64, uint64, error) { + kernelModeUsage := uint64(0) + userModeUsage := uint64(0) + data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.stat")) + if err != nil { + return 0, 0, err + } + fields := strings.Fields(string(data)) + if len(fields) != 4 { + return 0, 0, fmt.Errorf("Failure - %s is expected to have 4 fields", filepath.Join(path, "cpuacct.stat")) + } + if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil { + return 0, 0, err + } + if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil { + return 0, 0, err + } + + return kernelModeUsage, userModeUsage, nil +} + +func (s *cpuacctGroup) getPercpuUsage(path string) ([]uint64, error) { + percpuUsage := []uint64{} + data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu")) + if err != nil { + return percpuUsage, err + } + for _, value := range strings.Fields(string(data)) { + value, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return percpuUsage, fmt.Errorf("Unable to convert param value to uint64: %s", err) + } + percpuUsage = append(percpuUsage, value) + } + return percpuUsage, nil +} diff --git a/pkg/libcontainer/cgroups/fs/cpuset.go b/pkg/libcontainer/cgroups/fs/cpuset.go new file mode 100644 index 0000000000..c0b03c559e --- /dev/null +++ b/pkg/libcontainer/cgroups/fs/cpuset.go @@ -0,0 +1,110 @@ +package fs + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" +) + +type cpusetGroup struct { +} + +func (s *cpusetGroup) Set(d *data) error { + // we don't want to join this cgroup unless it is specified + if d.c.CpusetCpus != "" { + dir, err := d.path("cpuset") + if err != nil { + return err + } + if err := s.ensureParent(dir); err != nil { + return err + } + + // because we are not using d.join we need to place the pid into the procs file + // unlike the other subsystems + if err := writeFile(dir, "cgroup.procs", strconv.Itoa(d.pid)); err != nil { + return err + } + if err := writeFile(dir, "cpuset.cpus", d.c.CpusetCpus); err != nil { + return err + } + } + return nil +} + +func (s *cpusetGroup) Remove(d *data) error { + return removePath(d.path("cpuset")) +} + +func (s *cpusetGroup) GetStats(d *data, stats *cgroups.Stats) error { + return nil +} + +func (s *cpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) { + if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil { + return + } + if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil { + return + } + return cpus, mems, nil +} + +// ensureParent ensures that the parent directory of current is created +// with the proper cpus and mems files copied from its parent if those +// files contain only a newline +func (s
*cpusetGroup) ensureParent(current string) error { + parent := filepath.Dir(current) + + if _, err := os.Stat(parent); err != nil { + if !os.IsNotExist(err) { + return err + } + + if err := s.ensureParent(parent); err != nil { + return err + } + } + + if err := os.MkdirAll(current, 0755); err != nil && !os.IsExist(err) { + return err + } + return s.copyIfNeeded(current, parent) +} + +// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent +// directory to the current directory if the file's contents are empty +func (s *cpusetGroup) copyIfNeeded(current, parent string) error { + var ( + err error + currentCpus, currentMems []byte + parentCpus, parentMems []byte + ) + + if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil { + return err + } + if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil { + return err + } + + if s.isEmpty(currentCpus) { + if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil { + return err + } + } + if s.isEmpty(currentMems) { + if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil { + return err + } + } + return nil +} + +func (s *cpusetGroup) isEmpty(b []byte) bool { + return len(bytes.Trim(b, "\n")) == 0 +} diff --git a/pkg/libcontainer/cgroups/fs/devices.go b/pkg/libcontainer/cgroups/fs/devices.go new file mode 100644 index 0000000000..45c3b48530 --- /dev/null +++ b/pkg/libcontainer/cgroups/fs/devices.go @@ -0,0 +1,34 @@ +package fs + +import "github.com/dotcloud/docker/pkg/libcontainer/cgroups" + +type devicesGroup struct { +} + +func (s *devicesGroup) Set(d *data) error { + dir, err := d.join("devices") + if err != nil { + return err + } + + if !d.c.AllowAllDevices { + if err := writeFile(dir, "devices.deny", "a"); err != nil { + return err + } + + for _, dev := range d.c.AllowedDevices { + if err := writeFile(dir, "devices.allow", dev.GetCgroupAllowString()); err != nil { + return err + } + } + } + return nil +} + +func (s *devicesGroup) Remove(d *data) error { + return removePath(d.path("devices")) +} + +func (s *devicesGroup) GetStats(d *data, stats *cgroups.Stats) error { + return nil +} diff --git a/pkg/libcontainer/cgroups/fs/freezer.go b/pkg/libcontainer/cgroups/fs/freezer.go new file mode 100644 index 0000000000..5c9ba5b543 --- /dev/null +++ b/pkg/libcontainer/cgroups/fs/freezer.go @@ -0,0 +1,71 @@ +package fs + +import ( + "io/ioutil" + "path/filepath" + "strings" + "time" + + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" +) + +type freezerGroup struct { +} + +func (s *freezerGroup) Set(d *data) error { + switch d.c.Freezer { + case cgroups.Frozen, cgroups.Thawed: + dir, err := d.path("freezer") + if err != nil { + return err + } + + if err := writeFile(dir, "freezer.state", string(d.c.Freezer)); err != nil { + return err + } + + for { + state, err := readFile(dir, "freezer.state") + if err != nil { + return err + } + if strings.TrimSpace(state) == string(d.c.Freezer) { + break + } + time.Sleep(1 * time.Millisecond) + } + default: + if _, err := d.join("freezer"); err != nil && err != cgroups.ErrNotFound { + return err + } + } + + return nil +} + +func (s *freezerGroup) Remove(d *data) error { + return removePath(d.path("freezer")) +} + +func getFreezerFileData(path string) (string, error) { + data, err := ioutil.ReadFile(path) + return strings.TrimSuffix(string(data), "\n"), err +} + +func (s *freezerGroup) GetStats(d *data, stats *cgroups.Stats) error { + path, err := d.path("freezer") + if err != nil { + return err + } + var data string +
if data, err = getFreezerFileData(filepath.Join(path, "freezer.parent_freezing")); err != nil { + return err + } + stats.FreezerStats.ParentState = data + if data, err = getFreezerFileData(filepath.Join(path, "freezer.self_freezing")); err != nil { + return err + } + stats.FreezerStats.SelfState = data + + return nil +} diff --git a/pkg/cgroups/fs/memory.go b/pkg/libcontainer/cgroups/fs/memory.go similarity index 72% rename from pkg/cgroups/fs/memory.go rename to pkg/libcontainer/cgroups/fs/memory.go index 837640c088..202ddc8ede 100644 --- a/pkg/cgroups/fs/memory.go +++ b/pkg/libcontainer/cgroups/fs/memory.go @@ -2,10 +2,11 @@ package fs import ( "bufio" - "fmt" "os" "path/filepath" "strconv" + + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) type memoryGroup struct { @@ -50,17 +51,16 @@ func (s *memoryGroup) Remove(d *data) error { return removePath(d.path("memory")) } -func (s *memoryGroup) Stats(d *data) (map[string]float64, error) { - paramData := make(map[string]float64) +func (s *memoryGroup) GetStats(d *data, stats *cgroups.Stats) error { path, err := d.path("memory") if err != nil { - return nil, err + return err } // Set stats from memory.stat. statsFile, err := os.Open(filepath.Join(path, "memory.stat")) if err != nil { - return nil, err + return err } defer statsFile.Close() @@ -68,23 +68,27 @@ func (s *memoryGroup) Stats(d *data) (map[string]float64, error) { for sc.Scan() { t, v, err := getCgroupParamKeyValue(sc.Text()) if err != nil { - return nil, err + return err } - paramData[t] = v + stats.MemoryStats.Stats[t] = v } // Set memory usage and max historical usage. - params := []string{ - "usage_in_bytes", - "max_usage_in_bytes", + value, err := getCgroupParamInt(path, "memory.usage_in_bytes") + if err != nil { + return err } - for _, param := range params { - value, err := getCgroupParamFloat64(path, fmt.Sprintf("memory.%s", param)) - if err != nil { - return nil, err - } - paramData[param] = value + stats.MemoryStats.Usage = value + value, err = getCgroupParamInt(path, "memory.max_usage_in_bytes") + if err != nil { + return err } + stats.MemoryStats.MaxUsage = value + value, err = getCgroupParamInt(path, "memory.failcnt") + if err != nil { + return err + } + stats.MemoryStats.Failcnt = value - return paramData, nil + return nil } diff --git a/pkg/cgroups/fs/memory_test.go b/pkg/libcontainer/cgroups/fs/memory_test.go similarity index 78% rename from pkg/cgroups/fs/memory_test.go rename to pkg/libcontainer/cgroups/fs/memory_test.go index 6c1fb735e9..29aea1f219 100644 --- a/pkg/cgroups/fs/memory_test.go +++ b/pkg/libcontainer/cgroups/fs/memory_test.go @@ -2,6 +2,8 @@ package fs import ( "testing" + + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) const ( @@ -9,6 +11,7 @@ const ( rss 1024` memoryUsageContents = "2048\n" memoryMaxUsageContents = "4096\n" + memoryFailcnt = "100\n" ) func TestMemoryStats(t *testing.T) { @@ -18,15 +21,16 @@ func TestMemoryStats(t *testing.T) { "memory.stat": memoryStatContents, "memory.usage_in_bytes": memoryUsageContents, "memory.max_usage_in_bytes": memoryMaxUsageContents, + "memory.failcnt": memoryFailcnt, }) memory := &memoryGroup{} - stats, err := memory.Stats(helper.CgroupData) + err := memory.GetStats(helper.CgroupData, &actualStats) if err != nil { t.Fatal(err) } - expectedStats := map[string]float64{"cache": 512.0, "rss": 1024.0, "usage_in_bytes": 2048.0, "max_usage_in_bytes": 4096.0} - expectStats(t, expectedStats, stats) + expectedStats := cgroups.MemoryStats{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Stats: 
map[string]uint64{"cache": 512, "rss": 1024}} + expectMemoryStatEquals(t, expectedStats, actualStats.MemoryStats) } func TestMemoryStatsNoStatFile(t *testing.T) { @@ -38,7 +42,7 @@ func TestMemoryStatsNoStatFile(t *testing.T) { }) memory := &memoryGroup{} - _, err := memory.Stats(helper.CgroupData) + err := memory.GetStats(helper.CgroupData, &actualStats) if err == nil { t.Fatal("Expected failure") } @@ -53,7 +57,7 @@ func TestMemoryStatsNoUsageFile(t *testing.T) { }) memory := &memoryGroup{} - _, err := memory.Stats(helper.CgroupData) + err := memory.GetStats(helper.CgroupData, &actualStats) if err == nil { t.Fatal("Expected failure") } @@ -68,7 +72,7 @@ func TestMemoryStatsNoMaxUsageFile(t *testing.T) { }) memory := &memoryGroup{} - _, err := memory.Stats(helper.CgroupData) + err := memory.GetStats(helper.CgroupData, &actualStats) if err == nil { t.Fatal("Expected failure") } @@ -84,7 +88,7 @@ func TestMemoryStatsBadStatFile(t *testing.T) { }) memory := &memoryGroup{} - _, err := memory.Stats(helper.CgroupData) + err := memory.GetStats(helper.CgroupData, &actualStats) if err == nil { t.Fatal("Expected failure") } @@ -100,7 +104,7 @@ func TestMemoryStatsBadUsageFile(t *testing.T) { }) memory := &memoryGroup{} - _, err := memory.Stats(helper.CgroupData) + err := memory.GetStats(helper.CgroupData, &actualStats) if err == nil { t.Fatal("Expected failure") } @@ -116,7 +120,7 @@ func TestMemoryStatsBadMaxUsageFile(t *testing.T) { }) memory := &memoryGroup{} - _, err := memory.Stats(helper.CgroupData) + err := memory.GetStats(helper.CgroupData, &actualStats) if err == nil { t.Fatal("Expected failure") } diff --git a/pkg/cgroups/fs/perf_event.go b/pkg/libcontainer/cgroups/fs/perf_event.go similarity index 72% rename from pkg/cgroups/fs/perf_event.go rename to pkg/libcontainer/cgroups/fs/perf_event.go index 789b3e59ad..1eb4df11b5 100644 --- a/pkg/cgroups/fs/perf_event.go +++ b/pkg/libcontainer/cgroups/fs/perf_event.go @@ -1,7 +1,7 @@ package fs import ( - "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) type perfEventGroup struct { @@ -19,6 +19,6 @@ func (s *perfEventGroup) Remove(d *data) error { return removePath(d.path("perf_event")) } -func (s *perfEventGroup) Stats(d *data) (map[string]float64, error) { - return nil, ErrNotSupportStat +func (s *perfEventGroup) GetStats(d *data, stats *cgroups.Stats) error { + return nil } diff --git a/pkg/libcontainer/cgroups/fs/stats_test_util.go b/pkg/libcontainer/cgroups/fs/stats_test_util.go new file mode 100644 index 0000000000..bebd0cb3e3 --- /dev/null +++ b/pkg/libcontainer/cgroups/fs/stats_test_util.go @@ -0,0 +1,73 @@ +package fs + +import ( + "fmt" + "log" + "testing" + + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" +) + +func blkioStatEntryEquals(expected, actual []cgroups.BlkioStatEntry) error { + if len(expected) != len(actual) { + return fmt.Errorf("blkioStatEntries length do not match") + } + for i, expValue := range expected { + actValue := actual[i] + if expValue != actValue { + return fmt.Errorf("Expected blkio stat entry %v but found %v", expValue, actValue) + } + } + return nil +} + +func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) { + if err := blkioStatEntryEquals(expected.IoServiceBytesRecursive, actual.IoServiceBytesRecursive); err != nil { + log.Printf("blkio IoServiceBytesRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoServicedRecursive, actual.IoServicedRecursive); err != nil { + 
log.Printf("blkio IoServicedRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoQueuedRecursive, actual.IoQueuedRecursive); err != nil { + log.Printf("blkio IoQueuedRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.SectorsRecursive, actual.SectorsRecursive); err != nil { + log.Printf("blkio SectorsRecursive do not match - %s\n", err) + t.Fail() + } +} + +func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) { + if expected != actual { + log.Printf("Expected throttling data %v but found %v\n", expected, actual) + t.Fail() + } +} + +func expectMemoryStatEquals(t *testing.T, expected, actual cgroups.MemoryStats) { + if expected.Usage != actual.Usage { + log.Printf("Expected memory usage %d but found %d\n", expected.Usage, actual.Usage) + t.Fail() + } + if expected.MaxUsage != actual.MaxUsage { + log.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage) + t.Fail() + } + for key, expValue := range expected.Stats { + actValue, ok := actual.Stats[key] + if !ok { + log.Printf("Expected memory stat key %s not found\n", key) + t.Fail() + } + if expValue != actValue { + log.Printf("Expected memory stat value %d but found %d\n", expValue, actValue) + t.Fail() + } + } +} diff --git a/pkg/cgroups/fs/test_util.go b/pkg/libcontainer/cgroups/fs/test_util.go similarity index 75% rename from pkg/cgroups/fs/test_util.go rename to pkg/libcontainer/cgroups/fs/test_util.go index 11b90b21d6..548870a8a3 100644 --- a/pkg/cgroups/fs/test_util.go +++ b/pkg/libcontainer/cgroups/fs/test_util.go @@ -8,7 +8,6 @@ package fs import ( "fmt" "io/ioutil" - "log" "os" "testing" ) @@ -59,17 +58,3 @@ func (c *cgroupTestUtil) writeFileContents(fileContents map[string]string) { } } } - -// Expect the specified stats. -func expectStats(t *testing.T, expected, actual map[string]float64) { - for stat, expectedValue := range expected { - actualValue, ok := actual[stat] - if !ok { - log.Printf("Expected stat %s to exist: %s", stat, actual) - t.Fail() - } else if actualValue != expectedValue { - log.Printf("Expected stats %s to have value %f but had %f instead", stat, expectedValue, actualValue) - t.Fail() - } - } -} diff --git a/pkg/cgroups/fs/utils.go b/pkg/libcontainer/cgroups/fs/utils.go similarity index 56% rename from pkg/cgroups/fs/utils.go rename to pkg/libcontainer/cgroups/fs/utils.go index 8be65c97ea..f65622a80f 100644 --- a/pkg/cgroups/fs/utils.go +++ b/pkg/libcontainer/cgroups/fs/utils.go @@ -16,25 +16,25 @@ var ( // Parses a cgroup param and returns as name, value // i.e. "io_service_bytes 1234" will return as io_service_bytes, 1234 -func getCgroupParamKeyValue(t string) (string, float64, error) { +func getCgroupParamKeyValue(t string) (string, uint64, error) { parts := strings.Fields(t) switch len(parts) { case 2: - value, err := strconv.ParseFloat(parts[1], 64) + value, err := strconv.ParseUint(parts[1], 10, 64) if err != nil { - return "", 0.0, fmt.Errorf("Unable to convert param value to float: %s", err) + return "", 0, fmt.Errorf("Unable to convert param value to uint64: %s", err) } return parts[0], value, nil default: - return "", 0.0, ErrNotValidFormat + return "", 0, ErrNotValidFormat } } -// Gets a single float64 value from the specified cgroup file. -func getCgroupParamFloat64(cgroupPath, cgroupFile string) (float64, error) { +// Gets a single int64 value from the specified cgroup file. 
+func getCgroupParamInt(cgroupPath, cgroupFile string) (uint64, error) { contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile)) if err != nil { - return -1.0, err + return 0, err } - return strconv.ParseFloat(strings.TrimSpace(string(contents)), 64) + return strconv.ParseUint(strings.TrimSpace(string(contents)), 10, 64) } diff --git a/pkg/cgroups/fs/utils_test.go b/pkg/libcontainer/cgroups/fs/utils_test.go similarity index 81% rename from pkg/cgroups/fs/utils_test.go rename to pkg/libcontainer/cgroups/fs/utils_test.go index c8f1b0172b..4dd2243efa 100644 --- a/pkg/cgroups/fs/utils_test.go +++ b/pkg/libcontainer/cgroups/fs/utils_test.go @@ -13,7 +13,7 @@ const ( floatString = "2048" ) -func TestGetCgroupParamsFloat64(t *testing.T) { +func TestGetCgroupParamsInt(t *testing.T) { // Setup tempdir. tempDir, err := ioutil.TempDir("", "cgroup_utils_test") if err != nil { @@ -27,7 +27,7 @@ func TestGetCgroupParamsFloat64(t *testing.T) { if err != nil { t.Fatal(err) } - value, err := getCgroupParamFloat64(tempDir, cgroupFile) + value, err := getCgroupParamInt(tempDir, cgroupFile) if err != nil { t.Fatal(err) } else if value != floatValue { @@ -39,7 +39,7 @@ func TestGetCgroupParamsFloat64(t *testing.T) { if err != nil { t.Fatal(err) } - value, err = getCgroupParamFloat64(tempDir, cgroupFile) + value, err = getCgroupParamInt(tempDir, cgroupFile) if err != nil { t.Fatal(err) } else if value != floatValue { @@ -51,7 +51,7 @@ func TestGetCgroupParamsFloat64(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = getCgroupParamFloat64(tempDir, cgroupFile) + _, err = getCgroupParamInt(tempDir, cgroupFile) if err == nil { t.Fatal("Expecting error, got none") } @@ -61,7 +61,7 @@ func TestGetCgroupParamsFloat64(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = getCgroupParamFloat64(tempDir, cgroupFile) + _, err = getCgroupParamInt(tempDir, cgroupFile) if err == nil { t.Fatal("Expecting error, got none") } diff --git a/pkg/libcontainer/cgroups/stats.go b/pkg/libcontainer/cgroups/stats.go new file mode 100644 index 0000000000..20a5f00a37 --- /dev/null +++ b/pkg/libcontainer/cgroups/stats.go @@ -0,0 +1,72 @@ +package cgroups + +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods,omitempty"` + // Number of periods when the container hit its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time,omitempty"` +} + +type CpuUsage struct { + // percentage of available CPUs currently being used. + PercentUsage uint64 `json:"percent_usage,omitempty"` + // nanoseconds of cpu time consumed over the last 100 ms. + CurrentUsage uint64 `json:"current_usage,omitempty"` + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + // Time spent by tasks of the cgroup in kernel mode. Units: nanoseconds. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + // Time spent by tasks of the cgroup in user mode. Units: nanoseconds. + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +type CpuStats struct { + CpuUsage CpuUsage `json:"cpu_usage,omitempty"` + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +type MemoryStats struct { + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat.
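+ // Keys mirror the lines of the kernel's memory.stat file, e.g. a line "cache 512" + // is stored as Stats["cache"] == 512 (the keys shown are examples, not a fixed set).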
+ Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. + Failcnt uint64 `json:"failcnt"` +} + +type BlkioStatEntry struct { + Major uint64 `json:"major,omitempty"` + Minor uint64 `json:"minor,omitempty"` + Op string `json:"op,omitempty"` + Value uint64 `json:"value,omitempty"` +} + +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queued_recursive,omitempty"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"` +} + +// TODO(Vishh): Remove freezer from stats since it does not logically belong in stats. +type FreezerStats struct { + ParentState string `json:"parent_state,omitempty"` + SelfState string `json:"self_state,omitempty"` +} + +type Stats struct { + CpuStats CpuStats `json:"cpu_stats,omitempty"` + MemoryStats MemoryStats `json:"memory_stats,omitempty"` + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` + FreezerStats FreezerStats `json:"freezer_stats,omitempty"` +} + +func NewStats() *Stats { + memoryStats := MemoryStats{Stats: make(map[string]uint64)} + return &Stats{MemoryStats: memoryStats} +} diff --git a/pkg/libcontainer/cgroups/systemd/apply_nosystemd.go b/pkg/libcontainer/cgroups/systemd/apply_nosystemd.go new file mode 100644 index 0000000000..c72bb11611 --- /dev/null +++ b/pkg/libcontainer/cgroups/systemd/apply_nosystemd.go @@ -0,0 +1,25 @@ +// +build !linux + +package systemd + +import ( + "fmt" + + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" +) + +func UseSystemd() bool { + return false +} + +func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { + return nil, fmt.Errorf("Systemd not supported") +} + +func GetPids(c *cgroups.Cgroup) ([]int, error) { + return nil, fmt.Errorf("Systemd not supported") +} + +func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { + return fmt.Errorf("Systemd not supported") +} diff --git a/pkg/cgroups/systemd/apply_systemd.go b/pkg/libcontainer/cgroups/systemd/apply_systemd.go similarity index 62% rename from pkg/cgroups/systemd/apply_systemd.go rename to pkg/libcontainer/cgroups/systemd/apply_systemd.go index c4b0937b63..c486dbeb9c 100644 --- a/pkg/cgroups/systemd/apply_systemd.go +++ b/pkg/libcontainer/cgroups/systemd/apply_systemd.go @@ -3,15 +3,18 @@ package systemd import ( + "bytes" + "fmt" "io/ioutil" "os" "path/filepath" "strconv" "strings" "sync" + "time" systemd1 "github.com/coreos/go-systemd/dbus" - "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" "github.com/dotcloud/docker/pkg/systemd" "github.com/godbus/dbus" ) @@ -20,11 +23,6 @@ type systemdCgroup struct { cleanupDirs []string } -type DeviceAllow struct { - Node string - Permissions string -} - var ( connLock sync.Mutex theConn *systemd1.Conn @@ -78,7 +76,7 @@ type cgroupArg struct { func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { var ( - unitName = c.Parent + "-" + c.Name + ".scope" + unitName = getUnitName(c) slice = "system.slice" properties []systemd1.Property cpuArgs []cgroupArg @@ -115,26 +113,6 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { systemd1.Property{"PIDs", dbus.MakeVariant([]uint32{uint32(pid)})}, ) - if !c.DeviceAccess { - properties = append(properties, - systemd1.Property{"DevicePolicy",
dbus.MakeVariant("strict")}, - systemd1.Property{"DeviceAllow", dbus.MakeVariant([]DeviceAllow{ - {"/dev/null", "rwm"}, - {"/dev/zero", "rwm"}, - {"/dev/full", "rwm"}, - {"/dev/random", "rwm"}, - {"/dev/urandom", "rwm"}, - {"/dev/tty", "rwm"}, - {"/dev/console", "rwm"}, - {"/dev/tty0", "rwm"}, - {"/dev/tty1", "rwm"}, - {"/dev/pts/ptmx", "rwm"}, - // There is no way to add /dev/pts/* here atm, so we hack this manually below - // /dev/pts/* (how to add this?) - // Same with tuntap, which doesn't exist as a node most of the time - })}) - } - // Always enable accounting, this gets us the same behaviour as the fs implementation, // plus the kernel has some problems with joining the memory cgroup at a later time. properties = append(properties, @@ -166,22 +144,50 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { cgroup := props["ControlGroup"].(string) - if !c.DeviceAccess { + if !c.AllowAllDevices { + // Atm we can't use the systemd device support because of two missing things: + // * Support for wildcards to allow mknod on any device + // * Support for wildcards to allow /dev/pts support + // + // The second is available in more recent systemd as "char-pts", but not in e.g. v208 which is + // in wide use. When both these are availalable we will be able to switch, but need to keep the old + // implementation for backwards compat. + // + // Note: we can't use systemd to set up the initial limits, and then change the cgroup + // because systemd will re-write the device settings if it needs to re-apply the cgroup context. + // This happens at least for v208 when any sibling unit is started. + mountpoint, err := cgroups.FindCgroupMountpoint("devices") if err != nil { return nil, err } - path := filepath.Join(mountpoint, cgroup) - - // /dev/pts/* - if err := ioutil.WriteFile(filepath.Join(path, "devices.allow"), []byte("c 136:* rwm"), 0700); err != nil { + initPath, err := cgroups.GetInitCgroupDir("devices") + if err != nil { return nil, err } - // tuntap - if err := ioutil.WriteFile(filepath.Join(path, "devices.allow"), []byte("c 10:200 rwm"), 0700); err != nil { + + dir := filepath.Join(mountpoint, initPath, c.Parent, c.Name) + + res.cleanupDirs = append(res.cleanupDirs, dir) + + if err := os.MkdirAll(dir, 0755); err != nil && !os.IsExist(err) { return nil, err } + + if err := ioutil.WriteFile(filepath.Join(dir, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil { + return nil, err + } + + if err := writeFile(dir, "devices.deny", "a"); err != nil { + return nil, err + } + + for _, dev := range c.AllowedDevices { + if err := writeFile(dir, "devices.allow", dev.GetCgroupAllowString()); err != nil { + return nil, err + } + } } if len(cpuArgs) != 0 { @@ -214,6 +220,14 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { } } + // we need to manually join the freezer cgroup in systemd because it does not currently support it + // via the dbus api + freezerPath, err := joinFreezer(c, pid) + if err != nil { + return nil, err + } + res.cleanupDirs = append(res.cleanupDirs, freezerPath) + if len(cpusetArgs) != 0 { // systemd does not atm set up the cpuset controller, so we must manually // join it. 
Additionally that is a very finicky controller where each @@ -223,14 +237,19 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { if err != nil { return nil, err } + initPath, err := cgroups.GetInitCgroupDir("cpuset") if err != nil { return nil, err } - rootPath := filepath.Join(mountpoint, initPath) + var ( + foundCpus bool + foundMems bool - path := filepath.Join(mountpoint, initPath, c.Parent+"-"+c.Name) + rootPath = filepath.Join(mountpoint, initPath) + path = filepath.Join(mountpoint, initPath, c.Parent+"-"+c.Name) + ) res.cleanupDirs = append(res.cleanupDirs, path) @@ -238,9 +257,6 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { return nil, err } - foundCpus := false - foundMems := false - for _, arg := range cpusetArgs { if arg.File == "cpuset.cpus" { foundCpus = true @@ -285,6 +301,10 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { return &res, nil } +func writeFile(dir, file, data string) error { + return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) +} + func (c *systemdCgroup) Cleanup() error { // systemd cleans up, we don't need to do much @@ -294,3 +314,78 @@ func (c *systemdCgroup) Cleanup() error { return nil } + +func joinFreezer(c *cgroups.Cgroup, pid int) (string, error) { + path, err := getFreezerPath(c) + if err != nil { + return "", err + } + + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return "", err + } + + if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil { + return "", err + } + + return path, nil +} + +func getFreezerPath(c *cgroups.Cgroup) (string, error) { + mountpoint, err := cgroups.FindCgroupMountpoint("freezer") + if err != nil { + return "", err + } + + initPath, err := cgroups.GetInitCgroupDir("freezer") + if err != nil { + return "", err + } + + return filepath.Join(mountpoint, initPath, fmt.Sprintf("%s-%s", c.Parent, c.Name)), nil + +} + +func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { + path, err := getFreezerPath(c) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(path, "freezer.state"), []byte(state), 0); err != nil { + return err + } + for { + state_, err := ioutil.ReadFile(filepath.Join(path, "freezer.state")) + if err != nil { + return err + } + if string(state) == string(bytes.TrimSpace(state_)) { + break + } + time.Sleep(1 * time.Millisecond) + } + return nil +} + +func GetPids(c *cgroups.Cgroup) ([]int, error) { + unitName := getUnitName(c) + + mountpoint, err := cgroups.FindCgroupMountpoint("cpu") + if err != nil { + return nil, err + } + + props, err := theConn.GetUnitTypeProperties(unitName, getIfaceForUnit(unitName)) + if err != nil { + return nil, err + } + cgroup := props["ControlGroup"].(string) + + return cgroups.ReadProcsFile(filepath.Join(mountpoint, cgroup)) +} + +func getUnitName(c *cgroups.Cgroup) string { + return fmt.Sprintf("%s-%s.scope", c.Parent, c.Name) +} diff --git a/pkg/cgroups/utils.go b/pkg/libcontainer/cgroups/utils.go similarity index 76% rename from pkg/cgroups/utils.go rename to pkg/libcontainer/cgroups/utils.go index 02a7f357f6..111c871477 100644 --- a/pkg/cgroups/utils.go +++ b/pkg/libcontainer/cgroups/utils.go @@ -4,6 +4,8 @@ import ( "bufio" "io" "os" + "path/filepath" + "strconv" "strings" "github.com/dotcloud/docker/pkg/mount" @@ -49,6 +51,30 @@ func GetInitCgroupDir(subsystem string) (string, error) { return parseCgroupFile(subsystem, f) } +func ReadProcsFile(dir string) ([]int, error) { + 
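// cgroup.procs holds one PID per line; blank lines are skipped while scanning below.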
f, err := os.Open(filepath.Join(dir, "cgroup.procs")) + if err != nil { + return nil, err + } + defer f.Close() + + var ( + s = bufio.NewScanner(f) + out = []int{} + ) + + for s.Scan() { + if t := s.Text(); t != "" { + pid, err := strconv.Atoi(t) + if err != nil { + return nil, err + } + out = append(out, pid) + } + } + return out, nil +} + func parseCgroupFile(subsystem string, r io.Reader) (string, error) { s := bufio.NewScanner(r) for s.Scan() { diff --git a/pkg/libcontainer/console/console.go b/pkg/libcontainer/console/console.go index 5f06aea225..79a480418f 100644 --- a/pkg/libcontainer/console/console.go +++ b/pkg/libcontainer/console/console.go @@ -17,29 +17,26 @@ func Setup(rootfs, consolePath, mountLabel string) error { oldMask := system.Umask(0000) defer system.Umask(oldMask) - stat, err := os.Stat(consolePath) - if err != nil { - return fmt.Errorf("stat console %s %s", consolePath, err) - } - var ( - st = stat.Sys().(*syscall.Stat_t) - dest = filepath.Join(rootfs, "dev/console") - ) - if err := os.Remove(dest); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("remove %s %s", dest, err) - } if err := os.Chmod(consolePath, 0600); err != nil { return err } if err := os.Chown(consolePath, 0, 0); err != nil { return err } - if err := system.Mknod(dest, (st.Mode&^07777)|0600, int(st.Rdev)); err != nil { - return fmt.Errorf("mknod %s %s", dest, err) - } if err := label.SetFileLabel(consolePath, mountLabel); err != nil { - return fmt.Errorf("set file label %s %s", dest, err) + return fmt.Errorf("set file label %s %s", consolePath, err) } + + dest := filepath.Join(rootfs, "dev/console") + + f, err := os.Create(dest) + if err != nil && !os.IsExist(err) { + return fmt.Errorf("create %s %s", dest, err) + } + if f != nil { + f.Close() + } + if err := system.Mount(consolePath, dest, "bind", syscall.MS_BIND, ""); err != nil { return fmt.Errorf("bind %s to %s %s", consolePath, dest, err) } diff --git a/pkg/libcontainer/container.go b/pkg/libcontainer/container.go index 5acdff3d29..c5864e948a 100644 --- a/pkg/libcontainer/container.go +++ b/pkg/libcontainer/container.go @@ -1,29 +1,71 @@ package libcontainer import ( - "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/devices" ) -// Context is a generic key value pair that allows -// arbatrary data to be sent +// Context is a generic key value pair that allows arbitrary data to be sent type Context map[string]string -// Container defines configuration options for how a -// container is setup inside a directory and how a process should be executed +// Container defines configuration options for executing a process inside a contained environment type Container struct { - Hostname string `json:"hostname,omitempty"` // hostname - ReadonlyFs bool `json:"readonly_fs,omitempty"` // set the containers rootfs as readonly - NoPivotRoot bool `json:"no_pivot_root,omitempty"` // this can be enabled if you are running in ramdisk - User string `json:"user,omitempty"` // user to execute the process as - WorkingDir string `json:"working_dir,omitempty"` // current working directory - Env []string `json:"environment,omitempty"` // environment to set - Tty bool `json:"tty,omitempty"` // setup a proper tty or not - Namespaces map[string]bool `json:"namespaces,omitempty"` // namespaces to apply - CapabilitiesMask map[string]bool `json:"capabilities_mask,omitempty"` // capabilities to drop - Networks []*Network `json:"networks,omitempty"` // nil for host's network
stack - Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` // cgroups - Context Context `json:"context,omitempty"` // generic context for specific options (apparmor, selinux) - Mounts Mounts `json:"mounts,omitempty"` + // Hostname optionally sets the container's hostname if provided + Hostname string `json:"hostname,omitempty"` + + // ReadonlyFs will remount the container's rootfs as readonly where only externally mounted + // bind mounts are writable + ReadonlyFs bool `json:"readonly_fs,omitempty"` + + // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs + // This is a common option when the container is running in ramdisk + NoPivotRoot bool `json:"no_pivot_root,omitempty"` + + // User will set the uid and gid of the executing process running inside the container + User string `json:"user,omitempty"` + + // WorkingDir will change the process's current working directory inside the container's rootfs + WorkingDir string `json:"working_dir,omitempty"` + + // Env will populate the process's environment with the provided values + // Any values from the parent process will be cleared before the values + // provided in Env are set for the process + Env []string `json:"environment,omitempty"` + + // Tty when true will allocate a pty slave on the host for access by the container's process + // and ensure that it is mounted inside the container's rootfs + Tty bool `json:"tty,omitempty"` + + // Namespaces specifies the container's namespaces that it should set up when cloning the init process + // If a namespace is not provided that namespace is shared from the container's parent process + Namespaces map[string]bool `json:"namespaces,omitempty"` + + // Capabilities specify the capabilities to keep when executing the process inside the container + // All capabilities not specified will be dropped from the process's capability mask + Capabilities []string `json:"capabilities,omitempty"` + + // Networks specifies the container's network setup to be created + Networks []*Network `json:"networks,omitempty"` + + // Routes can be specified to create entries in the route table as the container is started + Routes []*Route `json:"routes,omitempty"` + + // Cgroups specifies specific cgroup settings for the various subsystems that the container is + // placed into to limit the resources the container has available + Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` + + // Context is a generic key value format that allows for additional settings to be passed + // on the container's creation + // This is commonly used to specify apparmor profiles, selinux labels, and different restrictions + // placed on the container's processes + Context Context `json:"context,omitempty"` + + // Mounts specify additional source and destination paths that will be mounted inside the container's + // rootfs and mount namespace if specified + Mounts Mounts `json:"mounts,omitempty"` + + // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well! + DeviceNodes []*devices.Device `json:"device_nodes,omitempty"` } // Network defines configuration for a container's networking stack @@ -31,9 +73,41 @@ type Container struct { // The network configuration can be omited from a container causing the // container to be setup with the host's networking stack type Network struct { - Type string `json:"type,omitempty"` // type of networking to setup i.e.
veth, macvlan, etc - Context Context `json:"context,omitempty"` // generic context for type specific networking options - Address string `json:"address,omitempty"` - Gateway string `json:"gateway,omitempty"` - Mtu int `json:"mtu,omitempty"` + // Type sets the network's type, commonly veth and loopback + Type string `json:"type,omitempty"` + + // Context is a generic key value format for setting additional options that are specific to + // the network type + Context Context `json:"context,omitempty"` + + // Address contains the IP and mask to set on the network interface + Address string `json:"address,omitempty"` + + // Gateway sets the gateway address that is used as the default for the interface + Gateway string `json:"gateway,omitempty"` + + // Mtu sets the mtu value for the interface and will be mirrored on both the host and + // container's interfaces if a pair is created, specifically in the case of type veth + Mtu int `json:"mtu,omitempty"` +} + +// Routes can be specified to create entries in the route table as the container is started +// +// All of destination, source, and gateway should be either IPv4 or IPv6. +// One of the three options must be present, and omitted entries will use their +// IP family default for the route table. For IPv4 for example, setting the +// gateway to 1.2.3.4 and the interface to eth0 will set up a standard +// destination of 0.0.0.0 (or *) when viewed in the route table. +type Route struct { + // Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6 + Destination string `json:"destination,omitempty"` + + // Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6 + Source string `json:"source,omitempty"` + + // Sets the gateway. Accepts IPv4 and IPv6 + Gateway string `json:"gateway,omitempty"` + + // The device to set this route up for, for example: eth0 + InterfaceName string `json:"interface_name,omitempty"` } diff --git a/pkg/libcontainer/container.json b/pkg/libcontainer/container.json index 33d79600d4..7448a077e0 100644 --- a/pkg/libcontainer/container.json +++ b/pkg/libcontainer/container.json @@ -24,24 +24,19 @@ "mtu": 1500 } ], - "capabilities_mask": { - "SYSLOG": false, - "MKNOD": true, - "NET_ADMIN": false, - "MAC_ADMIN": false, - "MAC_OVERRIDE": false, - "AUDIT_CONTROL": false, - "AUDIT_WRITE": false, - "SYS_TTY_CONFIG": false, - "SETPCAP": false, - "SYS_MODULE": false, - "SYS_RAWIO": false, - "SYS_PACCT": false, - "SYS_ADMIN": false, - "SYS_NICE": false, - "SYS_RESOURCE": false, - "SYS_TIME": false - }, + "routes": [ + { + "gateway": "172.17.42.1", + "interface_name": "eth0" + }, + { + "destination": "192.168.0.0/24", + "interface_name": "eth0" + } + ], + "capabilities": [ + "MKNOD" + ], "cgroups": { "name": "docker-koye", "parent": "docker" @@ -58,5 +53,55 @@ { "type": "devtmpfs" } + ], + "device_nodes": [ + { + "path": "/dev/null", + "type": 99, + "major_number": 1, + "minor_number": 3, + "cgroup_permissions": "rwm", + "file_mode": 438 + }, + { + "path": "/dev/zero", + "type": 99, + "major_number": 1, + "minor_number": 5, + "cgroup_permissions": "rwm", + "file_mode": 438 + }, + { + "path": "/dev/full", + "type": 99, + "major_number": 1, + "minor_number": 7, + "cgroup_permissions": "rwm", + "file_mode": 438 + }, + { + "path": "/dev/tty", + "type": 99, + "major_number": 5, + "minor_number": 0, + "cgroup_permissions": "rwm", + "file_mode": 438 + }, + { + "path": "/dev/urandom", + "type": 99, + "major_number": 1, + "minor_number": 9, + "cgroup_permissions": "rwm", + "file_mode": 438 + }, + { + "path":
"/dev/random", + "type": 99, + "major_number": 1, + "minor_number": 8, + "cgroup_permissions": "rwm", + "file_mode": 438 + } ] } diff --git a/pkg/libcontainer/container_test.go b/pkg/libcontainer/container_test.go index c02385af3f..838281833f 100644 --- a/pkg/libcontainer/container_test.go +++ b/pkg/libcontainer/container_test.go @@ -6,6 +6,16 @@ import ( "testing" ) +// Checks whether the expected capability is specified in the capabilities. +func contains(expected string, values []string) bool { + for _, v := range values { + if v == expected { + return true + } + } + return false +} + func TestContainerJsonFormat(t *testing.T) { f, err := os.Open("container.json") if err != nil { @@ -27,6 +37,11 @@ func TestContainerJsonFormat(t *testing.T) { t.Fail() } + if len(container.Routes) != 2 { + t.Log("should have found 2 routes") + t.Fail() + } + if !container.Namespaces["NEWNET"] { t.Log("namespaces should contain NEWNET") t.Fail() @@ -37,22 +52,17 @@ func TestContainerJsonFormat(t *testing.T) { t.Fail() } - if _, exists := container.CapabilitiesMask["SYS_ADMIN"]; !exists { - t.Log("capabilities mask should contain SYS_ADMIN") - t.Fail() - } - - if container.CapabilitiesMask["SYS_ADMIN"] { + if contains("SYS_ADMIN", container.Capabilities) { t.Log("SYS_ADMIN should not be enabled in capabilities mask") t.Fail() } - if !container.CapabilitiesMask["MKNOD"] { + if !contains("MKNOD", container.Capabilities) { t.Log("MKNOD should be enabled in capabilities mask") t.Fail() } - if container.CapabilitiesMask["SYS_CHROOT"] { + if contains("SYS_CHROOT", container.Capabilities) { t.Log("capabilities mask should not contain SYS_CHROOT") t.Fail() } diff --git a/pkg/libcontainer/devices/defaults.go b/pkg/libcontainer/devices/defaults.go new file mode 100644 index 0000000000..393c438c59 --- /dev/null +++ b/pkg/libcontainer/devices/defaults.go @@ -0,0 +1,159 @@ +package devices + +var ( + // These are devices that are to be both allowed and created. 
+ + DefaultSimpleDevices = []*Device{ + // /dev/null and zero + { + Path: "/dev/null", + Type: 'c', + MajorNumber: 1, + MinorNumber: 3, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + { + Path: "/dev/zero", + Type: 'c', + MajorNumber: 1, + MinorNumber: 5, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + + { + Path: "/dev/full", + Type: 'c', + MajorNumber: 1, + MinorNumber: 7, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + + // consoles and ttys + { + Path: "/dev/tty", + Type: 'c', + MajorNumber: 5, + MinorNumber: 0, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + + // /dev/urandom, /dev/random + { + Path: "/dev/urandom", + Type: 'c', + MajorNumber: 1, + MinorNumber: 9, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + { + Path: "/dev/random", + Type: 'c', + MajorNumber: 1, + MinorNumber: 8, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + } + + DefaultAllowedDevices = append([]*Device{ + // allow mknod for any device + { + Type: 'c', + MajorNumber: Wildcard, + MinorNumber: Wildcard, + CgroupPermissions: "m", + }, + { + Type: 'b', + MajorNumber: Wildcard, + MinorNumber: Wildcard, + CgroupPermissions: "m", + }, + + { + Path: "/dev/console", + Type: 'c', + MajorNumber: 5, + MinorNumber: 1, + CgroupPermissions: "rwm", + }, + { + Path: "/dev/tty0", + Type: 'c', + MajorNumber: 4, + MinorNumber: 0, + CgroupPermissions: "rwm", + }, + { + Path: "/dev/tty1", + Type: 'c', + MajorNumber: 4, + MinorNumber: 1, + CgroupPermissions: "rwm", + }, + // /dev/pts/ - pts namespaces are "coming soon" + { + Path: "", + Type: 'c', + MajorNumber: 136, + MinorNumber: Wildcard, + CgroupPermissions: "rwm", + }, + { + Path: "", + Type: 'c', + MajorNumber: 5, + MinorNumber: 2, + CgroupPermissions: "rwm", + }, + + // tuntap + { + Path: "", + Type: 'c', + MajorNumber: 10, + MinorNumber: 200, + CgroupPermissions: "rwm", + }, + + /*// fuse + { + Path: "", + Type: 'c', + MajorNumber: 10, + MinorNumber: 229, + CgroupPermissions: "rwm", + }, + + // rtc + { + Path: "", + Type: 'c', + MajorNumber: 254, + MinorNumber: 0, + CgroupPermissions: "rwm", + }, + */ + }, DefaultSimpleDevices...) + + DefaultAutoCreatedDevices = append([]*Device{ + { + // /dev/fuse is created but not allowed. + // This is to allow java to work, because java + // insists on there being a /dev/fuse + // https://github.com/dotcloud/docker/issues/514 + // https://github.com/dotcloud/docker/issues/2393 + // + Path: "/dev/fuse", + Type: 'c', + MajorNumber: 10, + MinorNumber: 229, + CgroupPermissions: "rwm", + }, + }, DefaultSimpleDevices...) +) diff --git a/pkg/libcontainer/devices/devices.go b/pkg/libcontainer/devices/devices.go new file mode 100644 index 0000000000..f6bee56df2 --- /dev/null +++ b/pkg/libcontainer/devices/devices.go @@ -0,0 +1,119 @@ +package devices + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" +) + +const ( + Wildcard = -1 +) + +var ( + ErrNotADeviceNode = errors.New("not a device node") +) + +type Device struct { + Type rune `json:"type,omitempty"` + Path string `json:"path,omitempty"` // It is fine if this is an empty string in the case that you are using Wildcards + MajorNumber int64 `json:"major_number,omitempty"` // Use the wildcard constant for wildcards. + MinorNumber int64 `json:"minor_number,omitempty"` // Use the wildcard constant for wildcards.
+ CgroupPermissions string `json:"cgroup_permissions,omitempty"` // Typically just "rwm" + FileMode os.FileMode `json:"file_mode,omitempty"` // The permission bits of the file's mode +} + +func GetDeviceNumberString(deviceNumber int64) string { + if deviceNumber == Wildcard { + return "*" + } else { + return fmt.Sprintf("%d", deviceNumber) + } +} + +func (device *Device) GetCgroupAllowString() string { + return fmt.Sprintf("%c %s:%s %s", device.Type, GetDeviceNumberString(device.MajorNumber), GetDeviceNumberString(device.MinorNumber), device.CgroupPermissions) +} + +// Given the path to a device and its cgroup_permissions (which cannot be easily queried), look up the information about a linux device and return that information as a Device struct. +func GetDevice(path string, cgroupPermissions string) (*Device, error) { + fileInfo, err := os.Stat(path) + if err != nil { + return nil, err + } + + var ( + devType rune + mode = fileInfo.Mode() + fileModePermissionBits = os.FileMode.Perm(mode) + ) + + switch { + case mode&os.ModeDevice == 0: + return nil, ErrNotADeviceNode + case mode&os.ModeCharDevice != 0: + fileModePermissionBits |= syscall.S_IFCHR + devType = 'c' + default: + fileModePermissionBits |= syscall.S_IFBLK + devType = 'b' + } + + stat_t, ok := fileInfo.Sys().(*syscall.Stat_t) + if !ok { + return nil, fmt.Errorf("cannot determine the device number for device %s", path) + } + devNumber := int(stat_t.Rdev) + + return &Device{ + Type: devType, + Path: path, + MajorNumber: Major(devNumber), + MinorNumber: Minor(devNumber), + CgroupPermissions: cgroupPermissions, + FileMode: fileModePermissionBits, + }, nil +} + +func GetHostDeviceNodes() ([]*Device, error) { + return getDeviceNodes("/dev") +} + +func getDeviceNodes(path string) ([]*Device, error) { + files, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + + out := []*Device{} + for _, f := range files { + if f.IsDir() { + switch f.Name() { + case "pts", "shm", "fd": + continue + default: + sub, err := getDeviceNodes(filepath.Join(path, f.Name())) + if err != nil { + return nil, err + } + + out = append(out, sub...) + continue + } + } + + device, err := GetDevice(filepath.Join(path, f.Name()), "rwm") + if err != nil { + if err == ErrNotADeviceNode { + continue + } + return nil, err + } + out = append(out, device) + } + + return out, nil +} diff --git a/pkg/libcontainer/devices/number.go b/pkg/libcontainer/devices/number.go new file mode 100644 index 0000000000..3aae380bb1 --- /dev/null +++ b/pkg/libcontainer/devices/number.go @@ -0,0 +1,26 @@ +package devices + +/* + +This code provides support for manipulating linux device numbers. It should be replaced by normal syscall functions once http://code.google.com/p/go/issues/detail?id=8106 is solved. + +You can read what they are here: + + - http://www.makelinux.net/ldd3/chp-3-sect-2 + - http://www.linux-tutorial.info/modules.php?name=MContent&pageid=94 + +Note! These are NOT the same as the MAJOR(dev_t device);, MINOR(dev_t device); and MKDEV(int major, int minor); functions as defined in <linux/kdev_t.h>, as the representation of device numbers used by Go is different from the one used internally to the kernel!
- https://github.com/torvalds/linux/blob/master/include/linux/kdev_t.h#L9 + +*/ + +func Major(devNumber int) int64 { + return int64((devNumber >> 8) & 0xfff) +} + +func Minor(devNumber int) int64 { + return int64((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00)) +} + +func Mkdev(majorNumber int64, minorNumber int64) int { + return int((majorNumber << 8) | (minorNumber & 0xff) | ((minorNumber & 0xfff00) << 12)) +} diff --git a/pkg/libcontainer/mount/init.go b/pkg/libcontainer/mount/init.go index cfe61d1532..af7a521c46 100644 --- a/pkg/libcontainer/mount/init.go +++ b/pkg/libcontainer/mount/init.go @@ -11,6 +11,7 @@ import ( "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/mount/nodes" + "github.com/dotcloud/docker/pkg/symlink" "github.com/dotcloud/docker/pkg/system" ) @@ -36,7 +37,7 @@ func InitializeMountNamespace(rootfs, console string, container *libcontainer.Co flag = syscall.MS_SLAVE } if err := system.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil { - return fmt.Errorf("mounting / as slave %s", err) + return fmt.Errorf("mounting / with flags %X %s", (flag | syscall.MS_REC), err) } if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil { return fmt.Errorf("mouting %s as bind %s", rootfs, err) @@ -47,12 +48,15 @@ func InitializeMountNamespace(rootfs, console string, container *libcontainer.Co if err := setupBindmounts(rootfs, container.Mounts); err != nil { return fmt.Errorf("bind mounts %s", err) } - if err := nodes.CopyN(rootfs, nodes.DefaultNodes); err != nil { - return fmt.Errorf("copy dev nodes %s", err) + if err := nodes.CreateDeviceNodes(rootfs, container.DeviceNodes); err != nil { + return fmt.Errorf("create device nodes %s", err) } if err := SetupPtmx(rootfs, console, container.Context["mount_label"]); err != nil { return err } + if err := setupDevSymlinks(rootfs); err != nil { + return fmt.Errorf("dev symlinks %s", err) + } if err := system.Chdir(rootfs); err != nil { return fmt.Errorf("chdir into %s %s", rootfs, err) } @@ -91,6 +95,56 @@ func mountSystem(rootfs string, container *libcontainer.Container) error { return nil } +func createIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + } else { + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + } + return nil +} + +func setupDevSymlinks(rootfs string) error { + var links = [][2]string{ + {"/proc/self/fd", "/dev/fd"}, + {"/proc/self/fd/0", "/dev/stdin"}, + {"/proc/self/fd/1", "/dev/stdout"}, + {"/proc/self/fd/2", "/dev/stderr"}, + } + + // kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink + // in /dev if it exists in /proc. 
+ if _, err := os.Stat("/proc/kcore"); err == nil { + links = append(links, [2]string{"/proc/kcore", "/dev/kcore"}) + } + + for _, link := range links { + var ( + src = link[0] + dst = filepath.Join(rootfs, link[1]) + ) + + if err := os.Symlink(src, dst); err != nil && !os.IsExist(err) { + return fmt.Errorf("symlink %s %s %s", src, dst, err) + } + } + + return nil +} + func setupBindmounts(rootfs string, bindMounts libcontainer.Mounts) error { for _, m := range bindMounts.OfType("bind") { var ( @@ -100,6 +154,21 @@ func setupBindmounts(rootfs string, bindMounts libcontainer.Mounts) error { if !m.Writable { flags = flags | syscall.MS_RDONLY } + + stat, err := os.Stat(m.Source) + if err != nil { + return err + } + + dest, err = symlink.FollowSymlinkInScope(dest, rootfs) + if err != nil { + return err + } + + if err := createIfNotExists(dest, stat.IsDir()); err != nil { + return fmt.Errorf("Creating new bind-mount target, %s", err) + } + if err := system.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil { return fmt.Errorf("mounting %s into %s %s", m.Source, dest, err) } @@ -123,12 +192,10 @@ func newSystemMounts(rootfs, mountLabel string, mounts libcontainer.Mounts) []mo systemMounts := []mount{ {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, {source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags}, + {source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)}, {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)}, {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}, } - if len(mounts.OfType("devtmpfs")) == 1 { - systemMounts = append([]mount{{source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)}}, systemMounts...) - } return systemMounts } diff --git a/pkg/libcontainer/mount/nodes/nodes.go b/pkg/libcontainer/mount/nodes/nodes.go index 5022f85b0b..dd67ae2d58 100644 --- a/pkg/libcontainer/mount/nodes/nodes.go +++ b/pkg/libcontainer/mount/nodes/nodes.go @@ -4,46 +4,50 @@ package nodes import ( "fmt" - "github.com/dotcloud/docker/pkg/system" "os" "path/filepath" "syscall" + + "github.com/dotcloud/docker/pkg/libcontainer/devices" + "github.com/dotcloud/docker/pkg/system" ) -// Default list of device nodes to copy -var DefaultNodes = []string{ - "null", - "zero", - "full", - "random", - "urandom", - "tty", -} - -// CopyN copies the device node from the host into the rootfs -func CopyN(rootfs string, nodesToCopy []string) error { +// Create the device nodes in the container. +func CreateDeviceNodes(rootfs string, nodesToCreate []*devices.Device) error { oldMask := system.Umask(0000) defer system.Umask(oldMask) - for _, node := range nodesToCopy { - if err := Copy(rootfs, node); err != nil { + for _, node := range nodesToCreate { + if err := CreateDeviceNode(rootfs, node); err != nil { return err } } return nil } -func Copy(rootfs, node string) error { - stat, err := os.Stat(filepath.Join("/dev", node)) - if err != nil { +// Creates the device node in the rootfs of the container. 
+func CreateDeviceNode(rootfs string, node *devices.Device) error { + var ( + dest = filepath.Join(rootfs, node.Path) + parent = filepath.Dir(dest) + ) + + if err := os.MkdirAll(parent, 0755); err != nil { return err } - var ( - dest = filepath.Join(rootfs, "dev", node) - st = stat.Sys().(*syscall.Stat_t) - ) - if err := system.Mknod(dest, st.Mode, int(st.Rdev)); err != nil && !os.IsExist(err) { - return fmt.Errorf("copy %s %s", node, err) + + fileMode := node.FileMode + switch node.Type { + case 'c': + fileMode |= syscall.S_IFCHR + case 'b': + fileMode |= syscall.S_IFBLK + default: + return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path) + } + + if err := system.Mknod(dest, uint32(fileMode), devices.Mkdev(node.MajorNumber, node.MinorNumber)); err != nil && !os.IsExist(err) { + return fmt.Errorf("mknod %s %s", node.Path, err) } return nil } diff --git a/pkg/libcontainer/mount/nodes/nodes_unsupported.go b/pkg/libcontainer/mount/nodes/nodes_unsupported.go new file mode 100644 index 0000000000..0e5d12c73e --- /dev/null +++ b/pkg/libcontainer/mount/nodes/nodes_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package nodes + +import ( + "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/devices" +) + +func CreateDeviceNodes(rootfs string, nodesToCreate []*devices.Device) error { + return libcontainer.ErrUnsupported +} diff --git a/pkg/libcontainer/nsinit/create.go b/pkg/libcontainer/namespaces/create.go similarity index 92% rename from pkg/libcontainer/nsinit/create.go rename to pkg/libcontainer/namespaces/create.go index d5cba464d2..60b2a2db02 100644 --- a/pkg/libcontainer/nsinit/create.go +++ b/pkg/libcontainer/namespaces/create.go @@ -1,4 +1,4 @@ -package nsinit +package namespaces import ( "os" diff --git a/pkg/libcontainer/namespaces/exec.go b/pkg/libcontainer/namespaces/exec.go new file mode 100644 index 0000000000..288205ea60 --- /dev/null +++ b/pkg/libcontainer/namespaces/exec.go @@ -0,0 +1,176 @@ +// +build linux + +package namespaces + +import ( + "os" + "os/exec" + "syscall" + + "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups/fs" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups/systemd" + "github.com/dotcloud/docker/pkg/libcontainer/network" + "github.com/dotcloud/docker/pkg/system" +) + +// Exec performs setup outside of a namespace so that a container can be +// executed. Exec is a high level function for working with container namespaces.
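+// +// The rough sequence, mirroring the body below: create a sync pipe, start the +// init process, record its pid and start time, apply cgroups, initialize +// networking, then close the pipe to release the child and wait for it to exit.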
+func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) { + var ( + master *os.File + console string + err error + ) + + // create a pipe so that we can synchronize with the namespaced process and + // pass the veth name to the child + syncPipe, err := NewSyncPipe() + if err != nil { + return -1, err + } + + if container.Tty { + master, console, err = system.CreateMasterAndConsole() + if err != nil { + return -1, err + } + term.SetMaster(master) + } + + command := createCommand(container, console, rootfs, dataPath, os.Args[0], syncPipe.child, args) + + if err := term.Attach(command); err != nil { + return -1, err + } + defer term.Close() + + if err := command.Start(); err != nil { + return -1, err + } + + started, err := system.GetProcessStartTime(command.Process.Pid) + if err != nil { + return -1, err + } + if err := WritePid(dataPath, command.Process.Pid, started); err != nil { + command.Process.Kill() + command.Wait() + return -1, err + } + defer DeletePid(dataPath) + + // Do this before syncing with child so that no children + // can escape the cgroup + cleaner, err := SetupCgroups(container, command.Process.Pid) + if err != nil { + command.Process.Kill() + command.Wait() + return -1, err + } + if cleaner != nil { + defer cleaner.Cleanup() + } + + if err := InitializeNetworking(container, command.Process.Pid, syncPipe); err != nil { + command.Process.Kill() + command.Wait() + return -1, err + } + + // Sync with child + syncPipe.Close() + + if startCallback != nil { + startCallback() + } + + if err := command.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); !ok { + return -1, err + } + } + return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil +} + +// DefaultCreateCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces +// defined on the container's configuration and use the current binary as the init with the +// args provided +// +// console: the /dev/console to setup inside the container +// init: the program executed inside the namespaces +// root: the path to the container json file and information +// pipe: sync pipe to synchronize the parent and child processes +// args: the arguments to pass to the container to run as the user's program +func DefaultCreateCommand(container *libcontainer.Container, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd { + // get our binary name from arg0 so we can always reexec ourself + env := []string{ + "console=" + console, + "pipe=3", + "data_path=" + dataPath, + } + + /* + TODO: move user and wd into env + if user != "" { + env = append(env, "user="+user) + } + if workingDir != "" { + env = append(env, "wd="+workingDir) + } + */ + + command := exec.Command(init, append([]string{"init"}, args...)...) + // make sure the process is executed inside the context of the rootfs + command.Dir = rootfs + command.Env = append(os.Environ(), env...)
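+ // the clone flags are derived from the container's Namespaces map; each enabled + // entry (e.g. NEWNET, NEWPID) contributes its CLONE_NEW* bit to the child's clone(2)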
+ + system.SetCloneFlags(command, uintptr(GetNamespaceFlags(container.Namespaces))) + command.SysProcAttr.Pdeathsig = syscall.SIGKILL + command.ExtraFiles = []*os.File{pipe} + + return command +} + +// SetupCgroups applies the cgroup restrictions to the process running in the container based +// on the container's configuration +func SetupCgroups(container *libcontainer.Container, nspid int) (cgroups.ActiveCgroup, error) { + if container.Cgroups != nil { + c := container.Cgroups + if systemd.UseSystemd() { + return systemd.Apply(c, nspid) + } + return fs.Apply(c, nspid) + } + return nil, nil +} + +// InitializeNetworking creates the container's network stack outside of the namespace and moves +// interfaces into the container's net namespaces if necessary +func InitializeNetworking(container *libcontainer.Container, nspid int, pipe *SyncPipe) error { + context := libcontainer.Context{} + for _, config := range container.Networks { + strategy, err := network.GetStrategy(config.Type) + if err != nil { + return err + } + if err := strategy.Create(config, nspid, context); err != nil { + return err + } + } + return pipe.SendToChild(context) +} + +// GetNamespaceFlags parses the container's Namespaces options to set the correct +// flags on clone, unshare, and setns +func GetNamespaceFlags(namespaces map[string]bool) (flag int) { + for key, enabled := range namespaces { + if enabled { + if ns := libcontainer.GetNamespace(key); ns != nil { + flag |= ns.Value + } + } + } + return flag +} diff --git a/pkg/libcontainer/namespaces/execin.go b/pkg/libcontainer/namespaces/execin.go new file mode 100644 index 0000000000..4d5671e778 --- /dev/null +++ b/pkg/libcontainer/namespaces/execin.go @@ -0,0 +1,56 @@ +// +build linux + +package namespaces + +import ( + "encoding/json" + "os" + "strconv" + + "github.com/dotcloud/docker/pkg/label" + "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/system" +) + +// ExecIn uses an existing pid and joins the pid's namespaces with the new command. +func ExecIn(container *libcontainer.Container, nspid int, args []string) error { + // TODO(vmarmol): If this gets too long, send it over a pipe to the child. + // Marshal the container into JSON since it won't be available in the namespace. + containerJson, err := json.Marshal(container) + if err != nil { + return err + } + + // TODO(vmarmol): Move this to the container JSON. + processLabel, err := label.GetPidCon(nspid) + if err != nil { + return err + } + + // Enter the namespace and then finish setup + finalArgs := []string{os.Args[0], "nsenter", strconv.Itoa(nspid), processLabel, string(containerJson)} + finalArgs = append(finalArgs, args...) + if err := system.Execv(finalArgs[0], finalArgs[0:], os.Environ()); err != nil { + return err + } + panic("unreachable") +} + +// NsEnter is run after entering the namespace.
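+// By this point the C constructor in nsenter.go has already performed the setns +// calls before the Go runtime started, so only environment, process label, and +// FinalizeNamespace setup remain to be done here.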
+func NsEnter(container *libcontainer.Container, processLabel string, nspid int, args []string) error { + // clear the current process's env and replace it with the environment + // defined on the container + if err := LoadContainerEnvironment(container); err != nil { + return err + } + if err := FinalizeNamespace(container); err != nil { + return err + } + if err := label.SetProcessLabel(processLabel); err != nil { + return err + } + if err := system.Execv(args[0], args[0:], container.Env); err != nil { + return err + } + panic("unreachable") +} diff --git a/pkg/libcontainer/namespaces/init.go b/pkg/libcontainer/namespaces/init.go new file mode 100644 index 0000000000..b53c56668d --- /dev/null +++ b/pkg/libcontainer/namespaces/init.go @@ -0,0 +1,235 @@ +// +build linux + +package namespaces + +import ( + "fmt" + "os" + "runtime" + "strings" + "syscall" + + "github.com/dotcloud/docker/pkg/apparmor" + "github.com/dotcloud/docker/pkg/label" + "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/console" + "github.com/dotcloud/docker/pkg/libcontainer/mount" + "github.com/dotcloud/docker/pkg/libcontainer/network" + "github.com/dotcloud/docker/pkg/libcontainer/security/capabilities" + "github.com/dotcloud/docker/pkg/libcontainer/security/restrict" + "github.com/dotcloud/docker/pkg/libcontainer/utils" + "github.com/dotcloud/docker/pkg/netlink" + "github.com/dotcloud/docker/pkg/system" + "github.com/dotcloud/docker/pkg/user" +) + +// Init is the init process that first runs inside a new namespace to setup mounts, users, networking, +// and other options required for the new container. +func Init(container *libcontainer.Container, uncleanRootfs, consolePath string, syncPipe *SyncPipe, args []string) error { + rootfs, err := utils.ResolveRootfs(uncleanRootfs) + if err != nil { + return err + } + + // clear the current process's env and replace it with the environment + // defined on the container + if err := LoadContainerEnvironment(container); err != nil { + return err + } + + // We always read this as it is a way to sync with the parent as well + context, err := syncPipe.ReadFromParent() + if err != nil { + syncPipe.Close() + return err + } + syncPipe.Close() + + if consolePath != "" { + if err := console.OpenAndDup(consolePath); err != nil { + return err + } + } + if _, err := system.Setsid(); err != nil { + return fmt.Errorf("setsid %s", err) + } + if consolePath != "" { + if err := system.Setctty(); err != nil { + return fmt.Errorf("setctty %s", err) + } + } + if err := setupNetwork(container, context); err != nil { + return fmt.Errorf("setup networking %s", err) + } + if err := setupRoute(container); err != nil { + return fmt.Errorf("setup route %s", err) + } + + label.Init() + + if err := mount.InitializeMountNamespace(rootfs, consolePath, container); err != nil { + return fmt.Errorf("setup mount namespace %s", err) + } + if container.Hostname != "" { + if err := system.Sethostname(container.Hostname); err != nil { + return fmt.Errorf("sethostname %s", err) + } + } + + runtime.LockOSThread() + + if err := apparmor.ApplyProfile(container.Context["apparmor_profile"]); err != nil { + return fmt.Errorf("set apparmor profile %s: %s", container.Context["apparmor_profile"], err) + } + if err := label.SetProcessLabel(container.Context["process_label"]); err != nil { + return fmt.Errorf("set process label %s", err) + } + if container.Context["restrictions"] != "" { + if err := restrict.Restrict("proc/sys", "proc/sysrq-trigger", "proc/irq", "proc/bus", "sys");
err != nil { + return err + } + } + + pdeathSignal, err := system.GetParentDeathSignal() + if err != nil { + return fmt.Errorf("get parent death signal %s", err) + } + + if err := FinalizeNamespace(container); err != nil { + return fmt.Errorf("finalize namespace %s", err) + } + + // FinalizeNamespace can change user/group which clears the parent death + // signal, so we restore it here. + if err := RestoreParentDeathSignal(pdeathSignal); err != nil { + return fmt.Errorf("restore parent death signal %s", err) + } + + return system.Execv(args[0], args[0:], container.Env) +} + +// RestoreParentDeathSignal sets the parent death signal to old. +func RestoreParentDeathSignal(old int) error { + if old == 0 { + return nil + } + + current, err := system.GetParentDeathSignal() + if err != nil { + return fmt.Errorf("get parent death signal %s", err) + } + + if old == current { + return nil + } + + if err := system.ParentDeathSignal(uintptr(old)); err != nil { + return fmt.Errorf("set parent death signal %s", err) + } + + // Signal self if parent is already dead. Does nothing if running in a new + // PID namespace, as Getppid will always return 0. + if syscall.Getppid() == 1 { + return syscall.Kill(syscall.Getpid(), syscall.SIGKILL) + } + + return nil +} + +// SetupUser changes the groups, gid, and uid for the user inside the container +func SetupUser(u string) error { + uid, gid, suppGids, err := user.GetUserGroupSupplementary(u, syscall.Getuid(), syscall.Getgid()) + if err != nil { + return fmt.Errorf("get supplementary groups %s", err) + } + if err := system.Setgroups(suppGids); err != nil { + return fmt.Errorf("setgroups %s", err) + } + if err := system.Setgid(gid); err != nil { + return fmt.Errorf("setgid %s", err) + } + if err := system.Setuid(uid); err != nil { + return fmt.Errorf("setuid %s", err) + } + return nil +} + +// setupNetwork uses the Network config if it is not nil to initialize +// the new veth interface inside the container for use by changing the name to eth0 +// setting the MTU and IP address along with the default gateway +func setupNetwork(container *libcontainer.Container, context libcontainer.Context) error { + for _, config := range container.Networks { + strategy, err := network.GetStrategy(config.Type) + if err != nil { + return err + } + + err1 := strategy.Initialize(config, context) + if err1 != nil { + return err1 + } + } + return nil +} + +func setupRoute(container *libcontainer.Container) error { + for _, config := range container.Routes { + if err := netlink.AddRoute(config.Destination, config.Source, config.Gateway, config.InterfaceName); err != nil { + return err + } + } + return nil +} + +// FinalizeNamespace drops the caps, sets the correct user +// and working dir, and closes any leaky file descriptors +// before execing the command inside the namespace +func FinalizeNamespace(container *libcontainer.Container) error { + if err := system.CloseFdsFrom(3); err != nil { + return fmt.Errorf("close open file descriptors %s", err) + } + + // drop capabilities in bounding set before changing user + if err := capabilities.DropBoundingSet(container); err != nil { + return fmt.Errorf("drop bounding set %s", err) + } + + // preserve existing capabilities while we change users + if err := system.SetKeepCaps(); err != nil { + return fmt.Errorf("set keep caps %s", err) + } + + if err := SetupUser(container.User); err != nil { + return fmt.Errorf("setup user %s", err) + } + + if err := system.ClearKeepCaps(); err != nil { + return fmt.Errorf("clear keep caps %s", err) + }
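+ // without the keep-caps flag set above, the setuid call in SetupUser would have + // emptied the permitted capability set; clearing the flag restores normal semantics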
+
+	// drop all other capabilities
+	if err := capabilities.DropCapabilities(container); err != nil {
+		return fmt.Errorf("drop capabilities %s", err)
+	}
+
+	if container.WorkingDir != "" {
+		if err := system.Chdir(container.WorkingDir); err != nil {
+			return fmt.Errorf("chdir to %s %s", container.WorkingDir, err)
+		}
+	}
+	return nil
+}
+
+func LoadContainerEnvironment(container *libcontainer.Container) error {
+	os.Clearenv()
+	for _, pair := range container.Env {
+		p := strings.SplitN(pair, "=", 2)
+		if len(p) < 2 {
+			return fmt.Errorf("invalid environment '%v'", pair)
+		}
+		if err := os.Setenv(p[0], p[1]); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/pkg/libcontainer/namespaces/nsenter.go b/pkg/libcontainer/namespaces/nsenter.go
new file mode 100644
index 0000000000..d5c2e761b7
--- /dev/null
+++ b/pkg/libcontainer/namespaces/nsenter.go
@@ -0,0 +1,143 @@
+package namespaces
+
+/*
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+static const int kBufSize = 256;
+
+void get_args(int *argc, char ***argv) {
+	// Read argv
+	int fd = open("/proc/self/cmdline", O_RDONLY);
+
+	// Read the whole commandline.
+	ssize_t contents_size = 0;
+	ssize_t contents_offset = 0;
+	char *contents = NULL;
+	ssize_t bytes_read = 0;
+	do {
+		contents_size += kBufSize;
+		contents = (char *) realloc(contents, contents_size);
+		bytes_read = read(fd, contents + contents_offset, contents_size - contents_offset);
+		contents_offset += bytes_read;
+	} while (bytes_read > 0);
+	close(fd);
+
+	// Parse the commandline into an argv. /proc/self/cmdline has \0 delimited args.
+	ssize_t i;
+	*argc = 0;
+	for (i = 0; i < contents_offset; i++) {
+		if (contents[i] == '\0') {
+			(*argc)++;
+		}
+	}
+	*argv = (char **) malloc(sizeof(char *) * ((*argc) + 1));
+	int idx;
+	for (idx = 0; idx < (*argc); idx++) {
+		(*argv)[idx] = contents;
+		contents += strlen(contents) + 1;
+	}
+	(*argv)[*argc] = NULL;
+}
+
+void nsenter() {
+	int argc;
+	char **argv;
+	get_args(&argc, &argv);
+
+	// Ignore if this is not for us.
+	if (argc < 2 || strcmp(argv[1], "nsenter") != 0) {
+		return;
+	}
+
+	// USAGE: nsenter <pid> <process label> <container JSON> <cmd>...
+	if (argc < 6) {
+		fprintf(stderr, "nsenter: Incorrect usage, not enough arguments\n");
+		exit(1);
+	}
+	pid_t init_pid = strtol(argv[2], NULL, 10);
+	if (errno != 0 || init_pid <= 0) {
+		fprintf(stderr, "nsenter: Failed to parse PID from \"%s\" with error: \"%s\"\n", argv[2], strerror(errno));
+		exit(1);
+	}
+	argc -= 3;
+	argv += 3;
+
+	// Setns on all supported namespaces.
+	char ns_dir[kBufSize];
+	memset(ns_dir, 0, kBufSize);
+	if (snprintf(ns_dir, kBufSize - 1, "/proc/%d/ns/", init_pid) < 0) {
+		fprintf(stderr, "nsenter: Error getting ns dir path with error: \"%s\"\n", strerror(errno));
+		exit(1);
+	}
+	struct dirent *dent;
+	DIR *dir = opendir(ns_dir);
+	if (dir == NULL) {
+		fprintf(stderr, "nsenter: Failed to open directory \"%s\" with error: \"%s\"\n", ns_dir, strerror(errno));
+		exit(1);
+	}
+
+	while((dent = readdir(dir)) != NULL) {
+		if(strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0 || strcmp(dent->d_name, "user") == 0) {
+			continue;
+		}
+
+		// Get and open the namespace for the init we are joining.
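+		// (The "user" entry is skipped in the loop above because setns(2)
+		// imposes extra restrictions on joining a user namespace; all other
+		// entries under /proc/<pid>/ns are joined directly.)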
+		char buf[kBufSize];
+		memset(buf, 0, kBufSize);
+		strncat(buf, ns_dir, kBufSize - 1);
+		strncat(buf, dent->d_name, kBufSize - strlen(buf) - 1);
+		int fd = open(buf, O_RDONLY);
+		if (fd == -1) {
+			fprintf(stderr, "nsenter: Failed to open ns file \"%s\" for ns \"%s\" with error: \"%s\"\n", buf, dent->d_name, strerror(errno));
+			exit(1);
+		}
+
+		// Set the namespace.
+		if (setns(fd, 0) == -1) {
+			fprintf(stderr, "nsenter: Failed to setns for \"%s\" with error: \"%s\"\n", dent->d_name, strerror(errno));
+			exit(1);
+		}
+		close(fd);
+	}
+	closedir(dir);
+
+	// We must fork to actually enter the PID namespace.
+	int child = fork();
+	if (child == 0) {
+		// Finish executing, let the Go runtime take over.
+		return;
+	} else {
+		// Parent, wait for the child.
+		int status = 0;
+		if (waitpid(child, &status, 0) == -1) {
+			fprintf(stderr, "nsenter: Failed to waitpid with error: \"%s\"\n", strerror(errno));
+			exit(1);
+		}
+
+		// Forward the child's exit code or re-send its death signal.
+		if (WIFEXITED(status)) {
+			exit(WEXITSTATUS(status));
+		} else if (WIFSIGNALED(status)) {
+			kill(getpid(), WTERMSIG(status));
+		}
+		exit(1);
+	}
+
+	return;
+}
+
+__attribute__((constructor)) void init() {
+	nsenter();
+}
+*/
+import "C"
diff --git a/pkg/libcontainer/nsinit/pid.go b/pkg/libcontainer/namespaces/pid.go
similarity index 97%
rename from pkg/libcontainer/nsinit/pid.go
rename to pkg/libcontainer/namespaces/pid.go
index bba2f10e1b..8d97ec1463 100644
--- a/pkg/libcontainer/nsinit/pid.go
+++ b/pkg/libcontainer/namespaces/pid.go
@@ -1,4 +1,4 @@
-package nsinit
+package namespaces
 
 import (
 	"fmt"
diff --git a/pkg/libcontainer/nsinit/std_term.go b/pkg/libcontainer/namespaces/std_term.go
similarity index 97%
rename from pkg/libcontainer/nsinit/std_term.go
rename to pkg/libcontainer/namespaces/std_term.go
index 2b8201a71b..324336af28 100644
--- a/pkg/libcontainer/nsinit/std_term.go
+++ b/pkg/libcontainer/namespaces/std_term.go
@@ -1,4 +1,4 @@
-package nsinit
+package namespaces
 
 import (
 	"io"
diff --git a/pkg/libcontainer/nsinit/sync_pipe.go b/pkg/libcontainer/namespaces/sync_pipe.go
similarity index 98%
rename from pkg/libcontainer/nsinit/sync_pipe.go
rename to pkg/libcontainer/namespaces/sync_pipe.go
index d0bfdda865..e12ed447fa 100644
--- a/pkg/libcontainer/nsinit/sync_pipe.go
+++ b/pkg/libcontainer/namespaces/sync_pipe.go
@@ -1,4 +1,4 @@
-package nsinit
+package namespaces
 
 import (
 	"encoding/json"
diff --git a/pkg/libcontainer/nsinit/term.go b/pkg/libcontainer/namespaces/term.go
similarity index 95%
rename from pkg/libcontainer/nsinit/term.go
rename to pkg/libcontainer/namespaces/term.go
index 5fc801ab53..2a50bf8554 100644
--- a/pkg/libcontainer/nsinit/term.go
+++ b/pkg/libcontainer/namespaces/term.go
@@ -1,4 +1,4 @@
-package nsinit
+package namespaces
 
 import (
 	"io"
diff --git a/pkg/libcontainer/nsinit/tty_term.go b/pkg/libcontainer/namespaces/tty_term.go
similarity index 96%
rename from pkg/libcontainer/nsinit/tty_term.go
rename to pkg/libcontainer/namespaces/tty_term.go
index fcbd085c82..272cf2cd65 100644
--- a/pkg/libcontainer/nsinit/tty_term.go
+++ b/pkg/libcontainer/namespaces/tty_term.go
@@ -1,4 +1,4 @@
-package nsinit
+package namespaces
 
 import (
 	"io"
@@ -28,10 +28,11 @@ func (t *TtyTerminal) Attach(command *exec.Cmd) error {
 	go io.Copy(t.master, t.stdin)
 
 	state, err := t.setupWindow(t.master, os.Stdin)
+
 	if err != nil {
-		command.Process.Kill()
 		return err
 	}
+
 	t.state = state
 	return err
 }
diff --git a/pkg/libcontainer/nsinit/unsupported.go b/pkg/libcontainer/namespaces/unsupported.go
similarity index 91%
rename from
pkg/libcontainer/nsinit/unsupported.go rename to pkg/libcontainer/namespaces/unsupported.go index 929b3dba5b..b459b4d2f5 100644 --- a/pkg/libcontainer/nsinit/unsupported.go +++ b/pkg/libcontainer/namespaces/unsupported.go @@ -1,10 +1,10 @@ // +build !linux -package nsinit +package namespaces import ( - "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) { diff --git a/pkg/libcontainer/network/network.go b/pkg/libcontainer/network/network.go index f8dee45278..85a28dc37c 100644 --- a/pkg/libcontainer/network/network.go +++ b/pkg/libcontainer/network/network.go @@ -53,8 +53,8 @@ func SetInterfaceMaster(name, master string) error { return netlink.AddToBridge(iface, masterIface) } -func SetDefaultGateway(ip string) error { - return netlink.AddDefaultGw(net.ParseIP(ip)) +func SetDefaultGateway(ip, ifaceName string) error { + return netlink.AddDefaultGw(ip, ifaceName) } func SetInterfaceIp(name string, rawIp string) error { diff --git a/pkg/libcontainer/network/veth.go b/pkg/libcontainer/network/veth.go index 3df0cd61ee..d3be221c60 100644 --- a/pkg/libcontainer/network/veth.go +++ b/pkg/libcontainer/network/veth.go @@ -12,6 +12,8 @@ import ( type Veth struct { } +const defaultDevice = "eth0" + func (v *Veth) Create(n *libcontainer.Network, nspid int, context libcontainer.Context) error { var ( bridge string @@ -56,21 +58,21 @@ func (v *Veth) Initialize(config *libcontainer.Network, context libcontainer.Con if err := InterfaceDown(vethChild); err != nil { return fmt.Errorf("interface down %s %s", vethChild, err) } - if err := ChangeInterfaceName(vethChild, "eth0"); err != nil { - return fmt.Errorf("change %s to eth0 %s", vethChild, err) + if err := ChangeInterfaceName(vethChild, defaultDevice); err != nil { + return fmt.Errorf("change %s to %s %s", vethChild, defaultDevice, err) } - if err := SetInterfaceIp("eth0", config.Address); err != nil { - return fmt.Errorf("set eth0 ip %s", err) + if err := SetInterfaceIp(defaultDevice, config.Address); err != nil { + return fmt.Errorf("set %s ip %s", defaultDevice, err) } - if err := SetMtu("eth0", config.Mtu); err != nil { - return fmt.Errorf("set eth0 mtu to %d %s", config.Mtu, err) + if err := SetMtu(defaultDevice, config.Mtu); err != nil { + return fmt.Errorf("set %s mtu to %d %s", defaultDevice, config.Mtu, err) } - if err := InterfaceUp("eth0"); err != nil { - return fmt.Errorf("eth0 up %s", err) + if err := InterfaceUp(defaultDevice); err != nil { + return fmt.Errorf("%s up %s", defaultDevice, err) } if config.Gateway != "" { - if err := SetDefaultGateway(config.Gateway); err != nil { - return fmt.Errorf("set gateway to %s %s", config.Gateway, err) + if err := SetDefaultGateway(config.Gateway, defaultDevice); err != nil { + return fmt.Errorf("set gateway to %s on device %s failed with %s", config.Gateway, defaultDevice, err) } } return nil diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go index 5d0d772a0f..d4ce1ca8c4 100644 --- a/pkg/libcontainer/nsinit/exec.go +++ b/pkg/libcontainer/nsinit/exec.go @@ -1,171 +1,76 @@ -// +build linux - -package nsinit +package main import ( + "fmt" + "log" "os" "os/exec" - "syscall" + "os/signal" - "github.com/dotcloud/docker/pkg/cgroups" - "github.com/dotcloud/docker/pkg/cgroups/fs" - "github.com/dotcloud/docker/pkg/cgroups/systemd" 
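A standalone sketch of the signal-forwarding pattern that the rewritten exec command below wires into its start callback (the helper name and the sleep child are illustrative, not part of the patch):

	package main

	import (
		"os"
		"os/exec"
		"os/signal"
	)

	// forwardSignals relays every signal delivered to this process to cmd's
	// process. cmd must already have been started.
	func forwardSignals(cmd *exec.Cmd) {
		sigc := make(chan os.Signal, 10)
		signal.Notify(sigc)
		go func() {
			for sig := range sigc {
				cmd.Process.Signal(sig)
			}
		}()
	}

	func main() {
		cmd := exec.Command("sleep", "60")
		if err := cmd.Start(); err != nil {
			os.Exit(1)
		}
		forwardSignals(cmd)
		cmd.Wait()
	}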
+ "github.com/codegangsta/cli" "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/network" - "github.com/dotcloud/docker/pkg/system" + "github.com/dotcloud/docker/pkg/libcontainer/namespaces" ) -// Exec performes setup outside of a namespace so that a container can be -// executed. Exec is a high level function for working with container namespaces. -func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) { +var execCommand = cli.Command{ + Name: "exec", + Usage: "execute a new command inside a container", + Action: execAction, +} + +func execAction(context *cli.Context) { + var nspid, exitCode int + + container, err := loadContainer() + if err != nil { + log.Fatal(err) + } + + if nspid, err = readPid(); err != nil && !os.IsNotExist(err) { + log.Fatalf("unable to read pid: %s", err) + } + + if nspid > 0 { + err = namespaces.ExecIn(container, nspid, []string(context.Args())) + } else { + term := namespaces.NewTerminal(os.Stdin, os.Stdout, os.Stderr, container.Tty) + exitCode, err = startContainer(container, term, dataPath, []string(context.Args())) + } + + if err != nil { + log.Fatalf("failed to exec: %s", err) + } + + os.Exit(exitCode) +} + +// startContainer starts the container. Returns the exit status or -1 and an +// error. +// +// Signals sent to the current process will be forwarded to container. +func startContainer(container *libcontainer.Container, term namespaces.Terminal, dataPath string, args []string) (int, error) { var ( - master *os.File - console string - err error + cmd *exec.Cmd + sigc = make(chan os.Signal, 10) ) - // create a pipe so that we can syncronize with the namespaced process and - // pass the veth name to the child - syncPipe, err := NewSyncPipe() - if err != nil { - return -1, err - } + signal.Notify(sigc) - if container.Tty { - master, console, err = system.CreateMasterAndConsole() - if err != nil { - return -1, err + createCommand := func(container *libcontainer.Container, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd { + cmd = namespaces.DefaultCreateCommand(container, console, rootfs, dataPath, init, pipe, args) + if logPath != "" { + cmd.Env = append(cmd.Env, fmt.Sprintf("log=%s", logPath)) } - term.SetMaster(master) + return cmd } - command := createCommand(container, console, rootfs, dataPath, os.Args[0], syncPipe.child, args) - if err := term.Attach(command); err != nil { - return -1, err - } - defer term.Close() - - if err := command.Start(); err != nil { - return -1, err - } - - started, err := system.GetProcessStartTime(command.Process.Pid) - if err != nil { - return -1, err - } - if err := WritePid(dataPath, command.Process.Pid, started); err != nil { - command.Process.Kill() - return -1, err - } - defer DeletePid(dataPath) - - // Do this before syncing with child so that no children - // can escape the cgroup - cleaner, err := SetupCgroups(container, command.Process.Pid) - if err != nil { - command.Process.Kill() - return -1, err - } - if cleaner != nil { - defer cleaner.Cleanup() - } - - if err := InitializeNetworking(container, command.Process.Pid, syncPipe); err != nil { - command.Process.Kill() - return -1, err - } - - // Sync with child - syncPipe.Close() - - if startCallback != nil { - startCallback() - } - - if err := command.Wait(); err != nil { - if _, ok := err.(*exec.ExitError); !ok { - return -1, err - } - } - return 
command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil -} - -// DefaultCreateCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces -// defined on the container's configuration and use the current binary as the init with the -// args provided -// -// console: the /dev/console to setup inside the container -// init: the progam executed inside the namespaces -// root: the path to the container json file and information -// pipe: sync pipe to syncronize the parent and child processes -// args: the arguemnts to pass to the container to run as the user's program -func DefaultCreateCommand(container *libcontainer.Container, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd { - // get our binary name from arg0 so we can always reexec ourself - env := []string{ - "console=" + console, - "pipe=3", - "data_path=" + dataPath, - } - - /* - TODO: move user and wd into env - if user != "" { - env = append(env, "user="+user) - } - if workingDir != "" { - env = append(env, "wd="+workingDir) - } - */ - - command := exec.Command(init, append([]string{"init"}, args...)...) - // make sure the process is executed inside the context of the rootfs - command.Dir = rootfs - command.Env = append(os.Environ(), env...) - - system.SetCloneFlags(command, uintptr(GetNamespaceFlags(container.Namespaces))) - command.ExtraFiles = []*os.File{pipe} - - return command -} - -// SetupCgroups applies the cgroup restrictions to the process running in the contaienr based -// on the container's configuration -func SetupCgroups(container *libcontainer.Container, nspid int) (cgroups.ActiveCgroup, error) { - if container.Cgroups != nil { - c := container.Cgroups - if systemd.UseSystemd() { - return systemd.Apply(c, nspid) - } - return fs.Apply(c, nspid) - } - return nil, nil -} - -// InitializeNetworking creates the container's network stack outside of the namespace and moves -// interfaces into the container's net namespaces if necessary -func InitializeNetworking(container *libcontainer.Container, nspid int, pipe *SyncPipe) error { - context := libcontainer.Context{} - for _, config := range container.Networks { - strategy, err := network.GetStrategy(config.Type) - if err != nil { - return err - } - if err := strategy.Create(config, nspid, context); err != nil { - return err - } - } - return pipe.SendToChild(context) -} - -// GetNamespaceFlags parses the container's Namespaces options to set the correct -// flags on clone, unshare, and setns -func GetNamespaceFlags(namespaces map[string]bool) (flag int) { - for key, enabled := range namespaces { - if enabled { - if ns := libcontainer.GetNamespace(key); ns != nil { - flag |= ns.Value + startCallback := func() { + go func() { + for sig := range sigc { + cmd.Process.Signal(sig) } - } + }() } - return flag + + return namespaces.Exec(container, term, "", dataPath, args, createCommand, startCallback) } diff --git a/pkg/libcontainer/nsinit/execin.go b/pkg/libcontainer/nsinit/execin.go deleted file mode 100644 index 40b95093dd..0000000000 --- a/pkg/libcontainer/nsinit/execin.go +++ /dev/null @@ -1,121 +0,0 @@ -// +build linux - -package nsinit - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - "syscall" - - "github.com/dotcloud/docker/pkg/label" - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/mount" - "github.com/dotcloud/docker/pkg/system" -) - -// ExecIn uses an existing pid and joins the pid's namespaces with the new command. 
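For reference, a minimal sketch of what GetNamespaceFlags above computes: the enabled entries of the container's Namespaces map OR-ed into a single clone(2) flags word (constants from the standard syscall package; the explicit table here stands in for libcontainer's namespace registry):

	func cloneFlags(namespaces map[string]bool) (flag int) {
		values := map[string]int{
			"NEWNS":  syscall.CLONE_NEWNS,
			"NEWUTS": syscall.CLONE_NEWUTS,
			"NEWIPC": syscall.CLONE_NEWIPC,
			"NEWPID": syscall.CLONE_NEWPID,
			"NEWNET": syscall.CLONE_NEWNET,
		}
		for key, enabled := range namespaces {
			if enabled {
				flag |= values[key]
			}
		}
		return flag
	}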
-func ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) { - // clear the current processes env and replace it with the environment - // defined on the container - if err := LoadContainerEnvironment(container); err != nil { - return -1, err - } - - for key, enabled := range container.Namespaces { - // skip the PID namespace on unshare because it it not supported - if enabled && key != "NEWPID" { - if ns := libcontainer.GetNamespace(key); ns != nil { - if err := system.Unshare(ns.Value); err != nil { - return -1, err - } - } - } - } - fds, err := getNsFds(nspid, container) - closeFds := func() { - for _, f := range fds { - system.Closefd(f) - } - } - if err != nil { - closeFds() - return -1, err - } - processLabel, err := label.GetPidCon(nspid) - if err != nil { - closeFds() - return -1, err - } - // foreach namespace fd, use setns to join an existing container's namespaces - for _, fd := range fds { - if fd > 0 { - if err := system.Setns(fd, 0); err != nil { - closeFds() - return -1, fmt.Errorf("setns %s", err) - } - } - system.Closefd(fd) - } - - // if the container has a new pid and mount namespace we need to - // remount proc and sys to pick up the changes - if container.Namespaces["NEWNS"] && container.Namespaces["NEWPID"] { - pid, err := system.Fork() - if err != nil { - return -1, err - } - if pid == 0 { - // TODO: make all raw syscalls to be fork safe - if err := system.Unshare(syscall.CLONE_NEWNS); err != nil { - return -1, err - } - if err := mount.RemountProc(); err != nil { - return -1, fmt.Errorf("remount proc %s", err) - } - if err := mount.RemountSys(); err != nil { - return -1, fmt.Errorf("remount sys %s", err) - } - goto dropAndExec - } - proc, err := os.FindProcess(pid) - if err != nil { - return -1, err - } - state, err := proc.Wait() - if err != nil { - return -1, err - } - os.Exit(state.Sys().(syscall.WaitStatus).ExitStatus()) - } -dropAndExec: - if err := FinalizeNamespace(container); err != nil { - return -1, err - } - err = label.SetProcessLabel(processLabel) - if err != nil { - return -1, err - } - if err := system.Execv(args[0], args[0:], container.Env); err != nil { - return -1, err - } - panic("unreachable") -} - -func getNsFds(pid int, container *libcontainer.Container) ([]uintptr, error) { - fds := []uintptr{} - - for key, enabled := range container.Namespaces { - if enabled { - if ns := libcontainer.GetNamespace(key); ns != nil { - f, err := os.OpenFile(filepath.Join("/proc/", strconv.Itoa(pid), "ns", ns.File), os.O_RDONLY, 0) - if err != nil { - return fds, err - } - fds = append(fds, f.Fd()) - } - } - } - return fds, nil -} diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 3bbcfcc654..20096f0218 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -1,160 +1,48 @@ -// +build linux - -package nsinit +package main import ( - "fmt" + "log" "os" - "runtime" - "strings" - "syscall" + "strconv" - "github.com/dotcloud/docker/pkg/apparmor" - "github.com/dotcloud/docker/pkg/label" - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/console" - "github.com/dotcloud/docker/pkg/libcontainer/mount" - "github.com/dotcloud/docker/pkg/libcontainer/network" - "github.com/dotcloud/docker/pkg/libcontainer/security/capabilities" - "github.com/dotcloud/docker/pkg/libcontainer/security/restrict" - "github.com/dotcloud/docker/pkg/libcontainer/utils" - "github.com/dotcloud/docker/pkg/system" - "github.com/dotcloud/docker/pkg/user" + 
"github.com/codegangsta/cli" + "github.com/dotcloud/docker/pkg/libcontainer/namespaces" ) -// Init is the init process that first runs inside a new namespace to setup mounts, users, networking, -// and other options required for the new container. -func Init(container *libcontainer.Container, uncleanRootfs, consolePath string, syncPipe *SyncPipe, args []string) error { - rootfs, err := utils.ResolveRootfs(uncleanRootfs) +var ( + dataPath = os.Getenv("data_path") + console = os.Getenv("console") + rawPipeFd = os.Getenv("pipe") + + initCommand = cli.Command{ + Name: "init", + Usage: "runs the init process inside the namespace", + Action: initAction, + } +) + +func initAction(context *cli.Context) { + container, err := loadContainer() if err != nil { - return err + log.Fatal(err) } - // clear the current processes env and replace it with the environment - // defined on the container - if err := LoadContainerEnvironment(container); err != nil { - return err - } - - // We always read this as it is a way to sync with the parent as well - context, err := syncPipe.ReadFromParent() + rootfs, err := os.Getwd() if err != nil { - syncPipe.Close() - return err - } - syncPipe.Close() - - if consolePath != "" { - if err := console.OpenAndDup(consolePath); err != nil { - return err - } - } - if _, err := system.Setsid(); err != nil { - return fmt.Errorf("setsid %s", err) - } - if consolePath != "" { - if err := system.Setctty(); err != nil { - return fmt.Errorf("setctty %s", err) - } - } - if err := setupNetwork(container, context); err != nil { - return fmt.Errorf("setup networking %s", err) + log.Fatal(err) } - label.Init() - - if err := mount.InitializeMountNamespace(rootfs, consolePath, container); err != nil { - return fmt.Errorf("setup mount namespace %s", err) - } - if container.Hostname != "" { - if err := system.Sethostname(container.Hostname); err != nil { - return fmt.Errorf("sethostname %s", err) - } - } - - runtime.LockOSThread() - - if err := apparmor.ApplyProfile(container.Context["apparmor_profile"]); err != nil { - return fmt.Errorf("set apparmor profile %s: %s", container.Context["apparmor_profile"], err) - } - if err := label.SetProcessLabel(container.Context["process_label"]); err != nil { - return fmt.Errorf("set process label %s", err) - } - if container.Context["restrictions"] != "" { - if err := restrict.Restrict("proc", "sys"); err != nil { - return err - } - } - if err := FinalizeNamespace(container); err != nil { - return fmt.Errorf("finalize namespace %s", err) - } - return system.Execv(args[0], args[0:], container.Env) -} - -// SetupUser changes the groups, gid, and uid for the user inside the container -func SetupUser(u string) error { - uid, gid, suppGids, err := user.GetUserGroupSupplementary(u, syscall.Getuid(), syscall.Getgid()) + pipeFd, err := strconv.Atoi(rawPipeFd) if err != nil { - return fmt.Errorf("get supplementary groups %s", err) + log.Fatal(err) } - if err := system.Setgroups(suppGids); err != nil { - return fmt.Errorf("setgroups %s", err) - } - if err := system.Setgid(gid); err != nil { - return fmt.Errorf("setgid %s", err) - } - if err := system.Setuid(uid); err != nil { - return fmt.Errorf("setuid %s", err) - } - return nil -} -// setupVethNetwork uses the Network config if it is not nil to initialize -// the new veth interface inside the container for use by changing the name to eth0 -// setting the MTU and IP address along with the default gateway -func setupNetwork(container *libcontainer.Container, context libcontainer.Context) error { - for _, config := 
range container.Networks { - strategy, err := network.GetStrategy(config.Type) - if err != nil { - return err - } + syncPipe, err := namespaces.NewSyncPipeFromFd(0, uintptr(pipeFd)) + if err != nil { + log.Fatalf("unable to create sync pipe: %s", err) + } - err1 := strategy.Initialize(config, context) - if err1 != nil { - return err1 - } + if err := namespaces.Init(container, rootfs, console, syncPipe, []string(context.Args())); err != nil { + log.Fatalf("unable to initialize for container: %s", err) } - return nil -} - -// FinalizeNamespace drops the caps, sets the correct user -// and working dir, and closes any leaky file descriptors -// before execing the command inside the namespace -func FinalizeNamespace(container *libcontainer.Container) error { - if err := capabilities.DropCapabilities(container); err != nil { - return fmt.Errorf("drop capabilities %s", err) - } - if err := system.CloseFdsFrom(3); err != nil { - return fmt.Errorf("close open file descriptors %s", err) - } - if err := SetupUser(container.User); err != nil { - return fmt.Errorf("setup user %s", err) - } - if container.WorkingDir != "" { - if err := system.Chdir(container.WorkingDir); err != nil { - return fmt.Errorf("chdir to %s %s", container.WorkingDir, err) - } - } - return nil -} - -func LoadContainerEnvironment(container *libcontainer.Container) error { - os.Clearenv() - for _, pair := range container.Env { - p := strings.SplitN(pair, "=", 2) - if err := os.Setenv(p[0], p[1]); err != nil { - return err - } - } - return nil } diff --git a/pkg/libcontainer/nsinit/main.go b/pkg/libcontainer/nsinit/main.go new file mode 100644 index 0000000000..20132de0e0 --- /dev/null +++ b/pkg/libcontainer/nsinit/main.go @@ -0,0 +1,40 @@ +package main + +import ( + "log" + "os" + + "github.com/codegangsta/cli" +) + +var logPath = os.Getenv("log") + +func preload(context *cli.Context) error { + if logPath != "" { + if err := openLog(logPath); err != nil { + return err + } + } + + return nil +} + +func main() { + app := cli.NewApp() + app.Name = "nsinit" + app.Version = "0.1" + app.Author = "libcontainer maintainers" + + app.Before = preload + app.Commands = []cli.Command{ + execCommand, + initCommand, + statsCommand, + specCommand, + nsenterCommand, + } + + if err := app.Run(os.Args); err != nil { + log.Fatal(err) + } +} diff --git a/pkg/libcontainer/nsinit/nsenter.go b/pkg/libcontainer/nsinit/nsenter.go new file mode 100644 index 0000000000..54644282d4 --- /dev/null +++ b/pkg/libcontainer/nsinit/nsenter.go @@ -0,0 +1,40 @@ +package main + +import ( + "log" + "strconv" + + "github.com/codegangsta/cli" + "github.com/dotcloud/docker/pkg/libcontainer/namespaces" +) + +var nsenterCommand = cli.Command{ + Name: "nsenter", + Usage: "init process for entering an existing namespace", + Action: nsenterAction, +} + +func nsenterAction(context *cli.Context) { + args := context.Args() + if len(args) < 4 { + log.Fatalf("incorrect usage: ...") + } + + container, err := loadContainerFromJson(args.Get(2)) + if err != nil { + log.Fatalf("unable to load container: %s", err) + } + + nspid, err := strconv.Atoi(args.Get(0)) + if err != nil { + log.Fatalf("unable to read pid: %s from %q", err, args.Get(0)) + } + + if nspid <= 0 { + log.Fatalf("cannot enter into namespaces without valid pid: %q", nspid) + } + + if err := namespaces.NsEnter(container, args.Get(1), nspid, args[3:]); err != nil { + log.Fatalf("failed to nsenter: %s", err) + } +} diff --git a/pkg/libcontainer/nsinit/nsinit/main.go b/pkg/libcontainer/nsinit/nsinit/main.go deleted file mode 
100644 index b5325d40b3..0000000000 --- a/pkg/libcontainer/nsinit/nsinit/main.go +++ /dev/null @@ -1,97 +0,0 @@ -package main - -import ( - "encoding/json" - "io/ioutil" - "log" - "os" - "path/filepath" - "strconv" - - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/nsinit" -) - -var ( - dataPath = os.Getenv("data_path") - console = os.Getenv("console") - rawPipeFd = os.Getenv("pipe") -) - -func main() { - if len(os.Args) < 2 { - log.Fatalf("invalid number of arguments %d", len(os.Args)) - } - - container, err := loadContainer() - if err != nil { - log.Fatalf("unable to load container: %s", err) - } - - switch os.Args[1] { - case "exec": // this is executed outside of the namespace in the cwd - var nspid, exitCode int - if nspid, err = readPid(); err != nil && !os.IsNotExist(err) { - log.Fatalf("unable to read pid: %s", err) - } - - if nspid > 0 { - exitCode, err = nsinit.ExecIn(container, nspid, os.Args[2:]) - } else { - term := nsinit.NewTerminal(os.Stdin, os.Stdout, os.Stderr, container.Tty) - exitCode, err = nsinit.Exec(container, term, "", dataPath, os.Args[2:], nsinit.DefaultCreateCommand, nil) - } - - if err != nil { - log.Fatalf("failed to exec: %s", err) - } - os.Exit(exitCode) - case "init": // this is executed inside of the namespace to setup the container - // by default our current dir is always our rootfs - rootfs, err := os.Getwd() - if err != nil { - log.Fatal(err) - } - - pipeFd, err := strconv.Atoi(rawPipeFd) - if err != nil { - log.Fatal(err) - } - syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(pipeFd)) - if err != nil { - log.Fatalf("unable to create sync pipe: %s", err) - } - - if err := nsinit.Init(container, rootfs, console, syncPipe, os.Args[2:]); err != nil { - log.Fatalf("unable to initialize for container: %s", err) - } - default: - log.Fatalf("command not supported for nsinit %s", os.Args[0]) - } -} - -func loadContainer() (*libcontainer.Container, error) { - f, err := os.Open(filepath.Join(dataPath, "container.json")) - if err != nil { - return nil, err - } - defer f.Close() - - var container *libcontainer.Container - if err := json.NewDecoder(f).Decode(&container); err != nil { - return nil, err - } - return container, nil -} - -func readPid() (int, error) { - data, err := ioutil.ReadFile(filepath.Join(dataPath, "pid")) - if err != nil { - return -1, err - } - pid, err := strconv.Atoi(string(data)) - if err != nil { - return -1, err - } - return pid, nil -} diff --git a/pkg/libcontainer/nsinit/spec.go b/pkg/libcontainer/nsinit/spec.go new file mode 100644 index 0000000000..2eb4da9fc5 --- /dev/null +++ b/pkg/libcontainer/nsinit/spec.go @@ -0,0 +1,40 @@ +package main + +import ( + "encoding/json" + "fmt" + "log" + + "github.com/codegangsta/cli" + "github.com/dotcloud/docker/pkg/libcontainer" +) + +var specCommand = cli.Command{ + Name: "spec", + Usage: "display the container specification", + Action: specAction, +} + +func specAction(context *cli.Context) { + container, err := loadContainer() + if err != nil { + log.Fatal(err) + } + + spec, err := getContainerSpec(container) + if err != nil { + log.Fatalf("Failed to get spec - %v\n", err) + } + + fmt.Printf("Spec:\n%v\n", spec) +} + +// returns the container spec in json format. 
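+// A usage sketch: `nsinit spec` prints exactly this string, via specAction
+// above:
+//
+//	spec, _ := getContainerSpec(container)
+//	fmt.Printf("Spec:\n%v\n", spec)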
+func getContainerSpec(container *libcontainer.Container) (string, error) { + spec, err := json.MarshalIndent(container, "", "\t") + if err != nil { + return "", err + } + + return string(spec), nil +} diff --git a/pkg/libcontainer/nsinit/stats.go b/pkg/libcontainer/nsinit/stats.go new file mode 100644 index 0000000000..023b40a822 --- /dev/null +++ b/pkg/libcontainer/nsinit/stats.go @@ -0,0 +1,46 @@ +package main + +import ( + "encoding/json" + "fmt" + "log" + + "github.com/codegangsta/cli" + "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/cgroups/fs" +) + +var statsCommand = cli.Command{ + Name: "stats", + Usage: "display statistics for the container", + Action: statsAction, +} + +func statsAction(context *cli.Context) { + container, err := loadContainer() + if err != nil { + log.Fatal(err) + } + + stats, err := getContainerStats(container) + if err != nil { + log.Fatalf("Failed to get stats - %v\n", err) + } + + fmt.Printf("Stats:\n%v\n", stats) +} + +// returns the container stats in json format. +func getContainerStats(container *libcontainer.Container) (string, error) { + stats, err := fs.GetStats(container.Cgroups) + if err != nil { + return "", err + } + + out, err := json.MarshalIndent(stats, "", "\t") + if err != nil { + return "", err + } + + return string(out), nil +} diff --git a/pkg/libcontainer/nsinit/utils.go b/pkg/libcontainer/nsinit/utils.go new file mode 100644 index 0000000000..9926e2721f --- /dev/null +++ b/pkg/libcontainer/nsinit/utils.go @@ -0,0 +1,62 @@ +package main + +import ( + "encoding/json" + "io/ioutil" + "log" + "os" + "path/filepath" + "strconv" + + "github.com/dotcloud/docker/pkg/libcontainer" +) + +func loadContainer() (*libcontainer.Container, error) { + f, err := os.Open(filepath.Join(dataPath, "container.json")) + if err != nil { + return nil, err + } + defer f.Close() + + var container *libcontainer.Container + if err := json.NewDecoder(f).Decode(&container); err != nil { + return nil, err + } + + return container, nil +} + +func readPid() (int, error) { + data, err := ioutil.ReadFile(filepath.Join(dataPath, "pid")) + if err != nil { + return -1, err + } + + pid, err := strconv.Atoi(string(data)) + if err != nil { + return -1, err + } + + return pid, nil +} + +func openLog(name string) error { + f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755) + if err != nil { + return err + } + + log.SetOutput(f) + + return nil +} + +func loadContainerFromJson(rawData string) (*libcontainer.Container, error) { + var container *libcontainer.Container + + if err := json.Unmarshal([]byte(rawData), &container); err != nil { + return nil, err + } + + return container, nil +} diff --git a/pkg/libcontainer/security/capabilities/capabilities.go b/pkg/libcontainer/security/capabilities/capabilities.go index ad13e672c7..64ea961a18 100644 --- a/pkg/libcontainer/security/capabilities/capabilities.go +++ b/pkg/libcontainer/security/capabilities/capabilities.go @@ -7,32 +7,51 @@ import ( "github.com/syndtr/gocapability/capability" ) -// DropCapabilities drops capabilities for the current process based -// on the container's configuration. -func DropCapabilities(container *libcontainer.Container) error { - if drop := getCapabilitiesMask(container); len(drop) > 0 { - c, err := capability.NewPid(os.Getpid()) - if err != nil { - return err - } - c.Unset(capability.CAPS|capability.BOUNDS, drop...) 
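The rewrite below replaces the old drop-list (unset whatever the mask disables) with a keep-list: clear every set, then re-apply only the configured capabilities. A self-contained sketch of that pattern with gocapability (the kept capability is illustrative):

	package main

	import (
		"os"

		"github.com/syndtr/gocapability/capability"
	)

	func keepOnly(keep ...capability.Cap) error {
		c, err := capability.NewPid(os.Getpid())
		if err != nil {
			return err
		}
		c.Clear(capability.CAPS | capability.BOUNDS)
		c.Set(capability.CAPS|capability.BOUNDS, keep...)
		return c.Apply(capability.CAPS | capability.BOUNDS)
	}

	func main() {
		// Keep only CAP_NET_BIND_SERVICE: the process can still bind
		// privileged ports but retains no other root capability.
		if err := keepOnly(capability.CAP_NET_BIND_SERVICE); err != nil {
			os.Exit(1)
		}
	}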
+const allCapabilityTypes = capability.CAPS | capability.BOUNDS
 
-	if err := c.Apply(capability.CAPS | capability.BOUNDS); err != nil {
-		return err
-	}
+// DropBoundingSet drops the capability bounding set to those specified in the
+// container configuration.
+func DropBoundingSet(container *libcontainer.Container) error {
+	c, err := capability.NewPid(os.Getpid())
+	if err != nil {
+		return err
+	}
+
+	keep := getEnabledCapabilities(container)
+	c.Clear(capability.BOUNDS)
+	c.Set(capability.BOUNDS, keep...)
+
+	if err := c.Apply(capability.BOUNDS); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// DropCapabilities drops all capabilities for the current process except those specified in the container configuration.
+func DropCapabilities(container *libcontainer.Container) error {
+	c, err := capability.NewPid(os.Getpid())
+	if err != nil {
+		return err
+	}
+
+	keep := getEnabledCapabilities(container)
+	c.Clear(allCapabilityTypes)
+	c.Set(allCapabilityTypes, keep...)
+
+	if err := c.Apply(allCapabilityTypes); err != nil {
+		return err
 	}
 	return nil
 }
 
-// getCapabilitiesMask returns the specific cap mask values for the libcontainer types
-func getCapabilitiesMask(container *libcontainer.Container) []capability.Cap {
-	drop := []capability.Cap{}
-	for key, enabled := range container.CapabilitiesMask {
-		if !enabled {
-			if c := libcontainer.GetCapability(key); c != nil {
-				drop = append(drop, c.Value)
-			}
+// getEnabledCapabilities returns the capabilities that should not be dropped for the container.
+func getEnabledCapabilities(container *libcontainer.Container) []capability.Cap {
+	keep := []capability.Cap{}
+	for _, capability := range container.Capabilities {
+		if c := libcontainer.GetCapability(capability); c != nil {
+			keep = append(keep, c.Value)
 		}
 	}
-	return drop
+	return keep
 }
diff --git a/pkg/libcontainer/security/restrict/restrict.go b/pkg/libcontainer/security/restrict/restrict.go
index e1296b1d7f..2dadc4fff6 100644
--- a/pkg/libcontainer/security/restrict/restrict.go
+++ b/pkg/libcontainer/security/restrict/restrict.go
@@ -4,22 +4,49 @@ package restrict
 
 import (
 	"fmt"
+	"os"
 	"syscall"
+	"time"
 
 	"github.com/dotcloud/docker/pkg/system"
 )
 
+const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
+
+func mountReadonly(path string) error {
+	for i := 0; i < 5; i++ {
+		if err := system.Mount("", path, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil {
+			switch err {
+			case syscall.EINVAL:
+				// Probably not a mountpoint, use bind-mount
+				if err := system.Mount(path, path, "", syscall.MS_BIND, ""); err != nil {
+					return err
+				}
+				return system.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC|defaultMountFlags, "")
+			case syscall.EBUSY:
+				time.Sleep(100 * time.Millisecond)
+				continue
+			default:
+				return err
+			}
+		}
+		return nil
+	}
+	return fmt.Errorf("unable to mount %s as readonly: max retries reached", path)
+}
+
 // This has to be called while the container still has CAP_SYS_ADMIN (to be able to perform mounts).
 // However, afterwards, CAP_SYS_ADMIN should be dropped (otherwise the user will be able to revert those changes).
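A usage sketch, matching the call site in the container init path earlier in this patch:

	if container.Context["restrictions"] != "" {
		if err := restrict.Restrict("proc", "sys"); err != nil {
			return err
		}
	}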
func Restrict(mounts ...string) error {
 	// remount proc and sys as readonly
 	for _, dest := range mounts {
-		if err := system.Mount("", dest, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil {
+		if err := mountReadonly(dest); err != nil {
 			return fmt.Errorf("unable to remount %s readonly: %s", dest, err)
 		}
 	}
-	if err := system.Mount("/dev/null", "/proc/kcore", "", syscall.MS_BIND, ""); err != nil {
-		return fmt.Errorf("unable to bind-mount /dev/null over /proc/kcore")
+
+	if err := system.Mount("/dev/null", "/proc/kcore", "", syscall.MS_BIND, ""); err != nil && !os.IsNotExist(err) {
+		return fmt.Errorf("unable to bind-mount /dev/null over /proc/kcore: %s", err)
 	}
 	return nil
 }
diff --git a/pkg/libcontainer/types.go b/pkg/libcontainer/types.go
index 8f056c817d..834201036f 100644
--- a/pkg/libcontainer/types.go
+++ b/pkg/libcontainer/types.go
@@ -55,6 +55,27 @@
 		{Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN},
 		{Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN},
 		{Key: "SYSLOG", Value: capability.CAP_SYSLOG},
+		{Key: "SETUID", Value: capability.CAP_SETUID},
+		{Key: "SETGID", Value: capability.CAP_SETGID},
+		{Key: "CHOWN", Value: capability.CAP_CHOWN},
+		{Key: "NET_RAW", Value: capability.CAP_NET_RAW},
+		{Key: "DAC_OVERRIDE", Value: capability.CAP_DAC_OVERRIDE},
+		{Key: "FOWNER", Value: capability.CAP_FOWNER},
+		{Key: "DAC_READ_SEARCH", Value: capability.CAP_DAC_READ_SEARCH},
+		{Key: "FSETID", Value: capability.CAP_FSETID},
+		{Key: "KILL", Value: capability.CAP_KILL},
+		{Key: "LINUX_IMMUTABLE", Value: capability.CAP_LINUX_IMMUTABLE},
+		{Key: "NET_BIND_SERVICE", Value: capability.CAP_NET_BIND_SERVICE},
+		{Key: "NET_BROADCAST", Value: capability.CAP_NET_BROADCAST},
+		{Key: "IPC_LOCK", Value: capability.CAP_IPC_LOCK},
+		{Key: "IPC_OWNER", Value: capability.CAP_IPC_OWNER},
+		{Key: "SYS_CHROOT", Value: capability.CAP_SYS_CHROOT},
+		{Key: "SYS_PTRACE", Value: capability.CAP_SYS_PTRACE},
+		{Key: "SYS_BOOT", Value: capability.CAP_SYS_BOOT},
+		{Key: "LEASE", Value: capability.CAP_LEASE},
+		{Key: "SETFCAP", Value: capability.CAP_SETFCAP},
+		{Key: "WAKE_ALARM", Value: capability.CAP_WAKE_ALARM},
+		{Key: "BLOCK_SUSPEND", Value: capability.CAP_BLOCK_SUSPEND},
 	}
 )
@@ -118,6 +139,14 @@ func GetCapability(key string) *Capability {
 	return nil
 }
 
+func GetAllCapabilities() []string {
+	output := make([]string, len(capabilityList))
+	for i, capability := range capabilityList {
+		output[i] = capability.String()
+	}
+	return output
+}
+
 // Contains returns true if the specified Capability is
 // in the slice
 func (c Capabilities) Contains(capp string) bool {
diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go
index ed85a4a4c5..2cfef331f6 100644
--- a/pkg/mflag/flag.go
+++ b/pkg/mflag/flag.go
@@ -51,6 +51,8 @@
 Command line flag syntax:
 	-flag
 	-flag=x
+	-flag="x"
+	-flag='x'
 	-flag x // non-boolean flags only
 One or two minus signs may be used; they are equivalent.
 The last form is not permitted for boolean flags because the
@@ -775,6 +777,37 @@ func (f *FlagSet) usage() {
 	}
 }
 
+func trimQuotes(str string) string {
+	type quote struct {
+		start, end byte
+	}
+
+	// All valid quote types.
+	quotes := []quote{
+		// Double quotes
+		{
+			start: '"',
+			end:   '"',
+		},
+
+		// Single quotes
+		{
+			start: '\'',
+			end:   '\'',
+		},
+	}
+
+	for _, quote := range quotes {
+		// Only strip if outermost match.
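+		// e.g. `"x"` and `'x'` both trim to `x`; a mismatched pair such as
+		// `'x"` is returned unchanged. (Callers must ensure str is non-empty:
+		// str[0] below panics on "".)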
+ if str[0] == quote.start && str[len(str)-1] == quote.end { + str = str[1 : len(str)-1] + break + } + } + + return str +} + // parseOne parses one flag. It reports whether a flag was seen. func (f *FlagSet) parseOne() (bool, string, error) { if len(f.args) == 0 { @@ -799,7 +832,7 @@ func (f *FlagSet) parseOne() (bool, string, error) { value := "" for i := 1; i < len(name); i++ { // equals cannot be first if name[i] == '=' { - value = name[i+1:] + value = trimQuotes(name[i+1:]) has_value = true name = name[0:i] break diff --git a/pkg/mflag/flag_test.go b/pkg/mflag/flag_test.go index b9e8a0ef3e..4c2222e54b 100644 --- a/pkg/mflag/flag_test.go +++ b/pkg/mflag/flag_test.go @@ -173,6 +173,12 @@ func testParse(f *FlagSet, t *testing.T) { uintFlag := f.Uint([]string{"uint"}, 0, "uint value") uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") stringFlag := f.String([]string{"string"}, "0", "string value") + singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") + doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") + mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") + mixed2QuoteFlag := f.String([]string{"mquote2"}, "", "mixed2 quoted value") + nestedQuoteFlag := f.String([]string{"nquote"}, "", "nested quoted value") + nested2QuoteFlag := f.String([]string{"nquote2"}, "", "nested2 quoted value") float64Flag := f.Float64([]string{"float64"}, 0, "float64 value") durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value") extra := "one-extra-argument" @@ -184,6 +190,12 @@ func testParse(f *FlagSet, t *testing.T) { "-uint", "24", "--uint64", "25", "-string", "hello", + "-squote='single'", + `-dquote="double"`, + `-mquote='mixed"`, + `-mquote2="mixed2'`, + `-nquote="'single nested'"`, + `-nquote2='"double nested"'`, "-float64", "2718e28", "-duration", "2m", extra, @@ -215,6 +227,24 @@ func testParse(f *FlagSet, t *testing.T) { if *stringFlag != "hello" { t.Error("string flag should be `hello`, is ", *stringFlag) } + if *singleQuoteFlag != "single" { + t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag) + } + if *doubleQuoteFlag != "double" { + t.Error("double quote string flag should be `double`, is ", *doubleQuoteFlag) + } + if *mixedQuoteFlag != `'mixed"` { + t.Error("mixed quote string flag should be `'mixed\"`, is ", *mixedQuoteFlag) + } + if *mixed2QuoteFlag != `"mixed2'` { + t.Error("mixed2 quote string flag should be `\"mixed2'`, is ", *mixed2QuoteFlag) + } + if *nestedQuoteFlag != "'single nested'" { + t.Error("nested quote string flag should be `'single nested'`, is ", *nestedQuoteFlag) + } + if *nested2QuoteFlag != `"double nested"` { + t.Error("double quote string flag should be `\"double nested\"`, is ", *nested2QuoteFlag) + } if *float64Flag != 2718e28 { t.Error("float64 flag should be 2718e28, is ", *float64Flag) } diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go index 07fadf8171..a89e5b29e2 100644 --- a/pkg/namesgenerator/names-generator.go +++ b/pkg/namesgenerator/names-generator.go @@ -6,10 +6,6 @@ import ( "time" ) -type NameChecker interface { - Exists(name string) bool -} - var ( left = [...]string{"happy", "jolly", "dreamy", "sad", "angry", "pensive", "focused", "sleepy", "grave", "distracted", "determined", "stoic", "stupefied", "sharp", "agitated", "cocky", "tender", "goofy", "furious", "desperate", "hopeful", "compassionate", "silly", "lonely", "condescending", "naughty", "kickass", "drunk", "boring", 
"nostalgic", "ecstatic", "insane", "cranky", "mad", "jovial", "sick", "hungry", "thirsty", "elegant", "backstabbing", "clever", "trusting", "loving", "suspicious", "berserk", "high", "romantic", "prickly", "evil"} // Docker 0.7.x generates names from notable scientists and hackers. @@ -79,16 +75,17 @@ var ( right = [...]string{"lovelace", "franklin", "tesla", "einstein", "bohr", "davinci", "pasteur", "nobel", "curie", "darwin", "turing", "ritchie", "torvalds", "pike", "thompson", "wozniak", "galileo", "euclid", "newton", "fermat", "archimedes", "poincare", "heisenberg", "feynman", "hawking", "fermi", "pare", "mccarthy", "engelbart", "babbage", "albattani", "ptolemy", "bell", "wright", "lumiere", "morse", "mclean", "brown", "bardeen", "brattain", "shockley", "goldstine", "hoover", "hopper", "bartik", "sammet", "jones", "perlman", "wilson", "kowalevski", "hypatia", "goodall", "mayer", "elion", "blackwell", "lalande", "kirch", "ardinghelli", "colden", "almeida", "leakey", "meitner", "mestorf", "rosalind", "sinoussi", "carson", "mcclintock", "yonath"} ) -func GenerateRandomName(checker NameChecker) (string, error) { - retry := 5 +func GetRandomName(retry int) string { rand.Seed(time.Now().UnixNano()) + +begin: name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) - for checker != nil && checker.Exists(name) && retry > 0 || name == "boring_wozniak" /* Steve Wozniak is not boring */ { + if name == "boring_wozniak" /* Steve Wozniak is not boring */ { + goto begin + } + + if retry > 0 { name = fmt.Sprintf("%s%d", name, rand.Intn(10)) - retry = retry - 1 } - if retry == 0 { - return name, fmt.Errorf("Error generating random name") - } - return name, nil + return name } diff --git a/pkg/namesgenerator/names-generator_test.go b/pkg/namesgenerator/names-generator_test.go index bcee7c86a7..2652d42ab4 100644 --- a/pkg/namesgenerator/names-generator_test.go +++ b/pkg/namesgenerator/names-generator_test.go @@ -4,35 +4,9 @@ import ( "testing" ) -type FalseChecker struct{} - -func (n *FalseChecker) Exists(name string) bool { - return false -} - -type TrueChecker struct{} - -func (n *TrueChecker) Exists(name string) bool { - return true -} - -func TestGenerateRandomName(t *testing.T) { - if _, err := GenerateRandomName(&FalseChecker{}); err != nil { - t.Error(err) - } - - if _, err := GenerateRandomName(&TrueChecker{}); err == nil { - t.Error("An error was expected") - } - -} - // Make sure the generated names are awesome func TestGenerateAwesomeNames(t *testing.T) { - name, err := GenerateRandomName(&FalseChecker{}) - if err != nil { - t.Error(err) - } + name := GetRandomName(0) if !isAwesome(name) { t.Fatalf("Generated name '%s' is not awesome.", name) } diff --git a/pkg/netlink/netlink_linux.go b/pkg/netlink/netlink_linux.go index 6de293d42a..14e30aa026 100644 --- a/pkg/netlink/netlink_linux.go +++ b/pkg/netlink/netlink_linux.go @@ -7,6 +7,7 @@ import ( "fmt" "math/rand" "net" + "sync/atomic" "syscall" "unsafe" ) @@ -22,7 +23,7 @@ const ( SIOC_BRADDIF = 0x89a2 ) -var nextSeqNr int +var nextSeqNr uint32 type ifreqHwaddr struct { IfrnName [16]byte @@ -42,11 +43,6 @@ func nativeEndian() binary.ByteOrder { return binary.LittleEndian } -func getSeq() int { - nextSeqNr = nextSeqNr + 1 - return nextSeqNr -} - func getIpFamily(ip net.IP) int { if len(ip) <= net.IPv4len { return syscall.AF_INET @@ -131,10 +127,9 @@ type RtMsg struct { syscall.RtMsg } -func newRtMsg(family int) *RtMsg { +func newRtMsg() *RtMsg { return &RtMsg{ RtMsg: syscall.RtMsg{ - Family: uint8(family), Table: 
syscall.RT_TABLE_MAIN, Scope: syscall.RT_SCOPE_UNIVERSE, Protocol: syscall.RTPROT_BOOT, @@ -267,7 +262,7 @@ func newNetlinkRequest(proto, flags int) *NetlinkRequest { Len: uint32(syscall.NLMSG_HDRLEN), Type: uint16(proto), Flags: syscall.NLM_F_REQUEST | uint16(flags), - Seq: uint32(getSeq()), + Seq: atomic.AddUint32(&nextSeqNr, 1), }, } } @@ -367,40 +362,118 @@ done: return nil } -// Add a new default gateway. Identical to: -// ip route add default via $ip -func AddDefaultGw(ip net.IP) error { +// Add a new route table entry. +func AddRoute(destination, source, gateway, device string) error { + if destination == "" && source == "" && gateway == "" { + return fmt.Errorf("one of destination, source or gateway must not be blank") + } + s, err := getNetlinkSocket() if err != nil { return err } defer s.Close() - family := getIpFamily(ip) - wb := newNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + msg := newRtMsg() + currentFamily := -1 + var rtAttrs []*RtAttr - msg := newRtMsg(family) - wb.AddData(msg) - - var ipData []byte - if family == syscall.AF_INET { - ipData = ip.To4() - } else { - ipData = ip.To16() + if destination != "" { + destIP, destNet, err := net.ParseCIDR(destination) + if err != nil { + return fmt.Errorf("destination CIDR %s couldn't be parsed", destination) + } + destFamily := getIpFamily(destIP) + currentFamily = destFamily + destLen, bits := destNet.Mask.Size() + if destLen == 0 && bits == 0 { + return fmt.Errorf("destination CIDR %s generated a non-canonical Mask", destination) + } + msg.Family = uint8(destFamily) + msg.Dst_len = uint8(destLen) + var destData []byte + if destFamily == syscall.AF_INET { + destData = destIP.To4() + } else { + destData = destIP.To16() + } + rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_DST, destData)) } - gateway := newRtAttr(syscall.RTA_GATEWAY, ipData) + if source != "" { + srcIP, srcNet, err := net.ParseCIDR(source) + if err != nil { + return fmt.Errorf("source CIDR %s couldn't be parsed", source) + } + srcFamily := getIpFamily(srcIP) + if currentFamily != -1 && currentFamily != srcFamily { + return fmt.Errorf("source and destination ip were not the same IP family") + } + currentFamily = srcFamily + srcLen, bits := srcNet.Mask.Size() + if srcLen == 0 && bits == 0 { + return fmt.Errorf("source CIDR %s generated a non-canonical Mask", source) + } + msg.Family = uint8(srcFamily) + msg.Src_len = uint8(srcLen) + var srcData []byte + if srcFamily == syscall.AF_INET { + srcData = srcIP.To4() + } else { + srcData = srcIP.To16() + } + rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_SRC, srcData)) + } - wb.AddData(gateway) + if gateway != "" { + gwIP := net.ParseIP(gateway) + if gwIP == nil { + return fmt.Errorf("gateway IP %s couldn't be parsed", gateway) + } + gwFamily := getIpFamily(gwIP) + if currentFamily != -1 && currentFamily != gwFamily { + return fmt.Errorf("gateway, source, and destination ip were not the same IP family") + } + msg.Family = uint8(gwFamily) + var gwData []byte + if gwFamily == syscall.AF_INET { + gwData = gwIP.To4() + } else { + gwData = gwIP.To16() + } + rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_GATEWAY, gwData)) + } + + wb.AddData(msg) + for _, attr := range rtAttrs { + wb.AddData(attr) + } + + var ( + native = nativeEndian() + b = make([]byte, 4) + ) + iface, err := net.InterfaceByName(device) + if err != nil { + return err + } + native.PutUint32(b, uint32(iface.Index)) + + wb.AddData(newRtAttr(syscall.RTA_OIF, b)) if err := s.Send(wb); err != nil { return err } - 
return s.HandleAck(wb.Seq) } +// Add a new default gateway. Identical to: +// ip route add default via $ip +func AddDefaultGw(ip, device string) error { + return AddRoute("", "", ip, device) +} + // Bring up a particular network interface func NetworkLinkUp(iface *net.Interface) error { s, err := getNetlinkSocket() diff --git a/pkg/netlink/netlink_unsupported.go b/pkg/netlink/netlink_unsupported.go index 8a5531b9ef..1359345662 100644 --- a/pkg/netlink/netlink_unsupported.go +++ b/pkg/netlink/netlink_unsupported.go @@ -27,9 +27,12 @@ func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { return ErrNotImplemented } -func AddDefaultGw(ip net.IP) error { +func AddRoute(destination, source, gateway, device string) error { return ErrNotImplemented +} +func AddDefaultGw(ip, device string) error { + return ErrNotImplemented } func NetworkSetMTU(iface *net.Interface, mtu int) error { diff --git a/pkg/cgroups/MAINTAINERS b/pkg/symlink/MAINTAINERS similarity index 59% rename from pkg/cgroups/MAINTAINERS rename to pkg/symlink/MAINTAINERS index 1e998f8ac1..68a97d2fc2 100644 --- a/pkg/cgroups/MAINTAINERS +++ b/pkg/symlink/MAINTAINERS @@ -1 +1,2 @@ Michael Crosby (@crosbymichael) +Victor Vieux (@vieux) diff --git a/utils/fs.go b/pkg/symlink/fs.go similarity index 62% rename from utils/fs.go rename to pkg/symlink/fs.go index e07ced75d7..257491f91b 100644 --- a/utils/fs.go +++ b/pkg/symlink/fs.go @@ -1,42 +1,14 @@ -package utils +package symlink import ( "fmt" "os" + "path" "path/filepath" "strings" - "syscall" ) -// TreeSize walks a directory tree and returns its total size in bytes. -func TreeSize(dir string) (size int64, err error) { - data := make(map[uint64]struct{}) - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { - // Ignore directory sizes - if fileInfo == nil { - return nil - } - - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil - } - - // Check inode to handle hard links correctly - inode := fileInfo.Sys().(*syscall.Stat_t).Ino - // inode is not a uint64 on all platforms. Cast it to avoid issues. - if _, exists := data[uint64(inode)]; exists { - return nil - } - // inode is not a uint64 on all platforms. Cast it to avoid issues. - data[uint64(inode)] = struct{}{} - - size += s - - return nil - }) - return -} +const maxLoopCounter = 100 // FollowSymlink will follow an existing link and scope it to the root // path provided. @@ -61,7 +33,14 @@ func FollowSymlinkInScope(link, root string) (string, error) { prev = filepath.Join(prev, p) prev = filepath.Clean(prev) + loopCounter := 0 for { + loopCounter++ + + if loopCounter >= maxLoopCounter { + return "", fmt.Errorf("loopCounter reached MAX: %v", loopCounter) + } + if !strings.HasPrefix(prev, root) { // Don't resolve symlinks outside of root. For example, // we don't have to check /home in the below. 
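A usage sketch of the scoped resolution (paths are illustrative): a link inside a container rootfs resolves without escaping it:

	// If /rootfs/etc/mtab is a symlink to /proc/mounts, the result is
	// scoped back under the root:
	resolved, err := symlink.FollowSymlinkInScope("/rootfs/etc/mtab", "/rootfs")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resolved) // /rootfs/proc/mounts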
@@ -84,10 +63,9 @@ func FollowSymlinkInScope(link, root string) (string, error) { return "", err } - switch dest[0] { - case '/': + if path.IsAbs(dest) { prev = filepath.Join(root, dest) - case '.': + } else { prev, _ = filepath.Abs(prev) if prev = filepath.Clean(filepath.Join(filepath.Dir(prev), dest)); len(prev) < len(root) { diff --git a/utils/fs_test.go b/pkg/symlink/fs_test.go similarity index 88% rename from utils/fs_test.go rename to pkg/symlink/fs_test.go index 9affc00e91..d85fd6da74 100644 --- a/utils/fs_test.go +++ b/pkg/symlink/fs_test.go @@ -1,4 +1,4 @@ -package utils +package symlink import ( "io/ioutil" @@ -28,6 +28,19 @@ func TestFollowSymLinkNormal(t *testing.T) { } } +func TestFollowSymLinkRelativePath(t *testing.T) { + link := "testdata/fs/i" + + rewrite, err := FollowSymlinkInScope(link, "testdata") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/fs/a"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } +} + func TestFollowSymLinkUnderLinkedDir(t *testing.T) { dir, err := ioutil.TempDir("", "docker-fs-test") if err != nil { diff --git a/utils/testdata/fs/a/d b/pkg/symlink/testdata/fs/a/d similarity index 100% rename from utils/testdata/fs/a/d rename to pkg/symlink/testdata/fs/a/d diff --git a/utils/testdata/fs/a/e b/pkg/symlink/testdata/fs/a/e similarity index 100% rename from utils/testdata/fs/a/e rename to pkg/symlink/testdata/fs/a/e diff --git a/utils/testdata/fs/a/f b/pkg/symlink/testdata/fs/a/f similarity index 100% rename from utils/testdata/fs/a/f rename to pkg/symlink/testdata/fs/a/f diff --git a/utils/testdata/fs/b/h b/pkg/symlink/testdata/fs/b/h similarity index 100% rename from utils/testdata/fs/b/h rename to pkg/symlink/testdata/fs/b/h diff --git a/utils/testdata/fs/g b/pkg/symlink/testdata/fs/g similarity index 100% rename from utils/testdata/fs/g rename to pkg/symlink/testdata/fs/g diff --git a/pkg/symlink/testdata/fs/i b/pkg/symlink/testdata/fs/i new file mode 120000 index 0000000000..2e65efe2a1 --- /dev/null +++ b/pkg/symlink/testdata/fs/i @@ -0,0 +1 @@ +a \ No newline at end of file diff --git a/pkg/sysinfo/sysinfo.go b/pkg/sysinfo/sysinfo.go index 27af37bb89..bdc192b913 100644 --- a/pkg/sysinfo/sysinfo.go +++ b/pkg/sysinfo/sysinfo.go @@ -1,11 +1,12 @@ package sysinfo import ( - "github.com/dotcloud/docker/pkg/cgroups" "io/ioutil" "log" "os" "path" + + "github.com/dotcloud/docker/pkg/libcontainer/cgroups" ) type SysInfo struct { diff --git a/pkg/system/calls_linux.go b/pkg/system/calls_linux.go index cc4727aaa2..6986051e1d 100644 --- a/pkg/system/calls_linux.go +++ b/pkg/system/calls_linux.go @@ -3,6 +3,7 @@ package system import ( "os/exec" "syscall" + "unsafe" ) func Chroot(dir string) error { @@ -122,6 +123,34 @@ func ParentDeathSignal(sig uintptr) error { return nil } +func GetParentDeathSignal() (int, error) { + var sig int + + _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0) + + if err != 0 { + return -1, err + } + + return sig, nil +} + +func SetKeepCaps() error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0); err != 0 { + return err + } + + return nil +} + +func ClearKeepCaps() error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0); err != 0 { + return err + } + + return nil +} + func Setctty() error { if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 { return err diff --git a/pkg/testutils/MAINTAINERS 
b/pkg/testutils/MAINTAINERS new file mode 100644 index 0000000000..f2e8c52e51 --- /dev/null +++ b/pkg/testutils/MAINTAINERS @@ -0,0 +1,2 @@ +Solomon Hykes (@shykes) +Cristian Staretu (@unclejack) diff --git a/pkg/testutils/README.md b/pkg/testutils/README.md new file mode 100644 index 0000000000..a208a90e68 --- /dev/null +++ b/pkg/testutils/README.md @@ -0,0 +1,2 @@ +`testutils` is a collection of utility functions to facilitate the writing +of tests. It is used in various places by the Docker test suite. diff --git a/pkg/testutils/testutils.go b/pkg/testutils/testutils.go new file mode 100644 index 0000000000..4655e5844d --- /dev/null +++ b/pkg/testutils/testutils.go @@ -0,0 +1,23 @@ +package testutils + +import ( + "testing" + "time" +) + +// Timeout calls f and waits for 100ms for it to complete. +// If it doesn't, it causes the tests to fail. +// t must be a valid testing context. +func Timeout(t *testing.T, f func()) { + onTimeout := time.After(100 * time.Millisecond) + onDone := make(chan bool) + go func() { + f() + close(onDone) + }() + select { + case <-onTimeout: + t.Fatalf("timeout") + case <-onDone: + } +} diff --git a/pkg/units/MAINTAINERS b/pkg/units/MAINTAINERS new file mode 100644 index 0000000000..68a97d2fc2 --- /dev/null +++ b/pkg/units/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Victor Vieux (@vieux) diff --git a/pkg/units/duration.go b/pkg/units/duration.go new file mode 100644 index 0000000000..cd33121496 --- /dev/null +++ b/pkg/units/duration.go @@ -0,0 +1,31 @@ +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours", etc.) +func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours()); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*3 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%.0f years", d.Hours()/24/365) +} diff --git a/pkg/units/size.go b/pkg/units/size.go new file mode 100644 index 0000000000..480ec2f141 --- /dev/null +++ b/pkg/units/size.go @@ -0,0 +1,92 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// HumanSize returns a human-readable approximation of a size +// using SI standard (eg. "44kB", "17MB") +func HumanSize(size int64) string { + i := 0 + var sizef float64 + sizef = float64(size) + units := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + for sizef >= 1000.0 { + sizef = sizef / 1000.0 + i++ + } + return fmt.Sprintf("%.4g %s", sizef, units[i]) +} + +// FromHumanSize returns an integer from a human-readable specification of a size +// using SI standard (eg.
"44kB", "17MB") +func FromHumanSize(size string) (int64, error) { + re, error := regexp.Compile("^(\\d+)([kKmMgGtTpP])?[bB]?$") + if error != nil { + return -1, fmt.Errorf("%s does not specify not a size", size) + } + + matches := re.FindStringSubmatch(size) + + if len(matches) != 3 { + return -1, fmt.Errorf("Invalid size: '%s'", size) + } + + theSize, error := strconv.ParseInt(matches[1], 10, 0) + if error != nil { + return -1, error + } + + unit := strings.ToLower(matches[2]) + + if unit == "k" { + theSize *= 1000 + } else if unit == "m" { + theSize *= 1000 * 1000 + } else if unit == "g" { + theSize *= 1000 * 1000 * 1000 + } else if unit == "t" { + theSize *= 1000 * 1000 * 1000 * 1000 + } else if unit == "p" { + theSize *= 1000 * 1000 * 1000 * 1000 * 1000 + } + + return theSize, nil +} + +// Parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes or gibibytes, and returns the +// number of bytes, or -1 if the string is unparseable. +// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (bytes int64, err error) { + re, error := regexp.Compile("^(\\d+)([kKmMgG])?[bB]?$") + if error != nil { + return -1, error + } + + matches := re.FindStringSubmatch(size) + + if len(matches) != 3 { + return -1, fmt.Errorf("Invalid size: '%s'", size) + } + + memLimit, error := strconv.ParseInt(matches[1], 10, 0) + if error != nil { + return -1, error + } + + unit := strings.ToLower(matches[2]) + + if unit == "k" { + memLimit *= 1024 + } else if unit == "m" { + memLimit *= 1024 * 1024 + } else if unit == "g" { + memLimit *= 1024 * 1024 * 1024 + } + + return memLimit, nil +} diff --git a/pkg/units/size_test.go b/pkg/units/size_test.go new file mode 100644 index 0000000000..5240bbd9f0 --- /dev/null +++ b/pkg/units/size_test.go @@ -0,0 +1,89 @@ +package units + +import ( + "strings" + "testing" +) + +func TestHumanSize(t *testing.T) { + + size := strings.Trim(HumanSize(1000), " \t") + expect := "1 kB" + if size != expect { + t.Errorf("1000 -> expected '%s', got '%s'", expect, size) + } + + size = strings.Trim(HumanSize(1024), " \t") + expect = "1.024 kB" + if size != expect { + t.Errorf("1024 -> expected '%s', got '%s'", expect, size) + } +} + +func TestFromHumanSize(t *testing.T) { + assertFromHumanSize(t, "32", false, 32) + assertFromHumanSize(t, "32b", false, 32) + assertFromHumanSize(t, "32B", false, 32) + assertFromHumanSize(t, "32k", false, 32*1000) + assertFromHumanSize(t, "32K", false, 32*1000) + assertFromHumanSize(t, "32kb", false, 32*1000) + assertFromHumanSize(t, "32Kb", false, 32*1000) + assertFromHumanSize(t, "32Mb", false, 32*1000*1000) + assertFromHumanSize(t, "32Gb", false, 32*1000*1000*1000) + assertFromHumanSize(t, "32Tb", false, 32*1000*1000*1000*1000) + assertFromHumanSize(t, "8Pb", false, 8*1000*1000*1000*1000*1000) + + assertFromHumanSize(t, "", true, -1) + assertFromHumanSize(t, "hello", true, -1) + assertFromHumanSize(t, "-32", true, -1) + assertFromHumanSize(t, " 32 ", true, -1) + assertFromHumanSize(t, "32 mb", true, -1) + assertFromHumanSize(t, "32m b", true, -1) + assertFromHumanSize(t, "32bm", true, -1) +} + +func assertFromHumanSize(t *testing.T, size string, expectError bool, expectedBytes int64) { + actualBytes, err := FromHumanSize(size) + if (err != nil) && !expectError { + t.Errorf("Unexpected error parsing '%s': %s", size, err) + } + if (err == nil) && expectError { + t.Errorf("Expected to get an error parsing '%s', but got none (bytes=%d)", size, actualBytes) + } + if actualBytes != expectedBytes 
{ + t.Errorf("Expected '%s' to parse as %d bytes, got %d", size, expectedBytes, actualBytes) + } +} + +func TestRAMInBytes(t *testing.T) { + assertRAMInBytes(t, "32", false, 32) + assertRAMInBytes(t, "32b", false, 32) + assertRAMInBytes(t, "32B", false, 32) + assertRAMInBytes(t, "32k", false, 32*1024) + assertRAMInBytes(t, "32K", false, 32*1024) + assertRAMInBytes(t, "32kb", false, 32*1024) + assertRAMInBytes(t, "32Kb", false, 32*1024) + assertRAMInBytes(t, "32Mb", false, 32*1024*1024) + assertRAMInBytes(t, "32Gb", false, 32*1024*1024*1024) + + assertRAMInBytes(t, "", true, -1) + assertRAMInBytes(t, "hello", true, -1) + assertRAMInBytes(t, "-32", true, -1) + assertRAMInBytes(t, " 32 ", true, -1) + assertRAMInBytes(t, "32 mb", true, -1) + assertRAMInBytes(t, "32m b", true, -1) + assertRAMInBytes(t, "32bm", true, -1) +} + +func assertRAMInBytes(t *testing.T, size string, expectError bool, expectedBytes int64) { + actualBytes, err := RAMInBytes(size) + if (err != nil) && !expectError { + t.Errorf("Unexpected error parsing '%s': %s", size, err) + } + if (err == nil) && expectError { + t.Errorf("Expected to get an error parsing '%s', but got none (bytes=%d)", size, actualBytes) + } + if actualBytes != expectedBytes { + t.Errorf("Expected '%s' to parse as %d bytes, got %d", size, expectedBytes, actualBytes) + } +} diff --git a/pkg/user/user.go b/pkg/user/user.go index 1672f7e679..df47101221 100644 --- a/pkg/user/user.go +++ b/pkg/user/user.go @@ -9,6 +9,15 @@ import ( "strings" ) +const ( + minId = 0 + maxId = 1<<31 - 1 //for 32-bit systems compatibility +) + +var ( + ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId) +) + type User struct { Name string Pass string @@ -194,6 +203,9 @@ func GetUserGroupSupplementary(userSpec string, defaultUid int, defaultGid int) // not numeric - we have to bail return 0, 0, nil, fmt.Errorf("Unable to find user %v", userArg) } + if uid < minId || uid > maxId { + return 0, 0, nil, ErrRange + } // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit } @@ -226,6 +238,9 @@ func GetUserGroupSupplementary(userSpec string, defaultUid int, defaultGid int) // not numeric - we have to bail return 0, 0, nil, fmt.Errorf("Unable to find group %v", groupArg) } + if gid < minId || gid > maxId { + return 0, 0, nil, ErrRange + } // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit } diff --git a/registry/MAINTAINERS b/registry/MAINTAINERS index bf3984f5f9..af791fb40c 100644 --- a/registry/MAINTAINERS +++ b/registry/MAINTAINERS @@ -1,3 +1,4 @@ Sam Alba (@samalba) Joffrey Fuhrer (@shin-) Ken Cochrane (@kencochrane) +Vincent Batts (@vbatts) diff --git a/registry/auth.go b/registry/auth.go index 4fdd51fda4..7384efbad6 100644 --- a/registry/auth.go +++ b/registry/auth.go @@ -5,12 +5,13 @@ import ( "encoding/json" "errors" "fmt" - "github.com/dotcloud/docker/utils" "io/ioutil" "net/http" "os" "path" "strings" + + "github.com/dotcloud/docker/utils" ) // Where we store the config file @@ -152,10 +153,16 @@ func SaveConfig(configFile *ConfigFile) error { // try to register/login to the registry server func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, error) { var ( - status string - reqBody []byte - err error - client = &http.Client{} + status string + reqBody []byte + err error + client = &http.Client{ + Transport: &http.Transport{ + DisableKeepAlives: true, + Proxy: http.ProxyFromEnvironment, + }, + CheckRedirect: 
AddRequiredHeadersToRedirectedRequests, + } reqStatusCode = 0 serverAddress = authConfig.ServerAddress ) diff --git a/registry/registry.go b/registry/registry.go index 2e3e7e03a7..8d1a9f2287 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -256,12 +256,43 @@ func (r *Registry) GetRemoteImageJSON(imgID, registry string, token []string) ([ return jsonString, imageSize, nil } -func (r *Registry) GetRemoteImageLayer(imgID, registry string, token []string) (io.ReadCloser, error) { - req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/layer", nil) +func (r *Registry) GetRemoteImageLayer(imgID, registry string, token []string, imgSize int64) (io.ReadCloser, error) { + var ( + retries = 5 + headRes *http.Response + hasResume = false + imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) + ) + headReq, err := r.reqFactory.NewRequest("HEAD", imageURL, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %s\n", err) + } + setTokenAuth(headReq, token) + for i := 1; i <= retries; i++ { + headRes, err = r.client.Do(headReq) + if err != nil && i == retries { + return nil, fmt.Errorf("Error while making head request: %s\n", err) + } else if err != nil { + time.Sleep(time.Duration(i) * 5 * time.Second) + continue + } + break + } + + if headRes.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { + hasResume = true + } + + req, err := r.reqFactory.NewRequest("GET", imageURL, nil) if err != nil { return nil, fmt.Errorf("Error while getting from the server: %s\n", err) } setTokenAuth(req, token) + if hasResume { + utils.Debugf("server supports resume") + return utils.ResumableRequestReader(r.client, req, 5, imgSize), nil + } + utils.Debugf("server doesn't support resume") res, err := r.client.Do(req) if err != nil { return nil, err @@ -725,8 +756,52 @@ type Registry struct { indexEndpoint string } +func trustedLocation(req *http.Request) bool { + var ( + trusteds = []string{"docker.com", "docker.io"} + hostname = strings.SplitN(req.Host, ":", 2)[0] + ) + if req.URL.Scheme != "https" { + return false + } + + for _, trusted := range trusteds { + if strings.HasSuffix(hostname, trusted) { + return true + } + } + return false +} + +func AddRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { + if via != nil && via[0] != nil { + if trustedLocation(req) && trustedLocation(via[0]) { + req.Header = via[0].Header + } else { + for k, v := range via[0].Header { + if k != "Authorization" { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + } + } + } + return nil +} + func NewRegistry(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, indexEndpoint string) (r *Registry, err error) { + httpDial := func(proto string, addr string) (net.Conn, error) { + conn, err := net.Dial(proto, addr) + if err != nil { + return nil, err + } + conn = utils.NewTimeoutConn(conn, time.Duration(1)*time.Minute) + return conn, nil + } + httpTransport := &http.Transport{ + Dial: httpDial, DisableKeepAlives: true, Proxy: http.ProxyFromEnvironment, } @@ -734,10 +809,12 @@ func NewRegistry(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, inde r = &Registry{ authConfig: authConfig, client: &http.Client{ - Transport: httpTransport, + Transport: httpTransport, + CheckRedirect: AddRequiredHeadersToRedirectedRequests, }, indexEndpoint: indexEndpoint, } + r.client.Jar, err = cookiejar.New(nil) if err != nil { return nil, err diff --git a/registry/registry_test.go b/registry/registry_test.go index 0a5be5e543..2857ab4a49
100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -2,10 +2,12 @@ package registry import ( "fmt" - "github.com/dotcloud/docker/utils" + "net/http" "net/url" "strings" "testing" + + "github.com/dotcloud/docker/utils" ) var ( @@ -70,7 +72,7 @@ func TestGetRemoteImageJSON(t *testing.T) { func TestGetRemoteImageLayer(t *testing.T) { r := spawnTestRegistry(t) - data, err := r.GetRemoteImageLayer(IMAGE_ID, makeURL("/v1/"), TOKEN) + data, err := r.GetRemoteImageLayer(IMAGE_ID, makeURL("/v1/"), TOKEN, 0) if err != nil { t.Fatal(err) } @@ -78,7 +80,7 @@ func TestGetRemoteImageLayer(t *testing.T) { t.Fatal("Expected non-nil data result") } - _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), TOKEN) + _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), TOKEN, 0) if err == nil { t.Fatal("Expected image not found error") } @@ -231,3 +233,70 @@ func TestValidRepositoryName(t *testing.T) { t.Fail() } } + +func TestTrustedLocation(t *testing.T) { + for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.io"} { + req, _ := http.NewRequest("GET", url, nil) + if trustedLocation(req) { + t.Fatalf("'%s' shouldn't be detected as a trusted location", url) + } + } + + for _, url := range []string{"https://docker.io", "https://test.docker.io:80"} { + req, _ := http.NewRequest("GET", url, nil) + if !trustedLocation(req) { + t.Fatalf("'%s' should be detected as a trusted location", url) + } + } +} + +func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { + for _, urls := range [][]string{ + {"http://docker.io", "https://docker.com"}, + {"https://foo.docker.io:7777", "http://bar.docker.com"}, + {"https://foo.docker.io", "https://example.com"}, + } { + reqFrom, _ := http.NewRequest("GET", urls[0], nil) + reqFrom.Header.Add("Content-Type", "application/json") + reqFrom.Header.Add("Authorization", "super_secret") + reqTo, _ := http.NewRequest("GET", urls[1], nil) + + AddRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) + + if len(reqTo.Header) != 1 { + t.Fatalf("Expected 1 header, got %d", len(reqTo.Header)) + } + + if reqTo.Header.Get("Content-Type") != "application/json" { + t.Fatal("'Content-Type' should be 'application/json'") + } + + if reqTo.Header.Get("Authorization") != "" { + t.Fatal("'Authorization' should be empty") + } + } + + for _, urls := range [][]string{ + {"https://docker.io", "https://docker.com"}, + {"https://foo.docker.io:7777", "https://bar.docker.com"}, + } { + reqFrom, _ := http.NewRequest("GET", urls[0], nil) + reqFrom.Header.Add("Content-Type", "application/json") + reqFrom.Header.Add("Authorization", "super_secret") + reqTo, _ := http.NewRequest("GET", urls[1], nil) + + AddRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) + + if len(reqTo.Header) != 2 { + t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header)) + } + + if reqTo.Header.Get("Content-Type") != "application/json" { + t.Fatal("'Content-Type' should be 'application/json'") + } + + if reqTo.Header.Get("Authorization") != "super_secret" { + t.Fatal("'Authorization' should be 'super_secret'") + } + } +} diff --git a/runconfig/config.go b/runconfig/config.go index 33a7882b6f..8a069c64c7 100644 --- a/runconfig/config.go +++ b/runconfig/config.go @@ -12,9 +12,10 @@ type Config struct { Hostname string Domainname string User string - Memory int64 // Memory limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap - CpuShares int64 // CPU
shares (relative weight vs. other containers) + Memory int64 // Memory limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap + CpuShares int64 // CPU shares (relative weight vs. other containers) + Cpuset string // Cpuset 0-2, 0,1 AttachStdin bool AttachStdout bool AttachStderr bool @@ -41,6 +42,7 @@ func ContainerConfigFromJob(job *engine.Job) *Config { Memory: job.GetenvInt64("Memory"), MemorySwap: job.GetenvInt64("MemorySwap"), CpuShares: job.GetenvInt64("CpuShares"), + Cpuset: job.Getenv("Cpuset"), AttachStdin: job.GetenvBool("AttachStdin"), AttachStdout: job.GetenvBool("AttachStdout"), AttachStderr: job.GetenvBool("AttachStderr"), diff --git a/runconfig/config_test.go b/runconfig/config_test.go index f71528ff8e..b426253b9e 100644 --- a/runconfig/config_test.go +++ b/runconfig/config_test.go @@ -1,9 +1,10 @@ package runconfig import ( - "github.com/dotcloud/docker/nat" "strings" "testing" + + "github.com/dotcloud/docker/nat" ) func parse(t *testing.T, args string) (*Config, *HostConfig, error) { @@ -93,32 +94,20 @@ func TestParseRunVolumes(t *testing.T) { t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes) } - if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/containerTmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) } - if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" { + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/containerTmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes) - } else if _, exists := config.Volumes["/containerVar"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) } - if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" { + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. 
Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/containerTmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes) - } else if _, exists := config.Volumes["/containerVar"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) } if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/containerTmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes) } else if _, exists := config.Volumes["/containerVar"]; !exists { t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) } diff --git a/runconfig/merge.go b/runconfig/merge.go index 1240dbcacd..e30b4cec24 100644 --- a/runconfig/merge.go +++ b/runconfig/merge.go @@ -1,9 +1,10 @@ package runconfig import ( + "strings" + "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/utils" - "strings" ) func Merge(userConf, imageConf *Config) error { @@ -65,15 +66,6 @@ func Merge(userConf, imageConf *Config) error { } } - if !userConf.Tty { - userConf.Tty = imageConf.Tty - } - if !userConf.OpenStdin { - userConf.OpenStdin = imageConf.OpenStdin - } - if !userConf.StdinOnce { - userConf.StdinOnce = imageConf.StdinOnce - } if userConf.Env == nil || len(userConf.Env) == 0 { userConf.Env = imageConf.Env } else { @@ -91,6 +83,7 @@ func Merge(userConf, imageConf *Config) error { } } } + if userConf.Cmd == nil || len(userConf.Cmd) == 0 { userConf.Cmd = imageConf.Cmd } diff --git a/runconfig/parse.go b/runconfig/parse.go index f27adc2cae..0fa287adb1 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -10,6 +10,7 @@ import ( "github.com/dotcloud/docker/opts" flag "github.com/dotcloud/docker/pkg/mflag" "github.com/dotcloud/docker/pkg/sysinfo" + "github.com/dotcloud/docker/pkg/units" "github.com/dotcloud/docker/utils" ) @@ -17,6 +18,7 @@ var ( ErrInvalidWorkingDirectory = fmt.Errorf("The working directory is invalid. 
It needs to be an absolute path.") ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") + ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: -h and --net") ) //FIXME Only used in tests @@ -62,6 +64,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID") flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flCpuset = cmd.String([]string{"-cpuset"}, "", "CPUs in which to allow execution (0-3, 0,1)") flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container\n'bridge': creates a new network stack for the container on the docker bridge\n'none': no networking for this container\n'container:': reuses another container network stack\n'host': use the host network stack inside the contaner") // For documentation purpose _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") @@ -101,6 +104,10 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf return nil, nil, cmd, ErrConflictDetachAutoRemove } + if *flNetMode != "bridge" && *flHostname != "" { + return nil, nil, cmd, ErrConflictNetworkHostname + } + // If neither -d or -a are set, attach to everything by default if flAttach.Len() == 0 && !*flDetach { if !*flDetach { @@ -114,7 +121,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf var flMemory int64 if *flMemoryString != "" { - parsedMemory, err := utils.RAMInBytes(*flMemoryString) + parsedMemory, err := units.RAMInBytes(*flMemoryString) if err != nil { return nil, nil, cmd, err } @@ -128,8 +135,8 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf if arr[0] == "/" { return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'") } - dstDir := arr[1] - flVolumes.Set(dstDir) + // after creating the bind mount we want to delete it from the flVolumes values because + // we do not want bind mounts being committed to image configs binds = append(binds, bind) flVolumes.Delete(bind) } else if bind == "/" { @@ -214,6 +221,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf OpenStdin: *flStdin, Memory: flMemory, CpuShares: *flCpuShares, + Cpuset: *flCpuset, AttachStdin: flAttach.Get("stdin"), AttachStdout: flAttach.Get("stdout"), AttachStderr: flAttach.Get("stderr"), diff --git a/server/MAINTAINERS b/server/MAINTAINERS index aee10c8421..3564d3db47 100644 --- a/server/MAINTAINERS +++ b/server/MAINTAINERS @@ -1 +1,2 @@ Solomon Hykes (@shykes) +Victor Vieux (@vieux) \ No newline at end of file diff --git a/server/buildfile.go b/server/buildfile.go index 5ab3b5ae8b..26fc49890a 100644 --- a/server/buildfile.go +++ b/server/buildfile.go @@ -16,10 +16,13 @@ import ( "regexp" "sort" "strings" + "syscall" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/daemon" "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/pkg/symlink" + "github.com/dotcloud/docker/pkg/system" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" @@ -49,6 +52,7 @@ type buildFile struct { verbose bool utilizeCache bool rm bool + forceRm bool authConfig *registry.AuthConfig configFile 
*registry.ConfigFile @@ -336,7 +340,7 @@ func (b *buildFile) CmdInsert(args string) error { } func (b *buildFile) CmdCopy(args string) error { - return fmt.Errorf("COPY has been deprecated. Please use ADD instead") + return b.runContextCommand(args, false, false, "COPY") } func (b *buildFile) CmdWorkdir(workdir string) error { @@ -395,24 +399,34 @@ func (b *buildFile) checkPathForAddition(orig string) error { return nil } -func (b *buildFile) addContext(container *daemon.Container, orig, dest string, remote bool) error { +func (b *buildFile) addContext(container *daemon.Container, orig, dest string, decompress bool) error { var ( - err error - origPath = path.Join(b.contextPath, orig) - destPath = path.Join(container.RootfsPath(), dest) + err error + destExists = true + origPath = path.Join(b.contextPath, orig) + destPath = path.Join(container.RootfsPath(), dest) ) if destPath != container.RootfsPath() { - destPath, err = utils.FollowSymlinkInScope(destPath, container.RootfsPath()) + destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath()) if err != nil { return err } } // Preserve the trailing '/' - if strings.HasSuffix(dest, "/") { + if strings.HasSuffix(dest, "/") || dest == "." { destPath = destPath + "/" } + + destStat, err := os.Stat(destPath) + if err != nil { + if !os.IsNotExist(err) { + return err + } + destExists = false + } + fi, err := os.Stat(origPath) if err != nil { if os.IsNotExist(err) { @@ -421,45 +435,29 @@ func (b *buildFile) addContext(container *daemon.Container, orig, dest string, r return err } - chownR := func(destPath string, uid, gid int) error { - return filepath.Walk(destPath, func(path string, info os.FileInfo, err error) error { - if err := os.Lchown(path, uid, gid); err != nil { - return err - } - return nil - }) - } - if fi.IsDir() { - if err := archive.CopyWithTar(origPath, destPath); err != nil { - return err - } - if err := chownR(destPath, 0, 0); err != nil { - return err - } - return nil + return copyAsDirectory(origPath, destPath, destExists) } - // First try to unpack the source as an archive - // to support the untar feature we need to clean up the path a little bit - // because tar is very forgiving. First we need to strip off the archive's - // filename from the path but this is only added if it does not end in / . - tarDest := destPath - if strings.HasSuffix(tarDest, "/") { - tarDest = filepath.Dir(destPath) - } + // If we are adding a remote file (or we've been told not to decompress), do not try to untar it + if decompress { + // First try to unpack the source as an archive + // to support the untar feature we need to clean up the path a little bit + // because tar is very forgiving. First we need to strip off the archive's + // filename from the path but this is only added if it does not end in / . 
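// For orientation, a sketch of how the two new booleans are wired (both
// call sites appear verbatim in this patch; the arrow notation is
// illustrative, not code):
//
//	CmdAdd  -> b.runContextCommand(args, true, true, "ADD")     // may fetch URLs, unpacks local archives
//	CmdCopy -> b.runContextCommand(args, false, false, "COPY")  // local sources only, copied verbatim
//
// So when decompress is false, the untar attempt below is skipped and the
// fallback path copies the source as a plain file.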
+ tarDest := destPath + if strings.HasSuffix(tarDest, "/") { + tarDest = filepath.Dir(destPath) + } - // If we are adding a remote file, do not try to untar it - if !remote { // try to successfully untar the orig if err := archive.UntarPath(origPath, tarDest); err == nil { return nil + } else if err != io.EOF { + utils.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err) } - utils.Debugf("Couldn't untar %s to %s: %s", origPath, destPath, err) } - // If that fails, just copy it as a regular file - // but do not use all the magic path handling for the tar path if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { return err } @@ -467,19 +465,21 @@ func (b *buildFile) addContext(container *daemon.Container, orig, dest string, r return err } - if err := chownR(destPath, 0, 0); err != nil { - return err + resPath := destPath + if destExists && destStat.IsDir() { + resPath = path.Join(destPath, path.Base(origPath)) } - return nil + + return fixPermissions(resPath, 0, 0) } -func (b *buildFile) CmdAdd(args string) error { +func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error { if b.context == nil { - return fmt.Errorf("No context given. Impossible to use ADD") + return fmt.Errorf("No context given. Impossible to use %s", cmdName) } tmp := strings.SplitN(args, " ", 2) if len(tmp) != 2 { - return fmt.Errorf("Invalid ADD format") + return fmt.Errorf("Invalid %s format", cmdName) } orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t")) @@ -493,7 +493,8 @@ func (b *buildFile) CmdAdd(args string) error { } cmd := b.config.Cmd - b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", orig, dest)} + b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)} + defer func(cmd []string) { b.config.Cmd = cmd }(cmd) b.config.Image = b.image var ( @@ -501,11 +502,14 @@ func (b *buildFile) CmdAdd(args string) error { destPath = dest remoteHash string isRemote bool + decompress = true ) - if utils.IsURL(orig) { + isRemote = utils.IsURL(orig) + if isRemote && !allowRemote { + return fmt.Errorf("Source can't be a URL for %s", cmdName) + } else if utils.IsURL(orig) { // Initiate the download - isRemote = true resp, err := utils.Download(orig) if err != nil { return err @@ -532,6 +536,11 @@ func (b *buildFile) CmdAdd(args string) error { } tmpFile.Close() + // Reset the mtime of the newly created tmp file to the zero epoch + if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil { + return err + } + origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) // Process the checksum @@ -539,7 +548,10 @@ func (b *buildFile) CmdAdd(args string) error { if err != nil { return err } - tarSum := utils.TarSum{Reader: r, DisableCompression: true} + tarSum := &utils.TarSum{Reader: r, DisableCompression: true} + if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { + return err + } remoteHash = tarSum.Sum(nil) r.Close() @@ -599,7 +611,7 @@ func (b *buildFile) CmdAdd(args string) error { hash = "file:" + h } } - b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", hash, dest)} + b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)} hit, err := b.probeCache() if err != nil { return err @@ -610,7 +622,7 @@ func (b *buildFile) CmdAdd(args string) error { } } - // Create the container and start it + // Create the container container, _, err := b.daemon.Create(b.config, "") if err != nil {
return err @@ -622,24 +634,30 @@ func (b *buildFile) CmdAdd(args string) error { } defer container.Unmount() - if err := b.addContext(container, origPath, destPath, isRemote); err != nil { + if !allowDecompression || isRemote { + decompress = false + } + if err := b.addContext(container, origPath, destPath, decompress); err != nil { return err } - if err := b.commit(container.ID, cmd, fmt.Sprintf("ADD %s in %s", orig, dest)); err != nil { + if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, orig, dest)); err != nil { return err } - b.config.Cmd = cmd return nil } +func (b *buildFile) CmdAdd(args string) error { + return b.runContextCommand(args, true, true, "ADD") +} + func (b *buildFile) create() (*daemon.Container, error) { if b.image == "" { return nil, fmt.Errorf("Please provide a source image with `from` prior to run") } b.config.Image = b.image - // Create the container and start it + // Create the container c, _, err := b.daemon.Create(b.config, "") if err != nil { return nil, err @@ -780,6 +798,9 @@ func (b *buildFile) Build(context io.Reader) (string, error) { continue } if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil { + if b.forceRm { + b.clearTmp(b.tmpContainers) + } return "", err } else if b.rm { b.clearTmp(b.tmpContainers) @@ -832,7 +853,38 @@ func stripComments(raw []byte) string { return strings.Join(out, "\n") } -func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile { +func copyAsDirectory(source, destination string, destinationExists bool) error { + if err := archive.CopyWithTar(source, destination); err != nil { + return err + } + + if destinationExists { + files, err := ioutil.ReadDir(source) + if err != nil { + return err + } + + for _, file := range files { + if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil { + return err + } + } + return nil + } + + return fixPermissions(destination, 0, 0) +} + +func fixPermissions(destination string, uid, gid int) error { + return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error { + if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) { + return err + } + return nil + }) +} + +func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, forceRm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile { return &buildFile{ daemon: srv.daemon, srv: srv, @@ -844,6 +896,7 @@ func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeC verbose: verbose, utilizeCache: utilizeCache, rm: rm, + forceRm: forceRm, sf: sf, authConfig: auth, configFile: authConfigFile, diff --git a/server/server.go b/server/server.go index 3763f87dd5..93c6d39baf 100644 --- a/server/server.go +++ b/server/server.go @@ -27,6 +27,7 @@ import ( "io" "io/ioutil" "log" + "net" "net/http" "net/url" "os" @@ -34,14 +35,14 @@ import ( gosignal "os/signal" "path" "path/filepath" - goruntime "runtime" + "runtime" "strconv" "strings" "sync" + "sync/atomic" "syscall" "time" - "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/daemon" "github.com/dotcloud/docker/daemonconfig" @@ -54,8 +55,20 @@ import ( "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" + 
"github.com/dotcloud/docker/utils/filters" ) +func (srv *Server) handlerWrap(h engine.Handler) engine.Handler { + return func(job *engine.Job) engine.Status { + if !srv.IsRunning() { + return job.Errorf("Server is not running") + } + srv.tasks.Add(1) + defer srv.tasks.Done() + return h(job) + } +} + // jobInitApi runs the remote api server `srv` as a daemon, // Only one api server can run at the same time - this is enforced by a pidfile. // The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup. @@ -76,17 +89,17 @@ func InitServer(job *engine.Job) engine.Status { c := make(chan os.Signal, 1) gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT) go func() { - interruptCount := 0 + interruptCount := uint32(0) for sig := range c { - go func() { + go func(sig os.Signal) { log.Printf("Received signal '%v', starting shutdown of docker...\n", sig) switch sig { case os.Interrupt, syscall.SIGTERM: // If the user really wants to interrupt, let him do so. - if interruptCount < 3 { - interruptCount++ + if atomic.LoadUint32(&interruptCount) < 3 { + atomic.AddUint32(&interruptCount, 1) // Initiate the cleanup only once - if interruptCount == 1 { + if atomic.LoadUint32(&interruptCount) == 1 { utils.RemovePidFile(srv.daemon.Config().Pidfile) srv.Close() } else { @@ -98,7 +111,7 @@ func InitServer(job *engine.Job) engine.Status { case syscall.SIGQUIT: } os.Exit(128 + int(sig.(syscall.Signal))) - }() + }(sig) } }() job.Eng.Hack_SetGlobalVar("httpapi.server", srv) @@ -112,8 +125,10 @@ func InitServer(job *engine.Job) engine.Status { "restart": srv.ContainerRestart, "start": srv.ContainerStart, "kill": srv.ContainerKill, + "pause": srv.ContainerPause, + "unpause": srv.ContainerUnpause, "wait": srv.ContainerWait, - "tag": srv.ImageTag, + "tag": srv.ImageTag, // FIXME merge with "image_tag" "resize": srv.ContainerResize, "commit": srv.ContainerCommit, "info": srv.DockerInfo, @@ -123,26 +138,64 @@ func InitServer(job *engine.Job) engine.Status { "history": srv.ImageHistory, "viz": srv.ImagesViz, "container_copy": srv.ContainerCopy, - "insert": srv.ImageInsert, "attach": srv.ContainerAttach, "logs": srv.ContainerLogs, "changes": srv.ContainerChanges, "top": srv.ContainerTop, - "version": srv.DockerVersion, "load": srv.ImageLoad, "build": srv.Build, "pull": srv.ImagePull, "import": srv.ImageImport, "image_delete": srv.ImageDelete, - "inspect": srv.JobInspect, "events": srv.Events, "push": srv.ImagePush, "containers": srv.Containers, } { - if err := job.Eng.Register(name, handler); err != nil { + if err := job.Eng.Register(name, srv.handlerWrap(handler)); err != nil { return job.Error(err) } } + // Install image-related commands from the image subsystem. + // See `graph/service.go` + if err := srv.daemon.Repositories().Install(job.Eng); err != nil { + return job.Error(err) + } + // Install daemon-related commands from the daemon subsystem. 
+ // See `daemon/` + if err := srv.daemon.Install(job.Eng); err != nil { + return job.Error(err) + } + srv.SetRunning(true) + return engine.StatusOK +} + +func (srv *Server) ContainerPause(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER", job.Name) + } + name := job.Args[0] + container := srv.daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + if err := container.Pause(); err != nil { + return job.Errorf("Cannot pause container %s: %s", name, err) + } + return engine.StatusOK +} + +func (srv *Server) ContainerUnpause(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER", job.Name) + } + name := job.Args[0] + container := srv.daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + if err := container.Unpause(); err != nil { + return job.Errorf("Cannot unpause container %s: %s", name, err) + } return engine.StatusOK } @@ -195,13 +248,22 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status { return engine.StatusOK } +func (srv *Server) EvictListener(from int64) { + srv.Lock() + if old, ok := srv.listeners[from]; ok { + delete(srv.listeners, from) + close(old) + } + srv.Unlock() +} + func (srv *Server) Events(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s FROM", job.Name) + if len(job.Args) != 0 { + return job.Errorf("Usage: %s", job.Name) } var ( - from = job.Args[0] + from = time.Now().UTC().UnixNano() since = job.GetenvInt64("since") until = job.GetenvInt64("until") timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now())) @@ -212,15 +274,7 @@ func (srv *Server) Events(job *engine.Job) engine.Status { return fmt.Errorf("JSON error") } _, err = job.Stdout.Write(b) - if err != nil { - // On error, evict the listener - utils.Errorf("%s", err) - srv.Lock() - delete(srv.listeners, from) - srv.Unlock() - return err - } - return nil + return err } listener := make(chan utils.JSONMessage) @@ -241,8 +295,9 @@ func (srv *Server) Events(job *engine.Job) engine.Status { continue } if err != nil { - job.Error(err) - return engine.StatusErr + // On error, evict the listener + srv.EvictListener(from) + return job.Error(err) } } } @@ -254,12 +309,17 @@ func (srv *Server) Events(job *engine.Job) engine.Status { } for { select { - case event := <-listener: + case event, ok := <-listener: + if !ok { // Channel is closed: listener was evicted + return engine.StatusOK + } err := sendEvent(&event) if err != nil && err.Error() == "JSON error" { continue } if err != nil { + // On error, evict the listener + srv.EvictListener(from) return job.Error(err) } case <-timeout.C: @@ -317,12 +377,7 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status { } if rootRepo != nil { for _, id := range rootRepo { - image, err := srv.ImageInspect(id) - if err != nil { - return job.Error(err) - } - - if err := srv.exportImage(image, tempdir); err != nil { + if err := srv.exportImage(job.Eng, id, tempdir); err != nil { return job.Error(err) } } @@ -336,11 +391,7 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status { return job.Error(err) } } else { - image, err := srv.ImageInspect(name) - if err != nil { - return job.Error(err) - } - if err := srv.exportImage(image, tempdir); err != nil { + if err := srv.exportImage(job.Eng, name, tempdir); err != nil { return job.Error(err) } } @@ -354,13 +405,14 @@ func (srv *Server) ImageExport(job *engine.Job)
engine.Status { if _, err := io.Copy(job.Stdout, fs); err != nil { return job.Error(err) } + utils.Debugf("End Serializing %s", name) return engine.StatusOK } -func (srv *Server) exportImage(img *image.Image, tempdir string) error { - for i := img; i != nil; { +func (srv *Server) exportImage(eng *engine.Engine, name, tempdir string) error { + for n := name; n != ""; { // temporary directory - tmpImageDir := path.Join(tempdir, i.ID) + tmpImageDir := path.Join(tempdir, n) if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil { if os.IsExist(err) { return nil @@ -376,44 +428,34 @@ func (srv *Server) exportImage(img *image.Image, tempdir string) error { } // serialize json - b, err := json.Marshal(i) + json, err := os.Create(path.Join(tmpImageDir, "json")) if err != nil { return err } - if err := ioutil.WriteFile(path.Join(tmpImageDir, "json"), b, os.FileMode(0644)); err != nil { + job := eng.Job("image_inspect", n) + job.Stdout.Add(json) + if err := job.Run(); err != nil { return err } // serialize filesystem - fs, err := i.TarLayer() - if err != nil { - return err - } - defer fs.Close() - fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar")) if err != nil { return err } - if written, err := io.Copy(fsTar, fs); err != nil { - return err - } else { - utils.Debugf("rendered layer for %s of [%d] size", i.ID, written) - } - - if err = fsTar.Close(); err != nil { + job = eng.Job("image_tarlayer", n) + job.Stdout.Add(fsTar) + if err := job.Run(); err != nil { return err } // find parent - if i.Parent != "" { - i, err = srv.ImageInspect(i.Parent) - if err != nil { - return err - } - } else { - i = nil + job = eng.Job("image_get", n) + info, _ := job.Stdout.AddEnv() + if err := job.Run(); err != nil { + return err } + n = info.Get("Parent") } return nil } @@ -428,6 +470,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status { suppressOutput = job.GetenvBool("q") noCache = job.GetenvBool("nocache") rm = job.GetenvBool("rm") + forceRm = job.GetenvBool("forcerm") authConfig = ®istry.AuthConfig{} configFile = ®istry.ConfigFile{} tag string @@ -486,7 +529,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status { Writer: job.Stdout, StreamFormatter: sf, }, - !suppressOutput, !noCache, rm, job.Stdout, sf, authConfig, configFile) + !suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile) id, err := b.Build(context) if err != nil { return job.Error(err) @@ -538,7 +581,7 @@ func (srv *Server) ImageLoad(job *engine.Job) engine.Status { for _, d := range dirs { if d.IsDir() { - if err := srv.recursiveLoad(d.Name(), tmpImageDir); err != nil { + if err := srv.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil { return job.Error(err) } } @@ -565,8 +608,8 @@ func (srv *Server) ImageLoad(job *engine.Job) engine.Status { return engine.StatusOK } -func (srv *Server) recursiveLoad(address, tmpImageDir string) error { - if _, err := srv.ImageInspect(address); err != nil { +func (srv *Server) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error { + if err := eng.Job("image_get", address).Run(); err != nil { utils.Debugf("Loading %s", address) imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json")) @@ -587,7 +630,7 @@ func (srv *Server) recursiveLoad(address, tmpImageDir string) error { } if img.Parent != "" { if !srv.daemon.Graph().Exists(img.Parent) { - if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil { + if err := srv.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil { return err } } @@ -601,56 
+644,6 @@ func (srv *Server) recursiveLoad(address, tmpImageDir string) error { return nil } -// FIXME: 'insert' is deprecated and should be removed in a future version. -func (srv *Server) ImageInsert(job *engine.Job) engine.Status { - fmt.Fprintf(job.Stderr, "Warning: '%s' is deprecated and will be removed in a future version. Please use 'build' and 'ADD' instead.\n", job.Name) - if len(job.Args) != 3 { - return job.Errorf("Usage: %s IMAGE URL PATH\n", job.Name) - } - - var ( - name = job.Args[0] - url = job.Args[1] - path = job.Args[2] - ) - - sf := utils.NewStreamFormatter(job.GetenvBool("json")) - - out := utils.NewWriteFlusher(job.Stdout) - img, err := srv.daemon.Repositories().LookupImage(name) - if err != nil { - return job.Error(err) - } - - file, err := utils.Download(url) - if err != nil { - return job.Error(err) - } - defer file.Body.Close() - - config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.daemon.SystemConfig()) - if err != nil { - return job.Error(err) - } - - c, _, err := srv.daemon.Create(config, "") - if err != nil { - return job.Error(err) - } - - if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf, false, utils.TruncateID(img.ID), "Downloading"), path); err != nil { - return job.Error(err) - } - // FIXME: Handle custom repo, tag comment, author - img, err = srv.daemon.Commit(c, "", "", img.Comment, img.Author, nil) - if err != nil { - out.Write(sf.FormatError(err)) - return engine.StatusErr - } - out.Write(sf.FormatStatus("", img.ID)) - return engine.StatusOK -} - func (srv *Server) ImagesViz(job *engine.Job) engine.Status { images, _ := srv.daemon.Graph().Map() if images == nil { @@ -674,15 +667,7 @@ func (srv *Server) ImagesViz(job *engine.Job) engine.Status { } } - reporefs := make(map[string][]string) - - for name, repository := range srv.daemon.Repositories().Repositories { - for tag, id := range repository { - reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag)) - } - } - - for id, repos := range reporefs { + for id, repos := range srv.daemon.Repositories().GetRepoRefs() { job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n")) } job.Stdout.Write([]byte(" base [style=invisible]\n}\n")) @@ -691,10 +676,24 @@ func (srv *Server) ImagesViz(job *engine.Job) engine.Status { func (srv *Server) Images(job *engine.Job) engine.Status { var ( - allImages map[string]*image.Image - err error + allImages map[string]*image.Image + err error + filt_tagged = true ) - if job.GetenvBool("all") { + + imageFilters, err := filters.FromParam(job.Getenv("filters")) + if err != nil { + return job.Error(err) + } + if i, ok := imageFilters["dangling"]; ok { + for _, value := range i { + if strings.ToLower(value) == "true" { + filt_tagged = false + } + } + } + + if job.GetenvBool("all") && filt_tagged { allImages, err = srv.daemon.Graph().Map() } else { allImages, err = srv.daemon.Graph().Heads() @@ -703,6 +702,7 @@ func (srv *Server) Images(job *engine.Job) engine.Status { return job.Error(err) } lookup := make(map[string]*engine.Env) + srv.daemon.Repositories().Lock() for name, repository := range srv.daemon.Repositories().Repositories { if job.Getenv("filter") != "" { if match, _ := path.Match(job.Getenv("filter"), name); !match { @@ -717,21 +717,27 @@ func (srv *Server) Images(job *engine.Job) engine.Status { } if out, exists := lookup[id]; exists { - 
out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag))) + if filt_tagged { + out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag))) + } } else { - out := &engine.Env{} + // get the boolean list for if only the untagged images are requested delete(allImages, id) - out.Set("ParentId", image.Parent) - out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)}) - out.Set("Id", image.ID) - out.SetInt64("Created", image.Created.Unix()) - out.SetInt64("Size", image.Size) - out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) - lookup[id] = out + if filt_tagged { + out := &engine.Env{} + out.Set("ParentId", image.Parent) + out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)}) + out.Set("Id", image.ID) + out.SetInt64("Created", image.Created.Unix()) + out.SetInt64("Size", image.Size) + out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) + lookup[id] = out + } } } } + srv.daemon.Repositories().Unlock() outs := engine.NewTable("Created", len(lookup)) for _, value := range lookup { @@ -789,7 +795,7 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { v.SetBool("IPv4Forwarding", !srv.daemon.SystemConfig().IPv4ForwardingDisabled) v.SetBool("Debug", os.Getenv("DEBUG") != "") v.SetInt("NFd", utils.GetTotalUsedFds()) - v.SetInt("NGoroutines", goruntime.NumGoroutine()) + v.SetInt("NGoroutines", runtime.NumGoroutine()) v.Set("ExecutionDriver", srv.daemon.ExecutionDriver().Name()) v.SetInt("NEventsListener", len(srv.listeners)) v.Set("KernelVersion", kernelVersion) @@ -802,23 +808,6 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { return engine.StatusOK } -func (srv *Server) DockerVersion(job *engine.Job) engine.Status { - v := &engine.Env{} - v.Set("Version", dockerversion.VERSION) - v.SetJson("ApiVersion", api.APIVERSION) - v.Set("GitCommit", dockerversion.GITCOMMIT) - v.Set("GoVersion", goruntime.Version()) - v.Set("Os", goruntime.GOOS) - v.Set("Arch", goruntime.GOARCH) - if kernelVersion, err := utils.GetKernelVersion(); err == nil { - v.Set("KernelVersion", kernelVersion.String()) - } - if _, err := v.WriteTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - func (srv *Server) ImageHistory(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { return job.Errorf("Usage: %s IMAGE", job.Name) @@ -1057,8 +1046,12 @@ func (srv *Server) ContainerCommit(job *engine.Job) engine.Status { if container == nil { return job.Errorf("No such container: %s", name) } - var config = container.Config - var newConfig runconfig.Config + + var ( + config = container.Config + newConfig runconfig.Config + ) + if err := job.GetenvJson("config", &newConfig); err != nil { return job.Error(err) } @@ -1103,7 +1096,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin // ensure no two downloads of the same layer happen at the same time if c, err := srv.poolAdd("pull", "layer:"+id); err != nil { - utils.Errorf("Image (id: %s) pull is already running, skipping: %v", id, err) + utils.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err) <-c } defer srv.poolRemove("pull", "layer:"+id) @@ -1138,17 +1131,38 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin } } - // Get the layer - out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling fs layer", nil)) - layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token) - if err != nil { - 
out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) - return err - } - defer layer.Close() - if err := srv.daemon.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil { - out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil)) - return err + for j := 1; j <= retries; j++ { + // Get the layer + status := "Pulling fs layer" + if j > 1 { + status = fmt.Sprintf("Pulling fs layer [retries: %d]", j) + } + out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil)) + layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize)) + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } else if err != nil { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) + return err + } + defer layer.Close() + + err = srv.daemon.Graph().Register(imgJSON, + utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), + img) + if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } else if err != nil { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil)) + return err + } else { + break + } } } out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil)) @@ -1221,7 +1235,7 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName <-c out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) } else { - utils.Errorf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) + utils.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) } if parallel { errors <- nil @@ -1285,9 +1299,6 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName return err } } - if err := srv.daemon.Repositories().Save(); err != nil { - return err - } return nil } @@ -1351,7 +1362,7 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status { } job.GetenvJson("authConfig", authConfig) - job.GetenvJson("metaHeaders", metaHeaders) + job.GetenvJson("metaHeaders", &metaHeaders) c, err := srv.poolAdd("pull", localName+":"+tag) if err != nil { @@ -1581,7 +1592,7 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status { tag := job.Getenv("tag") job.GetenvJson("authConfig", authConfig) - job.GetenvJson("metaHeaders", metaHeaders) + job.GetenvJson("metaHeaders", &metaHeaders) if _, err := srv.poolAdd("push", localName); err != nil { return job.Error(err) } @@ -2043,37 +2054,6 @@ func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*imag return match, nil } -func (srv *Server) RegisterLinks(container *daemon.Container, hostConfig *runconfig.HostConfig) error { - daemon := srv.daemon - - if hostConfig != nil && hostConfig.Links != nil { - for _, l := range hostConfig.Links { - parts, err := utils.PartParser("name:alias", l) - if err != nil { - return err - } - child, err := srv.daemon.GetByName(parts["name"]) - if err != nil { - return err - } - if child == nil { - return fmt.Errorf("Could not get container for %s", parts["name"]) - } - if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil { - return err - } - } - - // After we load all the links into the daemon - 
// set them to nil on the hostconfig - hostConfig.Links = nil - if err := container.WriteHostConfig(); err != nil { - return err - } - } - return nil -} - func (srv *Server) ContainerStart(job *engine.Job) engine.Status { if len(job.Args) < 1 { return job.Errorf("Usage: %s container_id", job.Name) @@ -2114,7 +2094,7 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status { } } // Register any links from the host config before starting the container - if err := srv.RegisterLinks(container, hostConfig); err != nil { + if err := srv.daemon.RegisterLinks(container, hostConfig); err != nil { return job.Error(err) } container.SetHostConfig(hostConfig) @@ -2375,64 +2355,6 @@ func (srv *Server) ContainerAttach(job *engine.Job) engine.Status { return engine.StatusOK } -func (srv *Server) ContainerInspect(name string) (*daemon.Container, error) { - if container := srv.daemon.Get(name); container != nil { - return container, nil - } - return nil, fmt.Errorf("No such container: %s", name) -} - -func (srv *Server) ImageInspect(name string) (*image.Image, error) { - if image, err := srv.daemon.Repositories().LookupImage(name); err == nil && image != nil { - return image, nil - } - return nil, fmt.Errorf("No such image: %s", name) -} - -func (srv *Server) JobInspect(job *engine.Job) engine.Status { - // TODO: deprecate KIND/conflict - if n := len(job.Args); n != 2 { - return job.Errorf("Usage: %s CONTAINER|IMAGE KIND", job.Name) - } - var ( - name = job.Args[0] - kind = job.Args[1] - object interface{} - conflict = job.GetenvBool("conflict") //should the job detect conflict between containers and images - image, errImage = srv.ImageInspect(name) - container, errContainer = srv.ContainerInspect(name) - ) - - if conflict && image != nil && container != nil { - return job.Errorf("Conflict between containers and images") - } - - switch kind { - case "image": - if errImage != nil { - return job.Error(errImage) - } - object = image - case "container": - if errContainer != nil { - return job.Error(errContainer) - } - object = &struct { - *daemon.Container - HostConfig *runconfig.HostConfig - }{container, container.HostConfig()} - default: - return job.Errorf("Unknown kind: %s", kind) - } - - b, err := json.Marshal(object) - if err != nil { - return job.Error(err) - } - job.Stdout.Write(b) - return engine.StatusOK -} - func (srv *Server) ContainerCopy(job *engine.Job) engine.Status { if len(job.Args) != 2 { return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name) @@ -2470,8 +2392,7 @@ func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) pullingPool: make(map[string]chan struct{}), pushingPool: make(map[string]chan struct{}), events: make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events - listeners: make(map[string]chan utils.JSONMessage), - running: true, + listeners: make(map[int64]chan utils.JSONMessage), } daemon.SetServer(srv) return srv, nil @@ -2520,6 +2441,16 @@ func (srv *Server) Close() error { return nil } srv.SetRunning(false) + done := make(chan struct{}) + go func() { + srv.tasks.Wait() + close(done) + }() + select { + // Waiting server jobs for 15 seconds, shutdown immediately after that time + case <-time.After(time.Second * 15): + case <-done: + } if srv.daemon == nil { return nil } @@ -2532,7 +2463,8 @@ type Server struct { pullingPool map[string]chan struct{} pushingPool map[string]chan struct{} events []utils.JSONMessage - listeners map[string]chan utils.JSONMessage + listeners map[int64]chan utils.JSONMessage Eng *engine.Engine running 
bool + tasks sync.WaitGroup } diff --git a/server/server_unit_test.go b/server/server_unit_test.go index b471c5c581..47e4be8280 100644 --- a/server/server_unit_test.go +++ b/server/server_unit_test.go @@ -1,9 +1,10 @@ package server import ( - "github.com/dotcloud/docker/utils" "testing" "time" + + "github.com/dotcloud/docker/utils" ) func TestPools(t *testing.T) { @@ -47,14 +48,14 @@ func TestPools(t *testing.T) { func TestLogEvent(t *testing.T) { srv := &Server{ events: make([]utils.JSONMessage, 0, 64), - listeners: make(map[string]chan utils.JSONMessage), + listeners: make(map[int64]chan utils.JSONMessage), } srv.LogEvent("fakeaction", "fakeid", "fakeimage") listener := make(chan utils.JSONMessage) srv.Lock() - srv.listeners["test"] = listener + srv.listeners[1337] = listener srv.Unlock() srv.LogEvent("fakeaction2", "fakeid", "fakeimage") diff --git a/sysinit/README.md b/sysinit/README.md new file mode 100644 index 0000000000..c28d0298b8 --- /dev/null +++ b/sysinit/README.md @@ -0,0 +1,4 @@ +Sys Init code + +This code is run INSIDE the container and is responsible for setting +up the environment before running the actual process diff --git a/utils/filters/parse.go b/utils/filters/parse.go new file mode 100644 index 0000000000..27c7132e8e --- /dev/null +++ b/utils/filters/parse.go @@ -0,0 +1,63 @@ +package filters + +import ( + "encoding/json" + "errors" + "strings" +) + +type Args map[string][]string + +// Parse the argument to the filter flag. Like +// +// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` +// +// If prev map is provided, then it is appended to, and returned. By default a new +// map is created. +func ParseFlag(arg string, prev Args) (Args, error) { + var filters Args = prev + if prev == nil { + filters = Args{} + } + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrorBadFormat + } + + f := strings.SplitN(arg, "=", 2) + filters[f[0]] = append(filters[f[0]], f[1]) + + return filters, nil +} + +var ErrorBadFormat = errors.New("bad format of filter (expected name=value)") + +// packs the Args into an string for easy transport from client to server +func ToParam(a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if len(a) == 0 { + return "", nil + } + + buf, err := json.Marshal(a) + if err != nil { + return "", err + } + return string(buf), nil +} + +// unpacks the filter Args +func FromParam(p string) (Args, error) { + args := Args{} + if len(p) == 0 { + return args, nil + } + err := json.Unmarshal([]byte(p), &args) + if err != nil { + return nil, err + } + return args, nil +} diff --git a/utils/filters/parse_test.go b/utils/filters/parse_test.go new file mode 100644 index 0000000000..a248350223 --- /dev/null +++ b/utils/filters/parse_test.go @@ -0,0 +1,78 @@ +package filters + +import ( + "sort" + "testing" +) + +func TestParseArgs(t *testing.T) { + // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` + flagArgs := []string{ + "created=today", + "image.name=ubuntu*", + "image.name=*untu", + } + var ( + args = Args{} + err error + ) + for i := range flagArgs { + args, err = ParseFlag(flagArgs[i], args) + if err != nil { + t.Errorf("failed to parse %s: %s", flagArgs[i], err) + } + } + if len(args["created"]) != 1 { + t.Errorf("failed to set this arg") + } + if len(args["image.name"]) != 2 { + t.Errorf("the args should have collapsed") + } +} + +func TestParam(t *testing.T) { + a := Args{ + "created": []string{"today"}, + "image.name": 
[]string{"ubuntu*", "*untu"}, + } + + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + for key, vals := range v1 { + if _, ok := a[key]; !ok { + t.Errorf("could not find key %s in original set", key) + } + sort.Strings(vals) + sort.Strings(a[key]) + if len(vals) != len(a[key]) { + t.Errorf("value lengths ought to match") + continue + } + for i := range vals { + if vals[i] != a[key][i] { + t.Errorf("expected %s, but got %s", a[key][i], vals[i]) + } + } + } +} + +func TestEmpty(t *testing.T) { + a := Args{} + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + if len(a) != len(v1) { + t.Errorf("these should both be empty sets") + } +} diff --git a/utils/http.go b/utils/http.go index 68e93d8eb9..e193633792 100644 --- a/utils/http.go +++ b/utils/http.go @@ -1,7 +1,6 @@ package utils import ( - "bytes" "io" "net/http" "strings" @@ -15,11 +14,13 @@ type VersionInfo interface { } func validVersion(version VersionInfo) bool { - stopChars := " \t\r\n/" - if strings.ContainsAny(version.Name(), stopChars) { + const stopChars = " \t\r\n/" + name := version.Name() + vers := version.Version() + if len(name) == 0 || strings.ContainsAny(name, stopChars) { return false } - if strings.ContainsAny(version.Version(), stopChars) { + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { return false } return true @@ -36,27 +37,18 @@ func appendVersions(base string, versions ...VersionInfo) string { return base } - var buf bytes.Buffer + verstrs := make([]string, 0, 1+len(versions)) if len(base) > 0 { - buf.Write([]byte(base)) + verstrs = append(verstrs, base) } for _, v := range versions { - name := []byte(v.Name()) - version := []byte(v.Version()) - - if len(name) == 0 || len(version) == 0 { - continue - } if !validVersion(v) { continue } - buf.Write([]byte(v.Name())) - buf.Write([]byte("/")) - buf.Write([]byte(v.Version())) - buf.Write([]byte(" ")) + verstrs = append(verstrs, v.Name()+"/"+v.Version()) } - return buf.String() + return strings.Join(verstrs, " ") } // HTTPRequestDecorator is used to change an instance of diff --git a/utils/jsonmessage.go b/utils/jsonmessage.go index 6be421be94..d6546e3ee6 100644 --- a/utils/jsonmessage.go +++ b/utils/jsonmessage.go @@ -3,10 +3,12 @@ package utils import ( "encoding/json" "fmt" - "github.com/dotcloud/docker/pkg/term" "io" "strings" "time" + + "github.com/dotcloud/docker/pkg/term" + "github.com/dotcloud/docker/pkg/units" ) type JSONError struct { @@ -41,11 +43,11 @@ func (p *JSONProgress) String() string { if p.Current <= 0 && p.Total <= 0 { return "" } - current := HumanSize(int64(p.Current)) + current := units.HumanSize(int64(p.Current)) if p.Total <= 0 { return fmt.Sprintf("%8v", current) } - total := HumanSize(int64(p.Total)) + total := units.HumanSize(int64(p.Total)) percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 if width > 110 { pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", 50-percentage)) diff --git a/utils/resumablerequestreader.go b/utils/resumablerequestreader.go new file mode 100644 index 0000000000..e01f4e6d71 --- /dev/null +++ b/utils/resumablerequestreader.go @@ -0,0 +1,87 @@ +package utils + +import ( + "fmt" + "io" + "net/http" + "time" +) + +type resumableRequestReader struct { + client *http.Client + request *http.Request + lastRange int64 + totalSize 
int64 + currentResponse *http.Response + failures uint32 + maxFailures uint32 +} + +// ResumableRequestReader makes it possible to resume reading a request's body transparently +// maxfail is the number of times we retry to make requests again (not resumes) +// totalsize is the total length of the body; auto detect if not provided +func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} +} + +func (r *resumableRequestReader) Read(p []byte) (n int, err error) { + if r.client == nil || r.request == nil { + return 0, fmt.Errorf("client and request can't be nil\n") + } + isFreshRequest := false + if r.lastRange != 0 && r.currentResponse == nil { + readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) + r.request.Header.Set("Range", readRange) + time.Sleep(5 * time.Second) + } + if r.currentResponse == nil { + r.currentResponse, err = r.client.Do(r.request) + isFreshRequest = true + } + if err != nil && r.failures+1 != r.maxFailures { + r.cleanUpResponse() + r.failures += 1 + time.Sleep(5 * time.Duration(r.failures) * time.Second) + return 0, nil + } else if err != nil { + r.cleanUpResponse() + return 0, err + } + if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { + r.cleanUpResponse() + return 0, io.EOF + } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { + r.cleanUpResponse() + return 0, fmt.Errorf("the server doesn't support byte ranges") + } + if r.totalSize == 0 { + r.totalSize = r.currentResponse.ContentLength + } else if r.totalSize <= 0 { + r.cleanUpResponse() + return 0, fmt.Errorf("failed to auto detect content length") + } + n, err = r.currentResponse.Body.Read(p) + r.lastRange += int64(n) + if err != nil { + r.cleanUpResponse() + } + if err != nil && err != io.EOF { + Debugf("encountered error during pull and clearing it before resume: %s", err) + err = nil + } + return n, err +} + +func (r *resumableRequestReader) Close() error { + r.cleanUpResponse() + r.client = nil + r.request = nil + return nil +} + +func (r *resumableRequestReader) cleanUpResponse() { + if r.currentResponse != nil { + r.currentResponse.Body.Close() + r.currentResponse = nil + } +} diff --git a/utils/signal_freebsd.go b/utils/signal_freebsd.go deleted file mode 100644 index 65a700e894..0000000000 --- a/utils/signal_freebsd.go +++ /dev/null @@ -1,42 +0,0 @@ -package utils - -import ( - "os" - "os/signal" - "syscall" -) - -func CatchAll(sigc chan os.Signal) { - signal.Notify(sigc, - syscall.SIGABRT, - syscall.SIGALRM, - syscall.SIGBUS, - syscall.SIGCHLD, - syscall.SIGCONT, - syscall.SIGFPE, - syscall.SIGHUP, - syscall.SIGILL, - syscall.SIGINT, - syscall.SIGIO, - syscall.SIGIOT, - syscall.SIGKILL, - syscall.SIGPIPE, - syscall.SIGPROF, - syscall.SIGQUIT, - syscall.SIGSEGV, - syscall.SIGSTOP, - syscall.SIGSYS, - syscall.SIGTERM, - syscall.SIGTRAP, - syscall.SIGTSTP, - syscall.SIGTTIN, - syscall.SIGTTOU, - syscall.SIGURG, - syscall.SIGUSR1, - syscall.SIGUSR2, - syscall.SIGVTALRM, - syscall.SIGWINCH, - syscall.SIGXCPU, - syscall.SIGXFSZ, - ) -} diff --git a/utils/streamformatter.go b/utils/streamformatter.go index d2758d3ca6..d0bc295bb3 100644 --- a/utils/streamformatter.go +++ b/utils/streamformatter.go @@ -8,11 +8,10 @@ import ( type StreamFormatter struct { json bool - used bool } func NewStreamFormatter(json bool) *StreamFormatter { - return 
&StreamFormatter{json, false} + return &StreamFormatter{json} } const streamNewline = "\r\n" @@ -20,7 +19,6 @@ const streamNewline = "\r\n" var streamNewlineBytes = []byte(streamNewline) func (sf *StreamFormatter) FormatStream(str string) []byte { - sf.used = true if sf.json { b, err := json.Marshal(&JSONMessage{Stream: str}) if err != nil { @@ -32,7 +30,6 @@ func (sf *StreamFormatter) FormatStream(str string) []byte { } func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { - sf.used = true str := fmt.Sprintf(format, a...) if sf.json { b, err := json.Marshal(&JSONMessage{ID: id, Status: str}) @@ -45,7 +42,6 @@ func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []b } func (sf *StreamFormatter) FormatError(err error) []byte { - sf.used = true if sf.json { jsonError, ok := err.(*JSONError) if !ok { @@ -63,7 +59,6 @@ func (sf *StreamFormatter) FormatProgress(id, action string, progress *JSONProgr if progress == nil { progress = &JSONProgress{} } - sf.used = true if sf.json { b, err := json.Marshal(&JSONMessage{ @@ -84,10 +79,6 @@ func (sf *StreamFormatter) FormatProgress(id, action string, progress *JSONProgr return []byte(action + " " + progress.String() + endl) } -func (sf *StreamFormatter) Used() bool { - return sf.used -} - func (sf *StreamFormatter) Json() bool { return sf.json } diff --git a/utils/streamformatter_test.go b/utils/streamformatter_test.go new file mode 100644 index 0000000000..20610f6c01 --- /dev/null +++ b/utils/streamformatter_test.go @@ -0,0 +1,67 @@ +package utils + +import ( + "encoding/json" + "errors" + "reflect" + "testing" +) + +func TestFormatStream(t *testing.T) { + sf := NewStreamFormatter(true) + res := sf.FormatStream("stream") + if string(res) != `{"stream":"stream"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatStatus(t *testing.T) { + sf := NewStreamFormatter(true) + res := sf.FormatStatus("ID", "%s%d", "a", 1) + if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatSimpleError(t *testing.T) { + sf := NewStreamFormatter(true) + res := sf.FormatError(errors.New("Error for formatter")) + if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatJSONError(t *testing.T) { + sf := NewStreamFormatter(true) + err := &JSONError{Code: 50, Message: "Json error"} + res := sf.FormatError(err) + if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatProgress(t *testing.T) { + sf := NewStreamFormatter(true) + progress := &JSONProgress{ + Current: 15, + Total: 30, + Start: 1, + } + res := sf.FormatProgress("id", "action", progress) + msg := &JSONMessage{} + if err := json.Unmarshal(res, msg); err != nil { + t.Fatal(err) + } + if msg.ID != "id" { + t.Fatalf("ID must be 'id', got: %s", msg.ID) + } + if msg.Status != "action" { + t.Fatalf("Status must be 'action', got: %s", msg.Status) + } + if msg.ProgressMessage != progress.String() { + t.Fatalf("ProgressMessage must be %s, got: %s", progress.String(), msg.ProgressMessage) + } + if !reflect.DeepEqual(msg.Progress, progress) { + t.Fatal("Original progress not equals progress from FormatProgress") + } +} diff --git a/utils/tarsum_test.go b/utils/tarsum_test.go new file mode 100644 index 0000000000..52ddd64590 --- /dev/null +++ b/utils/tarsum_test.go @@ -0,0 +1,224 @@ +package utils + +import ( + "bytes" + "crypto/rand" + 
"fmt" + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "io" + "io/ioutil" + "os" + "testing" +) + +type testLayer struct { + filename string + options *sizedOptions + jsonfile string + gzip bool + tarsum string +} + +var testLayers = []testLayer{ + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + tarsum: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + gzip: true, + tarsum: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"}, + { + filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", + jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", + tarsum: "tarsum+sha256:ac672ee85da9ab7f9667ae3c32841d3e42f33cc52c273c23341dabba1c8b0c8b"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, +} + +type sizedOptions struct { + num int64 + size int64 + isRand bool + realFile bool +} + +// make a tar: +// * num is the number of files the tar should have +// * size is the bytes per file +// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) +// * realFile will write to a TempFile, instead of an in memory buffer +func sizedTar(opts sizedOptions) io.Reader { + var ( + fh io.ReadWriter + err error + ) + if opts.realFile { + fh, err = ioutil.TempFile("", "tarsum") + if err != nil { + return nil + } + } else { + fh = bytes.NewBuffer([]byte{}) + } + tarW := tar.NewWriter(fh) + for i := int64(0); i < opts.num; i++ { + err := tarW.WriteHeader(&tar.Header{ + Name: fmt.Sprintf("/testdata%d", i), + Mode: 0755, + Uid: 0, + Gid: 0, + Size: opts.size, + }) + if err != nil { + return nil + } + var rBuf []byte + if opts.isRand { + rBuf = make([]byte, 8) + _, err = rand.Read(rBuf) + if err != nil { + return nil + } + } else { + rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} + } + + for i := int64(0); i < opts.size/int64(8); i++ { + tarW.Write(rBuf) + } + } + return fh +} + +func TestTarSums(t *testing.T) { + for _, layer := range testLayers { + var ( + fh io.Reader + err error + ) + if len(layer.filename) > 0 { + fh, err = os.Open(layer.filename) + if err != nil { + t.Errorf("failed to open %s: %s", layer.filename, err) + continue + } + } else if layer.options != nil { + fh = sizedTar(*layer.options) + } else { + // What else is there to test? + t.Errorf("what to do with %#V", layer) + continue + } + if file, ok := fh.(*os.File); ok { + defer file.Close() + } + + // double negatives! 
+ ts := &TarSum{Reader: fh, DisableCompression: !layer.gzip} + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to copy from %s: %s", layer.filename, err) + continue + } + var gotSum string + if len(layer.jsonfile) > 0 { + jfh, err := os.Open(layer.jsonfile) + if err != nil { + t.Errorf("failed to open %s: %s", layer.jsonfile, err) + continue + } + buf, err := ioutil.ReadAll(jfh) + if err != nil { + t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) + continue + } + gotSum = ts.Sum(buf) + } else { + gotSum = ts.Sum(nil) + } + + if layer.tarsum != gotSum { + t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) + } + } +} + +func Benchmark9kTar(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts := &TarSum{Reader: buf, DisableCompression: true} + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +func Benchmark9kTarGzip(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts := &TarSum{Reader: buf, DisableCompression: false} + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) +} + +func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { + var fh *os.File + tarReader := sizedTar(opts) + if br, ok := tarReader.(*os.File); ok { + fh = br + } + defer os.Remove(fh.Name()) + defer fh.Close() + + b.SetBytes(opts.size * opts.num) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts := &TarSum{Reader: fh, DisableCompression: !isGzip} + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + fh.Seek(0, 0) + } +} diff --git a/utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json new file mode 100644 index 0000000000..0f0ba4974d --- /dev/null +++ b/utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json @@ -0,0 +1 @@ 
+{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' /etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar new file mode 100644 index 0000000000..dfd5c204ae Binary files /dev/null and b/utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar differ diff --git a/utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json b/utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json new file mode 100644 index 0000000000..12c18a076f --- /dev/null +++ b/utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json @@ -0,0 +1 @@ +{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0} \ No newline at end of file diff --git a/utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar b/utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar new file mode 100644 index 0000000000..880b3f2c56 Binary files /dev/null and b/utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar differ diff --git a/utils/timeoutconn.go b/utils/timeoutconn.go new file mode 100644 index 0000000000..a3231c7ee3 --- /dev/null +++ b/utils/timeoutconn.go @@ -0,0 +1,26 @@ +package utils + +import ( + "net" + "time" +) + +func NewTimeoutConn(conn net.Conn, timeout time.Duration) net.Conn { + return &TimeoutConn{conn, timeout} +} + +// A net.Conn that sets a deadline for every Read 
or Write operation +type TimeoutConn struct { + net.Conn + timeout time.Duration +} + +func (c *TimeoutConn) Read(b []byte) (int, error) { + if c.timeout > 0 { + err := c.Conn.SetReadDeadline(time.Now().Add(c.timeout)) + if err != nil { + return 0, err + } + } + return c.Conn.Read(b) +} diff --git a/utils/utils.go b/utils/utils.go index 4ef44b5617..0495dc6fa8 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -16,11 +16,11 @@ import ( "os" "os/exec" "path/filepath" - "regexp" "runtime" "strconv" "strings" "sync" + "syscall" "time" "github.com/dotcloud/docker/dockerversion" @@ -83,79 +83,6 @@ func Errorf(format string, a ...interface{}) { logf("error", format, a...) } -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.) -func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours()); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*3 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%f years", d.Hours()/24/365) -} - -// HumanSize returns a human-readable approximation of a size -// using SI standard (eg. "44kB", "17MB") -func HumanSize(size int64) string { - i := 0 - var sizef float64 - sizef = float64(size) - units := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - for sizef >= 1000.0 { - sizef = sizef / 1000.0 - i++ - } - return fmt.Sprintf("%.4g %s", sizef, units[i]) -} - -// Parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes or gibibytes, and returns the -// number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. -func RAMInBytes(size string) (bytes int64, err error) { - re, error := regexp.Compile("^(\\d+)([kKmMgG])?[bB]?$") - if error != nil { - return -1, error - } - - matches := re.FindStringSubmatch(size) - - if len(matches) != 3 { - return -1, fmt.Errorf("Invalid size: '%s'", size) - } - - memLimit, error := strconv.ParseInt(matches[1], 10, 0) - if error != nil { - return -1, error - } - - unit := strings.ToLower(matches[2]) - - if unit == "k" { - memLimit *= 1024 - } else if unit == "m" { - memLimit *= 1024 * 1024 - } else if unit == "g" { - memLimit *= 1024 * 1024 * 1024 - } - - return memLimit, nil -} - func Trunc(s string, maxlen int) string { if len(s) <= maxlen { return s @@ -492,9 +419,7 @@ func NewTruncIndex(ids []string) (idx *TruncIndex) { return } -func (idx *TruncIndex) Add(id string) error { - idx.Lock() - defer idx.Unlock() +func (idx *TruncIndex) addId(id string) error { if strings.Contains(id, " ") { return fmt.Errorf("Illegal character: ' '") } @@ -503,10 +428,31 @@ func (idx *TruncIndex) Add(id string) error { } idx.ids[id] = true idx.bytes = append(idx.bytes, []byte(id+" ")...) 
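+ // The suffixarray is deliberately not rebuilt here: Add rebuilds it
+ // after every insert, while AddWithoutSuffixarrayUpdate lets callers
+ // batch many ids and rebuild once with UpdateSuffixarray.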
+ return nil +} + +func (idx *TruncIndex) Add(id string) error { + idx.Lock() + defer idx.Unlock() + if err := idx.addId(id); err != nil { + return err + } idx.index = suffixarray.New(idx.bytes) return nil } +func (idx *TruncIndex) AddWithoutSuffixarrayUpdate(id string) error { + idx.Lock() + defer idx.Unlock() + return idx.addId(id) +} + +func (idx *TruncIndex) UpdateSuffixarray() { + idx.Lock() + defer idx.Unlock() + idx.index = suffixarray.New(idx.bytes) +} + func (idx *TruncIndex) Delete(id string) error { idx.Lock() defer idx.Unlock() @@ -568,7 +514,7 @@ func GenerateRandomID() string { // if we try to parse the truncated for as an int and we don't have // an error then the value is all numberic and causes issues when // used as a hostname. ref #3869 - if _, err := strconv.Atoi(TruncateID(value)); err == nil { + if _, err := strconv.ParseInt(TruncateID(value), 10, 64); err == nil { continue } return value @@ -875,22 +821,6 @@ func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) { return fmt.Sprintf("%s://%s:%d", proto, host, port), nil } -func GetReleaseVersion() string { - resp, err := http.Get("https://get.docker.io/latest") - if err != nil { - return "" - } - defer resp.Body.Close() - if resp.ContentLength > 24 || resp.StatusCode != 200 { - return "" - } - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "" - } - return strings.TrimSpace(string(body)) -} - // Get a repos name and returns the right reposName + tag // The tag can be confusing because of a port in a repository name. // Ex: localhost.localdomain:5000/samalba/hipache:latest @@ -1091,3 +1021,70 @@ func ParseKeyValueOpt(opt string) (string, string, error) { } return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil } + +// TreeSize walks a directory tree and returns its total size in bytes. +func TreeSize(dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + // Check inode to handle hard links correctly + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[uint64(inode)]; exists { + return nil + } + // inode is not a uint64 on all platforms. Cast it to avoid issues. 
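+ // e.g. two hard links to one 10-byte file add 10 bytes to size, not 20.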
+ data[uint64(inode)] = struct{}{} + + size += s + + return nil + }) + return +} + +// ValidateContextDirectory checks if all the contents of the directory +// can be read and returns an error if some files can't be read +// symlinks which point to non-existing files don't trigger an error +func ValidateContextDirectory(srcPath string) error { + var finalError error + + filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error { + // skip this directory/file if it's not in the path, it won't get added to the context + _, err = filepath.Rel(srcPath, filePath) + if err != nil && os.IsPermission(err) { + return nil + } + + if _, err := os.Stat(filePath); err != nil && os.IsPermission(err) { + finalError = fmt.Errorf("can't stat '%s'", filePath) + return err + } + // skip checking if symlinks point to non-existing files, such symlinks can be useful + lstat, _ := os.Lstat(filePath) + if lstat.Mode()&os.ModeSymlink == os.ModeSymlink { + return err + } + + if !f.IsDir() { + currentFile, err := os.Open(filePath) + if err != nil && os.IsPermission(err) { + finalError = fmt.Errorf("no permission to read from '%s'", filePath) + return err + } else { + currentFile.Close() + } + } + return nil + }) + return finalError +} diff --git a/utils/utils_test.go b/utils/utils_test.go index ccd212202c..63d722ed07 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -6,7 +6,6 @@ import ( "io" "io/ioutil" "os" - "strings" "testing" ) @@ -271,54 +270,6 @@ func TestCompareKernelVersion(t *testing.T) { -1) } -func TestHumanSize(t *testing.T) { - - size := strings.Trim(HumanSize(1000), " \t") - expect := "1 kB" - if size != expect { - t.Errorf("1000 -> expected '%s', got '%s'", expect, size) - } - - size = strings.Trim(HumanSize(1024), " \t") - expect = "1.024 kB" - if size != expect { - t.Errorf("1024 -> expected '%s', got '%s'", expect, size) - } -} - -func TestRAMInBytes(t *testing.T) { - assertRAMInBytes(t, "32", false, 32) - assertRAMInBytes(t, "32b", false, 32) - assertRAMInBytes(t, "32B", false, 32) - assertRAMInBytes(t, "32k", false, 32*1024) - assertRAMInBytes(t, "32K", false, 32*1024) - assertRAMInBytes(t, "32kb", false, 32*1024) - assertRAMInBytes(t, "32Kb", false, 32*1024) - assertRAMInBytes(t, "32Mb", false, 32*1024*1024) - assertRAMInBytes(t, "32Gb", false, 32*1024*1024*1024) - - assertRAMInBytes(t, "", true, -1) - assertRAMInBytes(t, "hello", true, -1) - assertRAMInBytes(t, "-32", true, -1) - assertRAMInBytes(t, " 32 ", true, -1) - assertRAMInBytes(t, "32 mb", true, -1) - assertRAMInBytes(t, "32m b", true, -1) - assertRAMInBytes(t, "32bm", true, -1) -} - -func assertRAMInBytes(t *testing.T, size string, expectError bool, expectedBytes int64) { - actualBytes, err := RAMInBytes(size) - if (err != nil) && !expectError { - t.Errorf("Unexpected error parsing '%s': %s", size, err) - } - if (err == nil) && expectError { - t.Errorf("Expected to get an error parsing '%s', but got none (bytes=%d)", size, actualBytes) - } - if actualBytes != expectedBytes { - t.Errorf("Expected '%s' to parse as %d bytes, got %d", size, expectedBytes, actualBytes) - } -} - func TestParseHost(t *testing.T) { var ( defaultHttpHost = "127.0.0.1" @@ -351,7 +302,7 @@ func TestParseHost(t *testing.T) { if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1"); err == nil { t.Errorf("udp protocol address expected error return, but err == nil. 
Got %s", addr) } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1:4243"); err == nil { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1:2375"); err == nil { t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) } } diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go index e8b973c1fa..e363aa793e 100644 --- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go @@ -38,6 +38,7 @@ const ( TypeXGlobalHeader = 'g' // global extended header TypeGNULongName = 'L' // Next file has a long name TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name + TypeGNUSparse = 'S' // sparse file ) // A Header represents a single header in a tar archive. diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go index 7cb6e649c7..920a9b08f9 100644 --- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go @@ -29,12 +29,57 @@ const maxNanoSecondIntSize = 9 // The Next method advances to the next file in the archive (including the first), // and then it can be treated as an io.Reader to access the file's data. type Reader struct { - r io.Reader - err error - nb int64 // number of unread bytes for current file entry - pad int64 // amount of padding (ignored) after current file entry + r io.Reader + err error + pad int64 // amount of padding (ignored) after current file entry + curr numBytesReader // reader for current file entry } +// A numBytesReader is an io.Reader with a numBytes method, returning the number +// of bytes remaining in the underlying encoded data. +type numBytesReader interface { + io.Reader + numBytes() int64 +} + +// A regFileReader is a numBytesReader for reading file data from a tar archive. +type regFileReader struct { + r io.Reader // underlying reader + nb int64 // number of unread bytes for current file entry +} + +// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive. +type sparseFileReader struct { + rfr *regFileReader // reads the sparse-encoded file data + sp []sparseEntry // the sparse map for the file + pos int64 // keeps track of file position + tot int64 // total size of the file +} + +// Keywords for GNU sparse files in a PAX extended header +const ( + paxGNUSparseNumBlocks = "GNU.sparse.numblocks" + paxGNUSparseOffset = "GNU.sparse.offset" + paxGNUSparseNumBytes = "GNU.sparse.numbytes" + paxGNUSparseMap = "GNU.sparse.map" + paxGNUSparseName = "GNU.sparse.name" + paxGNUSparseMajor = "GNU.sparse.major" + paxGNUSparseMinor = "GNU.sparse.minor" + paxGNUSparseSize = "GNU.sparse.size" + paxGNUSparseRealSize = "GNU.sparse.realsize" +) + +// Keywords for old GNU sparse headers +const ( + oldGNUSparseMainHeaderOffset = 386 + oldGNUSparseMainHeaderIsExtendedOffset = 482 + oldGNUSparseMainHeaderNumEntries = 4 + oldGNUSparseExtendedHeaderIsExtendedOffset = 504 + oldGNUSparseExtendedHeaderNumEntries = 21 + oldGNUSparseOffsetSize = 12 + oldGNUSparseNumBytesSize = 12 +) + // NewReader creates a new Reader reading from r. 
func NewReader(r io.Reader) *Reader { return &Reader{r: r} } @@ -64,6 +109,18 @@ func (tr *Reader) Next() (*Header, error) { tr.skipUnread() hdr = tr.readHeader() mergePAX(hdr, headers) + + // Check for a PAX format sparse file + sp, err := tr.checkForGNUSparsePAXHeaders(hdr, headers) + if err != nil { + tr.err = err + return nil, err + } + if sp != nil { + // Current file is a PAX format GNU sparse file. + // Set the current file reader to a sparse file reader. + tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size} + } return hdr, nil case TypeGNULongName: // We have a GNU long name header. Its contents are the real file name. @@ -87,6 +144,67 @@ func (tr *Reader) Next() (*Header, error) { return hdr, tr.err } +// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then +// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to +// be treated as a regular file. +func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) { + var sparseFormat string + + // Check for sparse format indicators + major, majorOk := headers[paxGNUSparseMajor] + minor, minorOk := headers[paxGNUSparseMinor] + sparseName, sparseNameOk := headers[paxGNUSparseName] + _, sparseMapOk := headers[paxGNUSparseMap] + sparseSize, sparseSizeOk := headers[paxGNUSparseSize] + sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize] + + // Identify which, if any, sparse format applies from which PAX headers are set + if majorOk && minorOk { + sparseFormat = major + "." + minor + } else if sparseNameOk && sparseMapOk { + sparseFormat = "0.1" + } else if sparseSizeOk { + sparseFormat = "0.0" + } else { + // Not a PAX format GNU sparse file. + return nil, nil + } + + // Check for unknown sparse format + if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" { + return nil, nil + } + + // Update hdr from GNU sparse PAX headers + if sparseNameOk { + hdr.Name = sparseName + } + if sparseSizeOk { + realSize, err := strconv.ParseInt(sparseSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } else if sparseRealSizeOk { + realSize, err := strconv.ParseInt(sparseRealSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } + + // Set up the sparse map, according to the particular sparse format in use + var sp []sparseEntry + var err error + switch sparseFormat { + case "0.0", "0.1": + sp, err = readGNUSparseMap0x1(headers) + case "1.0": + sp, err = readGNUSparseMap1x0(tr.curr) + } + return sp, err +} + // mergePAX merges well known headers according to PAX standard. // In general headers with the same name as those found // in the header struct overwrite those found in the header @@ -194,6 +312,11 @@ func parsePAX(r io.Reader) (map[string]string, error) { if err != nil { return nil, err } + + // For GNU PAX sparse format 0.0 support. + // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers. + var sparseMap bytes.Buffer + headers := make(map[string]string) // Each record is constructed as // "%d %s=%s\n", length, keyword, value @@ -211,7 +334,7 @@ func parsePAX(r io.Reader) (map[string]string, error) { return nil, ErrHeader } // Extract everything between the decimal and the n -1 on the - // beginning to to eat the ' ', -1 on the end to skip the newline. + // beginning to eat the ' ', -1 on the end to skip the newline. 
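+ // For example, the raw record "30 mtime=1350244992.023960108\n"
+ // (30 bytes in total, counting the length digits, the space and the
+ // trailing newline) yields key "mtime" and value "1350244992.023960108".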
var record []byte record, buf = buf[sp+1:n-1], buf[n:] // The first equals is guaranteed to mark the end of the key. @@ -221,7 +344,21 @@ func parsePAX(r io.Reader) (map[string]string, error) { return nil, ErrHeader } key, value := record[:eq], record[eq+1:] - headers[string(key)] = string(value) + + keyStr := string(key) + if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes { + // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map. + sparseMap.Write(value) + sparseMap.Write([]byte{','}) + } else { + // Normal key. Set the value in the headers map. + headers[keyStr] = string(value) + } + } + if sparseMap.Len() != 0 { + // Add sparse info to headers, chopping off the extra comma + sparseMap.Truncate(sparseMap.Len() - 1) + headers[paxGNUSparseMap] = sparseMap.String() } return headers, nil } @@ -268,8 +405,8 @@ func (tr *Reader) octal(b []byte) int64 { // skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding. func (tr *Reader) skipUnread() { - nr := tr.nb + tr.pad // number of bytes to skip - tr.nb, tr.pad = 0, 0 + nr := tr.numBytes() + tr.pad // number of bytes to skip + tr.curr, tr.pad = nil, 0 if sr, ok := tr.r.(io.Seeker); ok { if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil { return @@ -331,14 +468,14 @@ func (tr *Reader) readHeader() *Header { // so its magic bytes, like the rest of the block, are NULs. magic := string(s.next(8)) // contains version field as well. var format string - switch magic { - case "ustar\x0000": // POSIX tar (1003.1-1988) + switch { + case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988) if string(header[508:512]) == "tar\x00" { format = "star" } else { format = "posix" } - case "ustar \x00": // old GNU tar + case magic == "ustar \x00": // old GNU tar format = "gnu" } @@ -373,30 +510,308 @@ func (tr *Reader) readHeader() *Header { // Maximum value of hdr.Size is 64 GB (12 octal digits), // so there's no risk of int64 overflowing. - tr.nb = int64(hdr.Size) - tr.pad = -tr.nb & (blockSize - 1) // blockSize is a power of two + nb := int64(hdr.Size) + tr.pad = -nb & (blockSize - 1) // blockSize is a power of two + + // Set the current file reader. + tr.curr = ®FileReader{r: tr.r, nb: nb} + + // Check for old GNU sparse format entry. + if hdr.Typeflag == TypeGNUSparse { + // Get the real size of the file. + hdr.Size = tr.octal(header[483:495]) + + // Read the sparse map. + sp := tr.readOldGNUSparseMap(header) + if tr.err != nil { + return nil + } + // Current file is a GNU sparse file. Update the current file reader. + tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size} + } return hdr } +// A sparseEntry holds a single entry in a sparse file's sparse map. +// A sparse entry indicates the offset and size in a sparse file of a +// block of data. +type sparseEntry struct { + offset int64 + numBytes int64 +} + +// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format. +// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries, +// then one or more extension headers are used to store the rest of the sparse map. 
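+// Concretely (per the offsets defined above): the main header stores up
+// to four (offset, numBytes) pairs as 12-byte octal fields starting at
+// byte 386, with an is-extended flag at byte 482; each extension block
+// holds up to 21 more pairs and its own flag at byte 504.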
+func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry { + isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0 + spCap := oldGNUSparseMainHeaderNumEntries + if isExtended { + spCap += oldGNUSparseExtendedHeaderNumEntries + } + sp := make([]sparseEntry, 0, spCap) + s := slicer(header[oldGNUSparseMainHeaderOffset:]) + + // Read the four entries from the main tar header + for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ { + offset := tr.octal(s.next(oldGNUSparseOffsetSize)) + numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize)) + if tr.err != nil { + tr.err = ErrHeader + return nil + } + if offset == 0 && numBytes == 0 { + break + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + for isExtended { + // There are more entries. Read an extension header and parse its entries. + sparseHeader := make([]byte, blockSize) + if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil { + return nil + } + isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0 + s = slicer(sparseHeader) + for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ { + offset := tr.octal(s.next(oldGNUSparseOffsetSize)) + numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize)) + if tr.err != nil { + tr.err = ErrHeader + return nil + } + if offset == 0 && numBytes == 0 { + break + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + } + return sp +} + +// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format version 1.0. +// The sparse map is stored just before the file data and padded out to the nearest block boundary. +func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { + buf := make([]byte, 2*blockSize) + sparseHeader := buf[:blockSize] + + // readDecimal is a helper function to read a decimal integer from the sparse map + // while making sure to read from the file in blocks of size blockSize + readDecimal := func() (int64, error) { + // Look for newline + nl := bytes.IndexByte(sparseHeader, '\n') + if nl == -1 { + if len(sparseHeader) >= blockSize { + // This is an error + return 0, ErrHeader + } + oldLen := len(sparseHeader) + newLen := oldLen + blockSize + if cap(sparseHeader) < newLen { + // There's more header, but we need to make room for the next block + copy(buf, sparseHeader) + sparseHeader = buf[:newLen] + } else { + // There's more header, and we can just reslice + sparseHeader = sparseHeader[:newLen] + } + + // Now that sparseHeader is large enough, read next block + if _, err := io.ReadFull(r, sparseHeader[oldLen:newLen]); err != nil { + return 0, err + } + + // Look for a newline in the new data + nl = bytes.IndexByte(sparseHeader[oldLen:newLen], '\n') + if nl == -1 { + // This is an error + return 0, ErrHeader + } + nl += oldLen // We want the position from the beginning + } + // Now that we've found a newline, read a number + n, err := strconv.ParseInt(string(sparseHeader[:nl]), 10, 0) + if err != nil { + return 0, ErrHeader + } + + // Update sparseHeader to consume this number + sparseHeader = sparseHeader[nl+1:] + return n, nil + } + + // Read the first block + if _, err := io.ReadFull(r, sparseHeader); err != nil { + return nil, err + } + + // The first line contains the number of entries + numEntries, err := readDecimal() + if err != nil { + return nil, err + } + + // Read all the entries + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + // Read the offset + offset, err := readDecimal() + if err != nil { + return nil, err + } + 
// Read numBytes + numBytes, err := readDecimal() + if err != nil { + return nil, err + } + + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + return sp, nil +} + +// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1. +// The sparse map is stored in the PAX headers. +func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) { + // Get number of entries + numEntriesStr, ok := headers[paxGNUSparseNumBlocks] + if !ok { + return nil, ErrHeader + } + numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) + if err != nil { + return nil, ErrHeader + } + + sparseMap := strings.Split(headers[paxGNUSparseMap], ",") + + // There should be two numbers in sparseMap for each entry + if int64(len(sparseMap)) != 2*numEntries { + return nil, ErrHeader + } + + // Loop through the entries in the sparse map + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + return sp, nil +} + +// numBytes returns the number of bytes left to read in the current file's entry +// in the tar archive, or 0 if there is no current file. +func (tr *Reader) numBytes() int64 { + if tr.curr == nil { + // No current file, so no bytes + return 0 + } + return tr.curr.numBytes() +} + // Read reads from the current entry in the tar archive. // It returns 0, io.EOF when it reaches the end of that entry, // until Next is called to advance to the next entry. func (tr *Reader) Read(b []byte) (n int, err error) { - if tr.nb == 0 { + if tr.curr == nil { + return 0, io.EOF + } + n, err = tr.curr.Read(b) + if err != nil && err != io.EOF { + tr.err = err + } + return +} + +func (rfr *regFileReader) Read(b []byte) (n int, err error) { + if rfr.nb == 0 { // file consumed return 0, io.EOF } - - if int64(len(b)) > tr.nb { - b = b[0:tr.nb] + if int64(len(b)) > rfr.nb { + b = b[0:rfr.nb] } - n, err = tr.r.Read(b) - tr.nb -= int64(n) + n, err = rfr.r.Read(b) + rfr.nb -= int64(n) - if err == io.EOF && tr.nb > 0 { + if err == io.EOF && rfr.nb > 0 { err = io.ErrUnexpectedEOF } - tr.err = err return } + +// numBytes returns the number of bytes left to read in the file's data in the tar archive. +func (rfr *regFileReader) numBytes() int64 { + return rfr.nb +} + +// readHole reads a sparse file hole ending at offset toOffset +func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int { + n64 := toOffset - sfr.pos + if n64 > int64(len(b)) { + n64 = int64(len(b)) + } + n := int(n64) + for i := 0; i < n; i++ { + b[i] = 0 + } + sfr.pos += n64 + return n +} + +// Read reads the sparse file data in expanded form. +func (sfr *sparseFileReader) Read(b []byte) (n int, err error) { + if len(sfr.sp) == 0 { + // No more data fragments to read from. 
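+ // e.g. for the sparseFileReadTests case {sp: [{0, 2}, {5, 3}], tot: 8,
+ // data: "abcde"}: once both fragments are consumed, pos == 8 == tot,
+ // so the check below returns io.EOF; with tot = 10 the final two zero
+ // bytes come from readHole instead.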
+ if sfr.pos < sfr.tot { + // We're in the last hole + n = sfr.readHole(b, sfr.tot) + return + } + // Otherwise, we're at the end of the file + return 0, io.EOF + } + if sfr.pos < sfr.sp[0].offset { + // We're in a hole + n = sfr.readHole(b, sfr.sp[0].offset) + return + } + + // We're not in a hole, so we'll read from the next data fragment + posInFragment := sfr.pos - sfr.sp[0].offset + bytesLeft := sfr.sp[0].numBytes - posInFragment + if int64(len(b)) > bytesLeft { + b = b[0:bytesLeft] + } + + n, err = sfr.rfr.Read(b) + sfr.pos += int64(n) + + if int64(n) == bytesLeft { + // We're done with this fragment + sfr.sp = sfr.sp[1:] + } + + if err == io.EOF && sfr.pos < sfr.tot { + // We reached the end of the last fragment's data, but there's a final hole + err = nil + } + return +} + +// numBytes returns the number of bytes left to read in the sparse file's +// sparse-encoded data in the tar archive. +func (sfr *sparseFileReader) numBytes() int64 { + return sfr.rfr.nb +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go index f84dbebe98..9601ffe459 100644 --- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go @@ -9,6 +9,7 @@ import ( "crypto/md5" "fmt" "io" + "io/ioutil" "os" "reflect" "strings" @@ -54,8 +55,92 @@ var gnuTarTest = &untarTest{ }, } +var sparseTarTest = &untarTest{ + file: "testdata/sparse-formats.tar", + headers: []*Header{ + { + Name: "sparse-gnu", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392395740, 0), + Typeflag: 0x53, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "sparse-posix-0.0", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392342187, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "sparse-posix-0.1", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392340456, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "sparse-posix-1.0", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392337404, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "end", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 4, + ModTime: time.Unix(1392398319, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + }, + cksums: []string{ + "6f53234398c2449fe67c1812d993012f", + "6f53234398c2449fe67c1812d993012f", + "6f53234398c2449fe67c1812d993012f", + "6f53234398c2449fe67c1812d993012f", + "b0061974914468de549a2af8ced10316", + }, +} + var untarTests = []*untarTest{ gnuTarTest, + sparseTarTest, { file: "testdata/star.tar", headers: []*Header{ @@ -386,7 +471,7 @@ func TestParsePAXHeader(t *testing.T) { func TestParsePAXTime(t *testing.T) { // Some valid PAX time values timestamps := map[string]time.Time{ - "1350244992.023960108": time.Unix(1350244992, 23960108), // The commoon case + "1350244992.023960108": time.Unix(1350244992, 23960108), // The common case "1350244992.02396010": time.Unix(1350244992, 23960100), // Lower precision value "1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value "1350244992": time.Unix(1350244992, 0), // Low precision value @@ -423,3 
+508,236 @@ func TestMergePAX(t *testing.T) { t.Errorf("incorrect merge: got %+v, want %+v", hdr, want) } } + +func TestSparseEndToEnd(t *testing.T) { + test := sparseTarTest + f, err := os.Open(test.file) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + tr := NewReader(f) + + headers := test.headers + cksums := test.cksums + nread := 0 + + // loop over all files + for ; ; nread++ { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + break + } + + // check the header + if !reflect.DeepEqual(*hdr, *headers[nread]) { + t.Errorf("Incorrect header:\nhave %+v\nwant %+v", + *hdr, headers[nread]) + } + + // read and checksum the file data + h := md5.New() + _, err = io.Copy(h, tr) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + // verify checksum + have := fmt.Sprintf("%x", h.Sum(nil)) + want := cksums[nread] + if want != have { + t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want) + } + } + if nread != len(headers) { + t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread) + } +} + +type sparseFileReadTest struct { + sparseData []byte + sparseMap []sparseEntry + realSize int64 + expected []byte +} + +var sparseFileReadTests = []sparseFileReadTest{ + { + sparseData: []byte("abcde"), + sparseMap: []sparseEntry{ + {offset: 0, numBytes: 2}, + {offset: 5, numBytes: 3}, + }, + realSize: 8, + expected: []byte("ab\x00\x00\x00cde"), + }, + { + sparseData: []byte("abcde"), + sparseMap: []sparseEntry{ + {offset: 0, numBytes: 2}, + {offset: 5, numBytes: 3}, + }, + realSize: 10, + expected: []byte("ab\x00\x00\x00cde\x00\x00"), + }, + { + sparseData: []byte("abcde"), + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 2}, + }, + realSize: 8, + expected: []byte("\x00abc\x00\x00de"), + }, + { + sparseData: []byte("abcde"), + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 2}, + }, + realSize: 10, + expected: []byte("\x00abc\x00\x00de\x00\x00"), + }, + { + sparseData: []byte(""), + sparseMap: nil, + realSize: 2, + expected: []byte("\x00\x00"), + }, +} + +func TestSparseFileReader(t *testing.T) { + for i, test := range sparseFileReadTests { + r := bytes.NewReader(test.sparseData) + nb := int64(r.Len()) + sfr := &sparseFileReader{ + rfr: ®FileReader{r: r, nb: nb}, + sp: test.sparseMap, + pos: 0, + tot: test.realSize, + } + if sfr.numBytes() != nb { + t.Errorf("test %d: Before reading, sfr.numBytes() = %d, want %d", i, sfr.numBytes(), nb) + } + buf, err := ioutil.ReadAll(sfr) + if err != nil { + t.Errorf("test %d: Unexpected error: %v", i, err) + } + if e := test.expected; !bytes.Equal(buf, e) { + t.Errorf("test %d: Contents = %v, want %v", i, buf, e) + } + if sfr.numBytes() != 0 { + t.Errorf("test %d: After draining the reader, numBytes() was nonzero", i) + } + } +} + +func TestSparseIncrementalRead(t *testing.T) { + sparseMap := []sparseEntry{{10, 2}} + sparseData := []byte("Go") + expected := "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Go\x00\x00\x00\x00\x00\x00\x00\x00" + + r := bytes.NewReader(sparseData) + nb := int64(r.Len()) + sfr := &sparseFileReader{ + rfr: ®FileReader{r: r, nb: nb}, + sp: sparseMap, + pos: 0, + tot: int64(len(expected)), + } + + // We'll read the data 6 bytes at a time, with a hole of size 10 at + // the beginning and one of size 8 at the end. 
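+ // (10-byte hole + 2 data bytes + 8-byte hole == len(expected) == 20.)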
+	var outputBuf bytes.Buffer
+	buf := make([]byte, 6)
+	for {
+		n, err := sfr.Read(buf)
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Errorf("Read: unexpected error %v\n", err)
+		}
+		if n > 0 {
+			_, err := outputBuf.Write(buf[:n])
+			if err != nil {
+				t.Errorf("Write: unexpected error %v\n", err)
+			}
+		}
+	}
+	got := outputBuf.String()
+	if got != expected {
+		t.Errorf("Contents = %v, want %v", got, expected)
+	}
+}
+
+func TestReadGNUSparseMap0x1(t *testing.T) {
+	headers := map[string]string{
+		paxGNUSparseNumBlocks: "4",
+		paxGNUSparseMap:       "0,5,10,5,20,5,30,5",
+	}
+	expected := []sparseEntry{
+		{offset: 0, numBytes: 5},
+		{offset: 10, numBytes: 5},
+		{offset: 20, numBytes: 5},
+		{offset: 30, numBytes: 5},
+	}
+
+	sp, err := readGNUSparseMap0x1(headers)
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	if !reflect.DeepEqual(sp, expected) {
+		t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
+	}
+}
+
+func TestReadGNUSparseMap1x0(t *testing.T) {
+	// This test uses lots of holes so the sparse header takes up more than two blocks
+	numEntries := 100
+	expected := make([]sparseEntry, 0, numEntries)
+	sparseMap := new(bytes.Buffer)
+
+	fmt.Fprintf(sparseMap, "%d\n", numEntries)
+	for i := 0; i < numEntries; i++ {
+		offset := int64(2048 * i)
+		numBytes := int64(1024)
+		expected = append(expected, sparseEntry{offset: offset, numBytes: numBytes})
+		fmt.Fprintf(sparseMap, "%d\n%d\n", offset, numBytes)
+	}
+
+	// Make the header the smallest multiple of blockSize that fits the sparseMap
+	headerBlocks := (sparseMap.Len() + blockSize - 1) / blockSize
+	bufLen := blockSize * headerBlocks
+	buf := make([]byte, bufLen)
+	copy(buf, sparseMap.Bytes())
+
+	// Get a reader to read the sparse map
+	r := bytes.NewReader(buf)
+
+	// Read the sparse map
+	sp, err := readGNUSparseMap1x0(r)
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	if !reflect.DeepEqual(sp, expected) {
+		t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
+	}
+}
+
+func TestUninitializedRead(t *testing.T) {
+	test := gnuTarTest
+	f, err := os.Open(test.file)
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	defer f.Close()
+
+	tr := NewReader(f)
+	_, err = tr.Read([]byte{})
+	if err == nil || err != io.EOF {
+		t.Errorf("Unexpected error: %v, wanted %v", err, io.EOF)
+	}
+
+}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar
new file mode 100644
index 0000000000..8bd4e74d50
Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar
new file mode 100644
index 0000000000..5960ee8247
Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
index 9ee9499297..6eff6f6f84 100644
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
@@ -218,8 +218,8 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
 		tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)
 
 		// Use the ustar magic if we used ustar long names.
- if len(prefix) > 0 { - copy(header[257:265], []byte("ustar\000")) + if len(prefix) > 0 && !tw.usedBinary { + copy(header[257:265], []byte("ustar\x00")) } } } diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go index 2b9ea658db..512fab1a6f 100644 --- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go @@ -103,6 +103,29 @@ var writerTests = []*writerTest{ }, }, }, + // The truncated test file was produced using these commands: + // dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt + // tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar + { + file: "testdata/writer-big-long.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: strings.Repeat("longname/", 15) + "16gig.txt", + Mode: 0644, + Uid: 1000, + Gid: 1000, + Size: 16 << 30, + ModTime: time.Unix(1399583047, 0), + Typeflag: '0', + Uname: "guillaume", + Gname: "guillaume", + }, + // fake contents + contents: strings.Repeat("\x00", 4<<10), + }, + }, + }, // This file was produced using gnu tar 1.17 // gnutar -b 4 --format=ustar (longname/)*15 + file.txt { diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/methods.go b/vendor/src/github.com/coreos/go-systemd/dbus/methods.go index 11d5cda945..a60de059e6 100644 --- a/vendor/src/github.com/coreos/go-systemd/dbus/methods.go +++ b/vendor/src/github.com/coreos/go-systemd/dbus/methods.go @@ -204,7 +204,7 @@ func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]i // to modify. properties are the settings to set, encoded as an array of property // name and value pairs. func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { - return c.sysobj.Call("SetUnitProperties", 0, name, runtime, properties).Store() + return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() } func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { @@ -253,6 +253,48 @@ type UnitStatus struct { JobPath dbus.ObjectPath // The job object path } +type LinkUnitFileChange EnableUnitFileChange + +// LinkUnitFiles() links unit files (that are located outside of the +// usual unit search paths) into the unit search path. +// +// It takes a list of absolute paths to unit files to link and two +// booleans. The first boolean controls whether the unit shall be +// enabled for runtime only (true, /run), or persistently (false, +// /etc). +// The second controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns a list of the changes made. The list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. 
+func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]LinkUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + // EnableUnitFiles() may be used to enable one or more units in the system (by // creating symlinks to them in /etc or /run). // @@ -317,7 +359,7 @@ type EnableUnitFileChange struct { // symlink. func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { result := make([][]interface{}, 0) - err := c.sysobj.Call("DisableUnitFiles", 0, files, runtime).Store(&result) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) if err != nil { return nil, err } diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go index d943e7ebfc..8c7ab93eb3 100644 --- a/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go +++ b/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go @@ -36,36 +36,38 @@ func setupConn(t *testing.T) *Conn { return conn } +func findFixture(target string, t *testing.T) string { + abs, err := filepath.Abs("../fixtures/" + target) + if err != nil { + t.Fatal(err) + } + return abs +} + func setupUnit(target string, conn *Conn, t *testing.T) { // Blindly stop the unit in case it is running conn.StopUnit(target, "replace") // Blindly remove the symlink in case it exists targetRun := filepath.Join("/run/systemd/system/", target) - err := os.Remove(targetRun) - - // 1. Enable the unit - abs, err := filepath.Abs("../fixtures/" + target) - if err != nil { - t.Fatal(err) - } + os.Remove(targetRun) +} +func linkUnit(target string, conn *Conn, t *testing.T) { + abs := findFixture(target, t) fixture := []string{abs} - install, changes, err := conn.EnableUnitFiles(fixture, true, true) + changes, err := conn.LinkUnitFiles(fixture, true, true) if err != nil { t.Fatal(err) } - if install != false { - t.Fatal("Install was true") - } - if len(changes) < 1 { t.Fatalf("Expected one change, got %v", changes) } - if changes[0].Filename != targetRun { + runPath := filepath.Join("/run/systemd/system/", target) + if changes[0].Filename != runPath { t.Fatal("Unexpected target filename") } } @@ -76,6 +78,7 @@ func TestStartStopUnit(t *testing.T) { conn := setupConn(t) setupUnit(target, conn, t) + linkUnit(target, conn, t) // 2. Start the unit job, err := conn.StartUnit(target, "replace") @@ -84,7 +87,7 @@ func TestStartStopUnit(t *testing.T) { } if job != "done" { - t.Fatal("Job is not done, %v", job) + t.Fatal("Job is not done:", job) } units, err := conn.ListUnits() @@ -130,28 +133,41 @@ func TestEnableDisableUnit(t *testing.T) { conn := setupConn(t) setupUnit(target, conn, t) + abs := findFixture(target, t) + runPath := filepath.Join("/run/systemd/system/", target) - abs, err := filepath.Abs("../fixtures/" + target) + // 1. 
Enable the unit + install, changes, err := conn.EnableUnitFiles([]string{abs}, true, true) if err != nil { t.Fatal(err) } - path := filepath.Join("/run/systemd/system/", target) + if install != false { + t.Fatal("Install was true") + } + + if len(changes) < 1 { + t.Fatalf("Expected one change, got %v", changes) + } + + if changes[0].Filename != runPath { + t.Fatal("Unexpected target filename") + } // 2. Disable the unit - changes, err := conn.DisableUnitFiles([]string{abs}, true) + dChanges, err := conn.DisableUnitFiles([]string{abs}, true) if err != nil { t.Fatal(err) } - if len(changes) != 1 { - t.Fatalf("Changes should include the path, %v", changes) + if len(dChanges) != 1 { + t.Fatalf("Changes should include the path, %v", dChanges) } - if changes[0].Filename != path { - t.Fatalf("Change should include correct filename, %+v", changes[0]) + if dChanges[0].Filename != runPath { + t.Fatalf("Change should include correct filename, %+v", dChanges[0]) } - if changes[0].Destination != "" { - t.Fatalf("Change destination should be empty, %+v", changes[0]) + if dChanges[0].Destination != "" { + t.Fatalf("Change destination should be empty, %+v", dChanges[0]) } } @@ -230,7 +246,7 @@ func TestSetUnitProperties(t *testing.T) { value := info["CPUShares"].(uint64) if value != 1023 { - t.Fatal("CPUShares of unit is not 1023, %s", value) + t.Fatal("CPUShares of unit is not 1023:", value) } } @@ -250,7 +266,7 @@ func TestStartStopTransientUnit(t *testing.T) { } if job != "done" { - t.Fatal("Job is not done, %v", job) + t.Fatal("Job is not done:", job) } units, err := conn.ListUnits() @@ -295,6 +311,7 @@ func TestConnJobListener(t *testing.T) { conn := setupConn(t) setupUnit(target, conn, t) + linkUnit(target, conn, t) jobSize := len(conn.jobListener.jobs) diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go index 3d896d896f..fcd29b6e8f 100644 --- a/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go +++ b/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go @@ -40,7 +40,6 @@ func (c *Conn) Subscribe() error { err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() if err != nil { - c.sysconn.Close() return err } @@ -51,7 +50,6 @@ func (c *Conn) Subscribe() error { func (c *Conn) Unsubscribe() error { err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() if err != nil { - c.sysconn.Close() return err } @@ -69,7 +67,11 @@ func (c *Conn) initDispatch() { go func() { for { - signal := <-ch + signal, ok := <-ch + if !ok { + return + } + switch signal.Name { case "org.freedesktop.systemd1.Manager.JobRemoved": c.jobComplete(signal) diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go index db600850c2..4ecd15376d 100644 --- a/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go +++ b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go @@ -25,6 +25,7 @@ func TestSubscriptionSetUnit(t *testing.T) { subSet.Add(target) setupUnit(target, conn, t) + linkUnit(target, conn, t) job, err := conn.StartUnit(target, "replace") if err != nil { @@ -47,7 +48,7 @@ func TestSubscriptionSetUnit(t *testing.T) { tCh, ok := changes[target] if !ok { - t.Fatal("Unexpected event %v", changes) + t.Fatal("Unexpected event:", changes) } if tCh.ActiveState == "active" && tCh.Name == target { @@ -63,5 +64,3 @@ func TestSubscriptionSetUnit(t *testing.T) { 
 success:
 	return
 }
-
-
diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go
index 6f4d0b32a6..f2b5dfc28c 100644
--- a/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go
+++ b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go
@@ -47,6 +47,7 @@ func TestSubscribeUnit(t *testing.T) {
 	evChan, errChan := conn.SubscribeUnits(time.Second)
 
 	setupUnit(target, conn, t)
+	linkUnit(target, conn, t)
 
 	job, err := conn.StartUnit(target, "replace")
 	if err != nil {
diff --git a/vendor/src/github.com/coreos/go-systemd/journal/send.go b/vendor/src/github.com/coreos/go-systemd/journal/send.go
index a29bcbf0fa..b52e120988 100644
--- a/vendor/src/github.com/coreos/go-systemd/journal/send.go
+++ b/vendor/src/github.com/coreos/go-systemd/journal/send.go
@@ -119,7 +119,7 @@ func appendVariable(w io.Writer, name, value string) {
 		fmt.Fprintln(w, value)
 	} else {
 		/* just write the variable and value all on one line */
-		fmt.Fprintln(w, "%s=%s", name, value)
+		fmt.Fprintf(w, "%s=%s\n", name, value)
 	}
 }
diff --git a/vendor/src/github.com/coreos/go-systemd/login1/dbus.go b/vendor/src/github.com/coreos/go-systemd/login1/dbus.go
new file mode 100644
index 0000000000..d00dd110b5
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/login1/dbus.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2014 CoreOS Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Integration with the systemd logind API. See http://www.freedesktop.org/wiki/Software/systemd/logind/
+package login1
+
+import (
+	"os"
+	"strconv"
+
+	"github.com/godbus/dbus"
+)
+
+const (
+	dbusInterface = "org.freedesktop.login1.Manager"
+	dbusPath      = "/org/freedesktop/login1"
+)
+
+// Conn is a connection to systemd's dbus endpoint.
+type Conn struct {
+	conn   *dbus.Conn
+	object *dbus.Object
+}
+
+// New() establishes a connection to the system bus and authenticates.
+func New() (*Conn, error) {
+	c := new(Conn)
+
+	if err := c.initConnection(); err != nil {
+		return nil, err
+	}
+
+	return c, nil
+}
+
+func (c *Conn) initConnection() error {
+	var err error
+	c.conn, err = dbus.SystemBusPrivate()
+	if err != nil {
+		return err
+	}
+
+	// Only use EXTERNAL method, and hardcode the uid (not username)
+	// to avoid a username lookup (which requires a dynamically linked
+	// libc)
+	methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
+
+	err = c.conn.Auth(methods)
+	if err != nil {
+		c.conn.Close()
+		return err
+	}
+
+	err = c.conn.Hello()
+	if err != nil {
+		c.conn.Close()
+		return err
+	}
+
+	c.object = c.conn.Object("org.freedesktop.login1", dbus.ObjectPath(dbusPath))
+
+	return nil
+}
+
+// Reboot asks logind for a reboot, optionally asking for auth.
+func (c *Conn) Reboot(askForAuth bool) { + c.object.Call(dbusInterface+".Reboot", 0, askForAuth) +} diff --git a/vendor/src/github.com/coreos/go-systemd/login1/dbus_test.go b/vendor/src/github.com/coreos/go-systemd/login1/dbus_test.go new file mode 100644 index 0000000000..4439d37380 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/login1/dbus_test.go @@ -0,0 +1,30 @@ +/* +Copyright 2014 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package login1 + +import ( + "testing" +) + +// TestNew ensures that New() works without errors. +func TestNew(t *testing.T) { + _, err := New() + + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/src/github.com/gorilla/context/.travis.yml b/vendor/src/github.com/gorilla/context/.travis.yml new file mode 100644 index 0000000000..d87d465768 --- /dev/null +++ b/vendor/src/github.com/gorilla/context/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip diff --git a/vendor/src/github.com/gorilla/context/README.md b/vendor/src/github.com/gorilla/context/README.md index 8ee62b4263..c60a31b053 100644 --- a/vendor/src/github.com/gorilla/context/README.md +++ b/vendor/src/github.com/gorilla/context/README.md @@ -1,5 +1,6 @@ context ======= +[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) gorilla/context is a general purpose registry for global request variables. diff --git a/vendor/src/github.com/gorilla/context/context.go b/vendor/src/github.com/gorilla/context/context.go index 35d65561f3..a7f7d85bb4 100644 --- a/vendor/src/github.com/gorilla/context/context.go +++ b/vendor/src/github.com/gorilla/context/context.go @@ -11,7 +11,7 @@ import ( ) var ( - mutex sync.Mutex + mutex sync.RWMutex data = make(map[*http.Request]map[interface{}]interface{}) datat = make(map[*http.Request]int64) ) @@ -19,42 +19,64 @@ var ( // Set stores a value for a given key in a given request. func Set(r *http.Request, key, val interface{}) { mutex.Lock() - defer mutex.Unlock() if data[r] == nil { data[r] = make(map[interface{}]interface{}) datat[r] = time.Now().Unix() } data[r][key] = val + mutex.Unlock() } // Get returns a value stored for a given key in a given request. func Get(r *http.Request, key interface{}) interface{} { - mutex.Lock() - defer mutex.Unlock() + mutex.RLock() if data[r] != nil { + mutex.RUnlock() return data[r][key] } + mutex.RUnlock() return nil } // GetOk returns stored value and presence state like multi-value return of map access. func GetOk(r *http.Request, key interface{}) (interface{}, bool) { - mutex.Lock() - defer mutex.Unlock() + mutex.RLock() if _, ok := data[r]; ok { value, ok := data[r][key] + mutex.RUnlock() return value, ok } + mutex.RUnlock() return nil, false } +// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. 
+func GetAll(r *http.Request) map[interface{}]interface{} {
+	mutex.RLock()
+	if context, ok := data[r]; ok {
+		mutex.RUnlock()
+		return context
+	}
+	mutex.RUnlock()
+	return nil
+}
+
+// GetAllOk returns all stored values for the request as a map. It returns not
+// ok if the request was never registered.
+func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
+	mutex.RLock()
+	context, ok := data[r]
+	mutex.RUnlock()
+	return context, ok
+}
+
 // Delete removes a value stored for a given key in a given request.
 func Delete(r *http.Request, key interface{}) {
 	mutex.Lock()
-	defer mutex.Unlock()
 	if data[r] != nil {
 		delete(data[r], key)
 	}
+	mutex.Unlock()
 }
 
 // Clear removes all values stored for a given request.
@@ -63,8 +85,8 @@ func Delete(r *http.Request, key interface{}) {
 func Clear(r *http.Request) {
 	mutex.Lock()
-	defer mutex.Unlock()
 	clear(r)
+	mutex.Unlock()
 }
 
 // clear is Clear without the lock.
@@ -84,7 +106,6 @@ func clear(r *http.Request) {
 // periodically until the problem is fixed.
 func Purge(maxAge int) int {
 	mutex.Lock()
-	defer mutex.Unlock()
 	count := 0
 	if maxAge <= 0 {
 		count = len(data)
@@ -92,13 +113,14 @@
 		datat = make(map[*http.Request]int64)
 	} else {
 		min := time.Now().Unix() - int64(maxAge)
-		for r, _ := range data {
+		for r := range data {
 			if datat[r] < min {
 				clear(r)
 				count++
 			}
 		}
 	}
+	mutex.Unlock()
 	return count
 }
diff --git a/vendor/src/github.com/gorilla/context/context_test.go b/vendor/src/github.com/gorilla/context/context_test.go
index ff9e2ad5fc..6ada8ec31f 100644
--- a/vendor/src/github.com/gorilla/context/context_test.go
+++ b/vendor/src/github.com/gorilla/context/context_test.go
@@ -24,6 +24,7 @@ func TestContext(t *testing.T) {
 	}
 
 	r, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
+	emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
 
 	// Get()
 	assertEqual(Get(r, key1), nil)
@@ -51,6 +52,26 @@
 	assertEqual(value, nil)
 	assertEqual(ok, true)
 
+	// GetAll()
+	values := GetAll(r)
+	assertEqual(len(values), 3)
+
+	// GetAll() for empty request
+	values = GetAll(emptyR)
+	if values != nil {
+		t.Error("GetAll didn't return nil value for invalid request")
+	}
+
+	// GetAllOk()
+	values, ok = GetAllOk(r)
+	assertEqual(len(values), 3)
+	assertEqual(ok, true)
+
+	// GetAllOk() for empty request
+	values, ok = GetAllOk(emptyR)
+	assertEqual(value, nil)
+	assertEqual(ok, false)
+
 	// Delete()
 	Delete(r, key1)
 	assertEqual(Get(r, key1), nil)
@@ -64,3 +85,77 @@ func TestContext(t *testing.T) {
 	Clear(r)
 	assertEqual(len(data), 0)
 }
+
+func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) {
+	<-wait
+	for i := 0; i < iterations; i++ {
+		Get(r, key)
+	}
+	done <- struct{}{}
+
+}
+
+func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) {
+	<-wait
+	for i := 0; i < iterations; i++ {
+		Set(r, key, value)
+	}
+	done <- struct{}{}
+
+}
+
+func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) {
+
+	b.StopTimer()
+	r, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
+	done := make(chan struct{})
+	b.StartTimer()
+
+	for i := 0; i < b.N; i++ {
+		wait := make(chan struct{})
+
+		for i := 0; i < numReaders; i++ {
+			go parallelReader(r, "test", iterations, wait, done)
+		}
+
+		for i := 0; i < numWriters; i++ {
+			go parallelWriter(r, "test", "123", iterations, wait, done)
+		}
+
+		close(wait)
+
+		for i := 0; i <
numReaders+numWriters; i++ { + <-done + } + + } + +} + +func BenchmarkMutexSameReadWrite1(b *testing.B) { + benchmarkMutex(b, 1, 1, 32) +} +func BenchmarkMutexSameReadWrite2(b *testing.B) { + benchmarkMutex(b, 2, 2, 32) +} +func BenchmarkMutexSameReadWrite4(b *testing.B) { + benchmarkMutex(b, 4, 4, 32) +} +func BenchmarkMutex1(b *testing.B) { + benchmarkMutex(b, 2, 8, 32) +} +func BenchmarkMutex2(b *testing.B) { + benchmarkMutex(b, 16, 4, 64) +} +func BenchmarkMutex3(b *testing.B) { + benchmarkMutex(b, 1, 2, 128) +} +func BenchmarkMutex4(b *testing.B) { + benchmarkMutex(b, 128, 32, 256) +} +func BenchmarkMutex5(b *testing.B) { + benchmarkMutex(b, 1024, 2048, 64) +} +func BenchmarkMutex6(b *testing.B) { + benchmarkMutex(b, 2048, 1024, 512) +} diff --git a/vendor/src/github.com/gorilla/context/doc.go b/vendor/src/github.com/gorilla/context/doc.go index 297606455c..73c7400311 100644 --- a/vendor/src/github.com/gorilla/context/doc.go +++ b/vendor/src/github.com/gorilla/context/doc.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. /* -Package gorilla/context stores values shared during a request lifetime. +Package context stores values shared during a request lifetime. For example, a router can set variables extracted from the URL and later application handlers can access those values, or it can be used to store diff --git a/vendor/src/github.com/gorilla/mux/.travis.yml b/vendor/src/github.com/gorilla/mux/.travis.yml new file mode 100644 index 0000000000..d87d465768 --- /dev/null +++ b/vendor/src/github.com/gorilla/mux/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip diff --git a/vendor/src/github.com/gorilla/mux/README.md b/vendor/src/github.com/gorilla/mux/README.md index f6db41ad81..e60301b033 100644 --- a/vendor/src/github.com/gorilla/mux/README.md +++ b/vendor/src/github.com/gorilla/mux/README.md @@ -1,5 +1,6 @@ mux === +[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) gorilla/mux is a powerful URL router and dispatcher. diff --git a/vendor/src/github.com/gorilla/mux/doc.go b/vendor/src/github.com/gorilla/mux/doc.go index 8ee5540a4f..b2deed34c4 100644 --- a/vendor/src/github.com/gorilla/mux/doc.go +++ b/vendor/src/github.com/gorilla/mux/doc.go @@ -134,7 +134,7 @@ the inner routes use it as base for their paths: // "/products/{key}/" s.HandleFunc("/{key}/", ProductHandler) // "/products/{key}/details" - s.HandleFunc("/{key}/details"), ProductDetailsHandler) + s.HandleFunc("/{key}/details", ProductDetailsHandler) Now let's see how to build registered URLs. diff --git a/vendor/src/github.com/gorilla/mux/mux.go b/vendor/src/github.com/gorilla/mux/mux.go index 385717394c..8b23c39d39 100644 --- a/vendor/src/github.com/gorilla/mux/mux.go +++ b/vendor/src/github.com/gorilla/mux/mux.go @@ -14,7 +14,7 @@ import ( // NewRouter returns a new router instance. func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route)} + return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} } // Router registers routes to be matched and dispatches a handler. @@ -46,6 +46,8 @@ type Router struct { namedRoutes map[string]*Route // See Router.StrictSlash(). This defines the flag for new routes. strictSlash bool + // If true, do not clear the request context after handling the request + KeepContext bool } // Match matches registered routes against the request. 
@@ -65,6 +67,14 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
 func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 	// Clean path to canonical form and redirect.
 	if p := cleanPath(req.URL.Path); p != req.URL.Path {
+
+		// Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
+		// This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue:
+		// http://code.google.com/p/go/issues/detail?id=5252
+		url := *req.URL
+		url.Path = p
+		p = url.String()
+
 		w.Header().Set("Location", p)
 		w.WriteHeader(http.StatusMovedPermanently)
 		return
@@ -82,7 +92,9 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 		}
 		handler = r.NotFoundHandler
 	}
 
-	defer context.Clear(req)
+	if !r.KeepContext {
+		defer context.Clear(req)
+	}
 	handler.ServeHTTP(w, req)
 }
@@ -97,14 +109,20 @@ func (r *Router) GetRoute(name string) *Route {
 	return r.getNamedRoutes()[name]
 }
 
-// StrictSlash defines the slash behavior for new routes.
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
 //
 // When true, if the route path is "/path/", accessing "/path" will redirect
-// to the former and vice versa.
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
 //
-// Special case: when a route sets a path prefix, strict slash is
-// automatically set to false for that route because the redirect behavior
-// can't be determined for prefixes.
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
func (r *Router) StrictSlash(value bool) *Router { r.strictSlash = value return r diff --git a/vendor/src/github.com/gorilla/mux/mux_test.go b/vendor/src/github.com/gorilla/mux/mux_test.go index 55159bd10d..0e2e48067a 100644 --- a/vendor/src/github.com/gorilla/mux/mux_test.go +++ b/vendor/src/github.com/gorilla/mux/mux_test.go @@ -8,16 +8,19 @@ import ( "fmt" "net/http" "testing" + + "github.com/gorilla/context" ) type routeTest struct { - title string // title of the test - route *Route // the route being tested - request *http.Request // a request to test the route - vars map[string]string // the expected vars of the match - host string // the expected host of the match - path string // the expected path of the match - shouldMatch bool // whether the request is expected to match the route at all + title string // title of the test + route *Route // the route being tested + request *http.Request // a request to test the route + vars map[string]string // the expected vars of the match + host string // the expected host of the match + path string // the expected path of the match + shouldMatch bool // whether the request is expected to match the route at all + shouldRedirect bool // whether the request should result in a redirect } func TestHost(t *testing.T) { @@ -149,6 +152,33 @@ func TestPath(t *testing.T) { path: "/111/222/333", shouldMatch: true, }, + { + title: "Path route, match with trailing slash in request and path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + }, + { + title: "Path route, do not match with trailing slash in path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "Path route, do not match with trailing slash in request", + route: new(Route).Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: false, + }, { title: "Path route, wrong path in request in request URL", route: new(Route).Path("/111/222/333"), @@ -212,6 +242,15 @@ func TestPathPrefix(t *testing.T) { path: "/111", shouldMatch: true, }, + { + title: "PathPrefix route, match substring", + route: new(Route).PathPrefix("/1"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/1", + shouldMatch: true, + }, { title: "PathPrefix route, URL prefix in request does not match", route: new(Route).PathPrefix("/111"), @@ -414,6 +453,15 @@ func TestQueries(t *testing.T) { path: "", shouldMatch: true, }, + { + title: "Queries route, match with a query string", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, { title: "Queries route, bad query", route: new(Route).Queries("foo", "bar", "baz", "ding"), @@ -568,26 +616,74 @@ func TestNamedRoutes(t *testing.T) { } func TestStrictSlash(t *testing.T) { - var r *Router - var req *http.Request - var route *Route - var match *RouteMatch - var matched bool - - // StrictSlash should be ignored for path prefix. - // So we register a route ending in slash but it doesn't attempt to add - // the slash for a path not ending in slash. 
- r = NewRouter() + r := NewRouter() r.StrictSlash(true) - route = r.NewRoute().PathPrefix("/static/") - req, _ = http.NewRequest("GET", "http://localhost/static/logo.png", nil) - match = new(RouteMatch) - matched = r.Match(req, match) - if !matched { - t.Errorf("Should match request %q -- %v", req.URL.Path, getRouteTemplate(route)) + + tests := []routeTest{ + { + title: "Redirect path without slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path with slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Redirect path with slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path without slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Propagate StrictSlash to subrouters", + route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), + request: newRequest("GET", "http://localhost/static/images"), + vars: map[string]string{}, + host: "", + path: "/static/images/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Ignore StrictSlash for path prefix", + route: r.NewRoute().PathPrefix("/static/"), + request: newRequest("GET", "http://localhost/static/logo.png"), + vars: map[string]string{}, + host: "", + path: "/static/", + shouldMatch: true, + shouldRedirect: false, + }, } - if match.Handler != nil { - t.Errorf("Should not redirect") + + for _, test := range tests { + testRoute(t, test) } } @@ -616,6 +712,7 @@ func testRoute(t *testing.T, test routeTest) { host := test.host path := test.path url := test.host + test.path + shouldRedirect := test.shouldRedirect var match RouteMatch ok := route.Match(request, &match) @@ -653,6 +750,84 @@ func testRoute(t *testing.T, test routeTest) { return } } + if shouldRedirect && match.Handler == nil { + t.Errorf("(%v) Did not redirect", test.title) + return + } + if !shouldRedirect && match.Handler != nil { + t.Errorf("(%v) Unexpected redirect", test.title) + return + } + } +} + +// Tests that the context is cleared or not cleared properly depending on +// the configuration of the router +func TestKeepContext(t *testing.T) { + func1 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + res := new(http.ResponseWriter) + r.ServeHTTP(*res, req) + + if _, ok := context.GetOk(req, "t"); ok { + t.Error("Context should have been cleared at end of request") + } + + r.KeepContext = true + + req, _ = http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + r.ServeHTTP(*res, req) + if _, ok := context.GetOk(req, "t"); !ok { + t.Error("Context should NOT have been cleared at end of request") + } + +} + +type TestA301ResponseWriter struct { + hh http.Header + status int +} + +func (ho TestA301ResponseWriter) Header() http.Header { + return http.Header(ho.hh) +} + +func (ho 
TestA301ResponseWriter) Write(b []byte) (int, error) { + return 0, nil +} + +func (ho TestA301ResponseWriter) WriteHeader(code int) { + ho.status = code +} + +func Test301Redirect(t *testing.T) { + m := make(http.Header) + + func1 := func(w http.ResponseWriter, r *http.Request) {} + func2 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/api/", func2).Name("func2") + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) + + res := TestA301ResponseWriter{ + hh: m, + status: 0, + } + r.ServeHTTP(&res, req) + + if "http://localhost/api/?abc=def" != res.hh["Location"][0] { + t.Errorf("Should have complete URL with query string") } } diff --git a/vendor/src/github.com/gorilla/mux/old_test.go b/vendor/src/github.com/gorilla/mux/old_test.go index 7e266bb695..42530590e7 100644 --- a/vendor/src/github.com/gorilla/mux/old_test.go +++ b/vendor/src/github.com/gorilla/mux/old_test.go @@ -96,8 +96,8 @@ func TestRouteMatchers(t *testing.T) { method = "GET" headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} resultVars = map[bool]map[string]string{ - true: map[string]string{"var1": "www", "var2": "product", "var3": "42"}, - false: map[string]string{}, + true: {"var1": "www", "var2": "product", "var3": "42"}, + false: {}, } } @@ -110,8 +110,8 @@ func TestRouteMatchers(t *testing.T) { method = "POST" headers = map[string]string{"Content-Type": "application/json"} resultVars = map[bool]map[string]string{ - true: map[string]string{"var4": "google", "var5": "product", "var6": "42"}, - false: map[string]string{}, + true: {"var4": "google", "var5": "product", "var6": "42"}, + false: {}, } } diff --git a/vendor/src/github.com/gorilla/mux/regexp.go b/vendor/src/github.com/gorilla/mux/regexp.go index 4c3482bfbd..925f268abe 100644 --- a/vendor/src/github.com/gorilla/mux/regexp.go +++ b/vendor/src/github.com/gorilla/mux/regexp.go @@ -98,12 +98,13 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, strictSlash bool) (*rout } // Done! return &routeRegexp{ - template: template, - matchHost: matchHost, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, + template: template, + matchHost: matchHost, + strictSlash: strictSlash, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, }, nil } @@ -114,6 +115,8 @@ type routeRegexp struct { template string // True for host match, false for path match. matchHost bool + // The strictSlash value defined on the route, but disabled if PathPrefix was used. + strictSlash bool // Expanded regexp. regexp *regexp.Regexp // Reverse template. @@ -216,7 +219,7 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) m.Vars[v] = pathVars[k+1] } // Check if we should redirect. - if r.strictSlash { + if v.path.strictSlash { p1 := strings.HasSuffix(req.URL.Path, "/") p2 := strings.HasSuffix(v.path.template, "/") if p1 != p2 { diff --git a/vendor/src/github.com/gorilla/mux/route.go b/vendor/src/github.com/gorilla/mux/route.go index cb538ea4ec..5cb2526d61 100644 --- a/vendor/src/github.com/gorilla/mux/route.go +++ b/vendor/src/github.com/gorilla/mux/route.go @@ -259,7 +259,8 @@ func (r *Route) Methods(methods ...string) *Route { // Path ----------------------------------------------------------------------- // Path adds a matcher for the URL path. -// It accepts a template with zero or more URL variables enclosed by {}. +// It accepts a template with zero or more URL variables enclosed by {}. 
The
+// template must start with a "/".
 // Variables can define an optional regexp pattern to be matched:
 //
 // - {name} matches anything until the next slash.
@@ -283,9 +284,16 @@ func (r *Route) Path(tpl string) *Route {
 
 // PathPrefix -----------------------------------------------------------------
 
-// PathPrefix adds a matcher for the URL path prefix.
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
 func (r *Route) PathPrefix(tpl string) *Route {
-	r.strictSlash = false
 	r.err = r.addRegexpMatcher(tpl, false, true)
 	return r
 }
@@ -328,7 +336,7 @@ func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
 }
 
 // Schemes adds a matcher for URL schemes.
-// It accepts a sequence schemes to be matched, e.g.: "http", "https".
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
 func (r *Route) Schemes(schemes ...string) *Route {
 	for k, v := range schemes {
 		schemes[k] = strings.ToLower(v)
diff --git a/vendor/src/github.com/kr/pty/ioctl.go b/vendor/src/github.com/kr/pty/ioctl.go
new file mode 100644
index 0000000000..5b856e8711
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/ioctl.go
@@ -0,0 +1,11 @@
+package pty
+
+import "syscall"
+
+func ioctl(fd, cmd, ptr uintptr) error {
+	_, _, e := syscall.Syscall(syscall.SYS_IOCTL, fd, cmd, ptr)
+	if e != 0 {
+		return e
+	}
+	return nil
+}
diff --git a/vendor/src/github.com/kr/pty/ioctl_bsd.go b/vendor/src/github.com/kr/pty/ioctl_bsd.go
new file mode 100644
index 0000000000..73b12c53cf
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/ioctl_bsd.go
@@ -0,0 +1,39 @@
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package pty
+
+// from
+const (
+	_IOC_VOID    uintptr = 0x20000000
+	_IOC_OUT     uintptr = 0x40000000
+	_IOC_IN      uintptr = 0x80000000
+	_IOC_IN_OUT  uintptr = _IOC_OUT | _IOC_IN
+	_IOC_DIRMASK         = _IOC_VOID | _IOC_OUT | _IOC_IN
+
+	_IOC_PARAM_SHIFT = 13
+	_IOC_PARAM_MASK  = (1 << _IOC_PARAM_SHIFT) - 1
+)
+
+func _IOC_PARM_LEN(ioctl uintptr) uintptr {
+	return (ioctl >> 16) & _IOC_PARAM_MASK
+}
+
+func _IOC(inout uintptr, group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+	return inout | (param_len&_IOC_PARAM_MASK)<<16 | uintptr(group)<<8 | ioctl_num
+}
+
+func _IO(group byte, ioctl_num uintptr) uintptr {
+	return _IOC(_IOC_VOID, group, ioctl_num, 0)
+}
+
+func _IOR(group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+	return _IOC(_IOC_OUT, group, ioctl_num, param_len)
+}
+
+func _IOW(group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+	return _IOC(_IOC_IN, group, ioctl_num, param_len)
+}
+
+func _IOWR(group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+	return _IOC(_IOC_IN_OUT, group, ioctl_num, param_len)
+}
diff --git a/vendor/src/github.com/kr/pty/ioctl_linux.go b/vendor/src/github.com/kr/pty/ioctl_linux.go
new file mode 100644
index 0000000000..9fe7b0b0f9
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/ioctl_linux.go
@@ -0,0 +1,42 @@
+package pty
+
+// from
+const (
+	_IOC_NRBITS   = 8
+	_IOC_TYPEBITS = 8
+
+	_IOC_SIZEBITS = 14
+	_IOC_DIRBITS  = 2
+
+	_IOC_NRSHIFT   = 0
+	_IOC_TYPESHIFT = _IOC_NRSHIFT + _IOC_NRBITS
+	_IOC_SIZESHIFT = _IOC_TYPESHIFT + _IOC_TYPEBITS
+	_IOC_DIRSHIFT =
_IOC_SIZESHIFT + _IOC_SIZEBITS + + _IOC_NONE uint = 0 + _IOC_WRITE uint = 1 + _IOC_READ uint = 2 +) + +func _IOC(dir uint, ioctl_type byte, nr byte, size uintptr) uintptr { + return (uintptr(dir)<<_IOC_DIRSHIFT | + uintptr(ioctl_type)<<_IOC_TYPESHIFT | + uintptr(nr)<<_IOC_NRSHIFT | + size<<_IOC_SIZESHIFT) +} + +func _IO(ioctl_type byte, nr byte) uintptr { + return _IOC(_IOC_NONE, ioctl_type, nr, 0) +} + +func _IOR(ioctl_type byte, nr byte, size uintptr) uintptr { + return _IOC(_IOC_READ, ioctl_type, nr, size) +} + +func _IOW(ioctl_type byte, nr byte, size uintptr) uintptr { + return _IOC(_IOC_WRITE, ioctl_type, nr, size) +} + +func _IOWR(ioctl_type byte, nr byte, size uintptr) uintptr { + return _IOC(_IOC_READ|_IOC_WRITE, ioctl_type, nr, size) +} diff --git a/vendor/src/github.com/kr/pty/mktypes.bash b/vendor/src/github.com/kr/pty/mktypes.bash new file mode 100755 index 0000000000..9952c88838 --- /dev/null +++ b/vendor/src/github.com/kr/pty/mktypes.bash @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +GOOSARCH="${GOOS}_${GOARCH}" +case "$GOOSARCH" in +_* | *_ | _) + echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2 + exit 1 + ;; +esac + +GODEFS="go tool cgo -godefs" + +$GODEFS types.go |gofmt > ztypes_$GOARCH.go + +case $GOOS in +freebsd) + $GODEFS types_$GOOS.go |gofmt > ztypes_$GOOSARCH.go + ;; +esac diff --git a/vendor/src/github.com/kr/pty/pty_darwin.go b/vendor/src/github.com/kr/pty/pty_darwin.go index 597bb03e57..4f4d5ca26e 100644 --- a/vendor/src/github.com/kr/pty/pty_darwin.go +++ b/vendor/src/github.com/kr/pty/pty_darwin.go @@ -7,9 +7,6 @@ import ( "unsafe" ) -// see ioccom.h -const sys_IOCPARM_MASK = 0x1fff - func open() (pty, tty *os.File, err error) { p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0) if err != nil { @@ -39,9 +36,13 @@ func open() (pty, tty *os.File, err error) { } func ptsname(f *os.File) (string, error) { - var n [(syscall.TIOCPTYGNAME >> 16) & sys_IOCPARM_MASK]byte + n := make([]byte, _IOC_PARM_LEN(syscall.TIOCPTYGNAME)) + + err := ioctl(f.Fd(), syscall.TIOCPTYGNAME, uintptr(unsafe.Pointer(&n[0]))) + if err != nil { + return "", err + } - ioctl(f.Fd(), syscall.TIOCPTYGNAME, uintptr(unsafe.Pointer(&n))) for i, c := range n { if c == 0 { return string(n[:i]), nil @@ -51,19 +52,9 @@ func ptsname(f *os.File) (string, error) { } func grantpt(f *os.File) error { - var u int - return ioctl(f.Fd(), syscall.TIOCPTYGRANT, uintptr(unsafe.Pointer(&u))) + return ioctl(f.Fd(), syscall.TIOCPTYGRANT, 0) } func unlockpt(f *os.File) error { - var u int - return ioctl(f.Fd(), syscall.TIOCPTYUNLK, uintptr(unsafe.Pointer(&u))) -} - -func ioctl(fd, cmd, ptr uintptr) error { - _, _, e := syscall.Syscall(syscall.SYS_IOCTL, fd, cmd, ptr) - if e != 0 { - return syscall.ENOTTY - } - return nil + return ioctl(f.Fd(), syscall.TIOCPTYUNLK, 0) } diff --git a/vendor/src/github.com/kr/pty/pty_freebsd.go b/vendor/src/github.com/kr/pty/pty_freebsd.go index 13b64d722e..b341babd05 100644 --- a/vendor/src/github.com/kr/pty/pty_freebsd.go +++ b/vendor/src/github.com/kr/pty/pty_freebsd.go @@ -1,53 +1,73 @@ package pty import ( + "errors" "os" - "strconv" "syscall" "unsafe" ) -const ( - sys_TIOCGPTN = 0x4004740F - sys_TIOCSPTLCK = 0x40045431 -) +func posix_openpt(oflag int) (fd int, err error) { + r0, _, e1 := syscall.Syscall(syscall.SYS_POSIX_OPENPT, uintptr(oflag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return +} func open() (pty, tty *os.File, err error) { - p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0) + fd, err := posix_openpt(syscall.O_RDWR | syscall.O_CLOEXEC) if err != nil { return 
nil, nil, err } + p := os.NewFile(uintptr(fd), "/dev/pts") sname, err := ptsname(p) if err != nil { return nil, nil, err } - t, err := os.OpenFile(sname, os.O_RDWR|syscall.O_NOCTTY, 0) + t, err := os.OpenFile("/dev/"+sname, os.O_RDWR, 0) if err != nil { return nil, nil, err } return p, t, nil } +func isptmaster(fd uintptr) (bool, error) { + err := ioctl(fd, syscall.TIOCPTMASTER, 0) + return err == nil, err +} + +var ( + emptyFiodgnameArg fiodgnameArg + ioctl_FIODGNAME = _IOW('f', 120, unsafe.Sizeof(emptyFiodgnameArg)) +) + func ptsname(f *os.File) (string, error) { - var n int - err := ioctl(f.Fd(), sys_TIOCGPTN, &n) + master, err := isptmaster(f.Fd()) if err != nil { return "", err } - return "/dev/pts/" + strconv.Itoa(n), nil -} - -func ioctl(fd uintptr, cmd uintptr, data *int) error { - _, _, e := syscall.Syscall( - syscall.SYS_IOCTL, - fd, - cmd, - uintptr(unsafe.Pointer(data)), - ) - if e != 0 { - return syscall.ENOTTY + if !master { + return "", syscall.EINVAL } - return nil + + const n = _C_SPECNAMELEN + 1 + var ( + buf = make([]byte, n) + arg = fiodgnameArg{Len: n, Buf: (*byte)(unsafe.Pointer(&buf[0]))} + ) + err = ioctl(f.Fd(), ioctl_FIODGNAME, uintptr(unsafe.Pointer(&arg))) + if err != nil { + return "", err + } + + for i, c := range buf { + if c == 0 { + return string(buf[:i]), nil + } + } + return "", errors.New("FIODGNAME string not NUL-terminated") } diff --git a/vendor/src/github.com/kr/pty/pty_linux.go b/vendor/src/github.com/kr/pty/pty_linux.go index a5edfbb394..6e5a04241c 100644 --- a/vendor/src/github.com/kr/pty/pty_linux.go +++ b/vendor/src/github.com/kr/pty/pty_linux.go @@ -7,9 +7,9 @@ import ( "unsafe" ) -const ( - sys_TIOCGPTN = 0x80045430 - sys_TIOCSPTLCK = 0x40045431 +var ( + ioctl_TIOCGPTN = _IOR('T', 0x30, unsafe.Sizeof(_C_uint(0))) /* Get Pty Number (of pty-mux device) */ + ioctl_TIOCSPTLCK = _IOW('T', 0x31, unsafe.Sizeof(_C_int(0))) /* Lock/unlock Pty */ ) func open() (pty, tty *os.File, err error) { @@ -36,28 +36,16 @@ func open() (pty, tty *os.File, err error) { } func ptsname(f *os.File) (string, error) { - var n int - err := ioctl(f.Fd(), sys_TIOCGPTN, &n) + var n _C_uint + err := ioctl(f.Fd(), ioctl_TIOCGPTN, uintptr(unsafe.Pointer(&n))) if err != nil { return "", err } - return "/dev/pts/" + strconv.Itoa(n), nil + return "/dev/pts/" + strconv.Itoa(int(n)), nil } func unlockpt(f *os.File) error { - var u int - return ioctl(f.Fd(), sys_TIOCSPTLCK, &u) -} - -func ioctl(fd uintptr, cmd uintptr, data *int) error { - _, _, e := syscall.Syscall( - syscall.SYS_IOCTL, - fd, - cmd, - uintptr(unsafe.Pointer(data)), - ) - if e != 0 { - return syscall.ENOTTY - } - return nil + var u _C_int + // use TIOCSPTLCK with a zero valued arg to clear the slave pty lock + return ioctl(f.Fd(), ioctl_TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) } diff --git a/vendor/src/github.com/kr/pty/pty_unsupported.go b/vendor/src/github.com/kr/pty/pty_unsupported.go index d4958b3583..898c7303c4 100644 --- a/vendor/src/github.com/kr/pty/pty_unsupported.go +++ b/vendor/src/github.com/kr/pty/pty_unsupported.go @@ -9,19 +9,3 @@ import ( func open() (pty, tty *os.File, err error) { return nil, nil, ErrUnsupported } - -func ptsname(f *os.File) (string, error) { - return "", ErrUnsupported -} - -func grantpt(f *os.File) error { - return ErrUnsupported -} - -func unlockpt(f *os.File) error { - return ErrUnsupported -} - -func ioctl(fd, cmd, ptr uintptr) error { - return ErrUnsupported -} diff --git a/vendor/src/github.com/kr/pty/types.go b/vendor/src/github.com/kr/pty/types.go new file mode 100644 index 
0000000000..5aecb6bcdc --- /dev/null +++ b/vendor/src/github.com/kr/pty/types.go @@ -0,0 +1,10 @@ +// +build ignore + +package pty + +import "C" + +type ( + _C_int C.int + _C_uint C.uint +) diff --git a/vendor/src/github.com/kr/pty/types_freebsd.go b/vendor/src/github.com/kr/pty/types_freebsd.go new file mode 100644 index 0000000000..ce3eb95181 --- /dev/null +++ b/vendor/src/github.com/kr/pty/types_freebsd.go @@ -0,0 +1,15 @@ +// +build ignore + +package pty + +/* +#include +#include +*/ +import "C" + +const ( + _C_SPECNAMELEN = C.SPECNAMELEN /* max length of devicename */ +) + +type fiodgnameArg C.struct_fiodgname_arg diff --git a/vendor/src/github.com/kr/pty/ztypes_386.go b/vendor/src/github.com/kr/pty/ztypes_386.go new file mode 100644 index 0000000000..ff0b8fd838 --- /dev/null +++ b/vendor/src/github.com/kr/pty/ztypes_386.go @@ -0,0 +1,9 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/src/github.com/kr/pty/ztypes_amd64.go b/vendor/src/github.com/kr/pty/ztypes_amd64.go new file mode 100644 index 0000000000..ff0b8fd838 --- /dev/null +++ b/vendor/src/github.com/kr/pty/ztypes_amd64.go @@ -0,0 +1,9 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/src/github.com/kr/pty/ztypes_arm.go b/vendor/src/github.com/kr/pty/ztypes_arm.go new file mode 100644 index 0000000000..ff0b8fd838 --- /dev/null +++ b/vendor/src/github.com/kr/pty/ztypes_arm.go @@ -0,0 +1,9 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/src/github.com/kr/pty/ztypes_freebsd_386.go b/vendor/src/github.com/kr/pty/ztypes_freebsd_386.go new file mode 100644 index 0000000000..d9975374e3 --- /dev/null +++ b/vendor/src/github.com/kr/pty/ztypes_freebsd_386.go @@ -0,0 +1,13 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package pty + +const ( + _C_SPECNAMELEN = 0x3f +) + +type fiodgnameArg struct { + Len int32 + Buf *byte +} diff --git a/vendor/src/github.com/kr/pty/ztypes_freebsd_amd64.go b/vendor/src/github.com/kr/pty/ztypes_freebsd_amd64.go new file mode 100644 index 0000000000..5fa102fcdf --- /dev/null +++ b/vendor/src/github.com/kr/pty/ztypes_freebsd_amd64.go @@ -0,0 +1,14 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package pty + +const ( + _C_SPECNAMELEN = 0x3f +) + +type fiodgnameArg struct { + Len int32 + Pad_cgo_0 [4]byte + Buf *byte +} diff --git a/vendor/src/github.com/kr/pty/ztypes_freebsd_arm.go b/vendor/src/github.com/kr/pty/ztypes_freebsd_arm.go new file mode 100644 index 0000000000..d9975374e3 --- /dev/null +++ b/vendor/src/github.com/kr/pty/ztypes_freebsd_arm.go @@ -0,0 +1,13 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package pty + +const ( + _C_SPECNAMELEN = 0x3f +) + +type fiodgnameArg struct { + Len int32 + Buf *byte +} diff --git a/vendor/src/github.com/syndtr/gocapability/capability/capability_linux.go b/vendor/src/github.com/syndtr/gocapability/capability/capability_linux.go index 3aaae5973a..c5f335f7fb 100644 --- a/vendor/src/github.com/syndtr/gocapability/capability/capability_linux.go +++ b/vendor/src/github.com/syndtr/gocapability/capability/capability_linux.go @@ -388,6 +388,11 @@ func (c *capsV3) Apply(kind CapType) (err error) { } err = prctl(syscall.PR_CAPBSET_DROP, uintptr(i), 0, 0, 0) if err != nil { + // 
Ignore EINVAL since the capability may not be supported on this system.
+			if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL {
+				err = nil
+				continue
+			}
 			return
 		}
 	}
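
The sparseFileReader added above reconstructs a sparse file by alternating between NUL-filled holes and the data fragments listed in the sparse map. As a reference for that change, here is a minimal standalone sketch (not part of the patch; only the sparseEntry field names are taken from the vendored code) that performs the same expansion in one pass and reproduces the first case of sparseFileReadTests:

package main

import (
	"bytes"
	"fmt"
)

// sparseEntry mirrors the {offset, numBytes} pairs used by the vendored
// sparseFileReader.
type sparseEntry struct {
	offset   int64
	numBytes int64
}

// expand rebuilds the full file contents from the sparse-encoded data and
// its map, filling holes with NUL bytes, the same way sparseFileReader.Read
// does incrementally.
func expand(data []byte, sp []sparseEntry, realSize int64) []byte {
	var out bytes.Buffer
	pos := int64(0)
	for _, s := range sp {
		// Hole before this fragment.
		out.Write(bytes.Repeat([]byte{0}, int(s.offset-pos)))
		// The fragment's data.
		out.Write(data[:s.numBytes])
		data = data[s.numBytes:]
		pos = s.offset + s.numBytes
	}
	// Trailing hole, if any.
	out.Write(bytes.Repeat([]byte{0}, int(realSize-pos)))
	return out.Bytes()
}

func main() {
	// Matches the first case in sparseFileReadTests above.
	got := expand([]byte("abcde"), []sparseEntry{{0, 2}, {5, 3}}, 8)
	fmt.Printf("%q\n", got) // "ab\x00\x00\x00cde"
}

The vendored reader does the same work incrementally, which is what lets it stream entries like the 16 GiB file in writer-big-long.tar without materializing them in memory.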
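
TestReadGNUSparseMap1x0 above also pins down the GNU sparse 1.0 on-disk layout: the map precedes the file data as newline-terminated decimal strings (an entry count, then offset/size pairs), padded out to whole 512-byte blocks. A hypothetical encoder for that layout, mirroring what the test builds by hand:

package main

import (
	"bytes"
	"fmt"
)

const blockSize = 512 // tar block size, as in the vendored package

type sparseEntry struct{ offset, numBytes int64 }

// encodeSparseMap1x0 writes a GNU sparse 1.0 map: the number of entries,
// then each offset/size pair, one decimal number per line, padded to the
// smallest multiple of blockSize that fits the map.
func encodeSparseMap1x0(sp []sparseEntry) []byte {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "%d\n", len(sp))
	for _, s := range sp {
		fmt.Fprintf(&buf, "%d\n%d\n", s.offset, s.numBytes)
	}
	pad := (blockSize - buf.Len()%blockSize) % blockSize
	buf.Write(make([]byte, pad))
	return buf.Bytes()
}

func main() {
	m := encodeSparseMap1x0([]sparseEntry{{offset: 0, numBytes: 5}, {offset: 10, numBytes: 5}})
	fmt.Println(len(m)) // 512: "2\n0\n5\n10\n5\n" padded to one block
}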
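
On the go-systemd side, the new LinkUnitFiles mirrors EnableUnitFiles but only links unit files (located outside the usual search paths) into the search path. A hedged usage sketch; the unit path is purely illustrative, and the change fields assume the type/filename/destination layout that the doc comment describes for the returned change list:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}

	// Link a unit file into /run (runtime=true), replacing existing
	// symlinks if necessary (force=true). Paths must be absolute.
	changes, err := conn.LinkUnitFiles([]string{"/opt/app/myapp.service"}, true, true)
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range changes {
		fmt.Printf("%s: %s -> %s\n", c.Type, c.Filename, c.Destination)
	}
}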
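
For the gorilla/mux changes, the clarified StrictSlash semantics and the new KeepContext flag are easiest to see in a router setup. A small sketch assuming the patched gorilla/mux; the route and listen address are placeholders:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// With StrictSlash(true), GET /products is answered with a 301
	// redirect to /products/, so the handler always sees the path
	// exactly as registered.
	r.StrictSlash(true)
	r.HandleFunc("/products/", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintln(w, "product list")
	})

	// KeepContext stops the router from calling context.Clear after
	// each request; the application then owns the lifetime of values
	// stored in gorilla/context for that request.
	r.KeepContext = true

	log.Fatal(http.ListenAndServe(":8080", r))
}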
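
Finally, the kr/pty rewrite derives ioctl request numbers with _IOC-style helpers instead of hardcoding them. An illustrative standalone re-derivation (constants copied from ioctl_linux.go above) that checks the two Linux requests pty_linux.go previously hardcoded:

package main

import "fmt"

// Constants mirror the vendored ioctl_linux.go.
const (
	_IOC_NRBITS   = 8
	_IOC_TYPEBITS = 8
	_IOC_SIZEBITS = 14

	_IOC_NRSHIFT   = 0
	_IOC_TYPESHIFT = _IOC_NRSHIFT + _IOC_NRBITS
	_IOC_SIZESHIFT = _IOC_TYPESHIFT + _IOC_TYPEBITS
	_IOC_DIRSHIFT  = _IOC_SIZESHIFT + _IOC_SIZEBITS

	_IOC_WRITE uint = 1
	_IOC_READ  uint = 2
)

// _IOC packs direction, type, number, and argument size into one request.
func _IOC(dir uint, typ byte, nr byte, size uintptr) uintptr {
	return uintptr(dir)<<_IOC_DIRSHIFT |
		uintptr(typ)<<_IOC_TYPESHIFT |
		uintptr(nr)<<_IOC_NRSHIFT |
		size<<_IOC_SIZESHIFT
}

func main() {
	// _IOR('T', 0x30, sizeof(uint32)) == the old sys_TIOCGPTN constant.
	fmt.Printf("TIOCGPTN   = %#x (want 0x80045430)\n", _IOC(_IOC_READ, 'T', 0x30, 4))
	// _IOW('T', 0x31, sizeof(int32)) == the old sys_TIOCSPTLCK constant.
	fmt.Printf("TIOCSPTLCK = %#x (want 0x40045431)\n", _IOC(_IOC_WRITE, 'T', 0x31, 4))
}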