diff --git a/.gitignore b/.gitignore index a40d9e067c..0087b47302 100644 --- a/.gitignore +++ b/.gitignore @@ -22,3 +22,4 @@ bundles/ .git/ vendor/pkg/ pyenv +Vagrantfile diff --git a/AUTHORS b/AUTHORS index a54c6337cb..df091d5950 100644 --- a/AUTHORS +++ b/AUTHORS @@ -334,6 +334,7 @@ Wes Morgan Will Dietz William Delanoue Will Rouesnel +Will Weaver Xiuming Chen Yang Bai Yurii Rashkovskii diff --git a/CHANGELOG.md b/CHANGELOG.md index e016472406..a6a93dc97d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,50 @@ # Changelog +## 0.8.1 (2014-02-18) + +#### Builder + +- Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper +- Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system +- Add error to `docker build --rm`. This adds a missing error check to ensure failures to remove containers are detected and reported +#### Documentation + +* Update issue filing instructions +* Warn against the use of symlinks for Docker's storage folder +* Replace the Firefox example with an IceWeasel example +* Rewrite the PostgreSQL example using a Dockerfile and add more details to it +* Improve the OS X documentation +#### Remote API + +- Fix broken images API for versions less than 1.7 +- Use the right encoding for all API endpoints which return JSON +- Move remote api client to api/ +- Queue calls to the API using generic socket wait +#### Runtime + +- Fix the use of custom settings for bridges and custom bridges +- Refactor the devicemapper code to avoid many mount/unmount race conditions and failures +- Remove two panics which could make Docker crash in some situations +- Don't ping registry from the CLI client +- Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks +- Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration +- Remove directory when removing devicemapper device. This cleans up leftover mount directories +- Drop NET_ADMIN capability for non-privileged containers. Unprivileged containers can't change their network configuration +- Ensure `docker cp` stream is closed properly +- Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper +- Stop allowing tcp:// as a default tcp bind address which binds to 127.0.0.1:4243 and remove the default port ++ Mount-bind the PTY as container console. This allows tmux and screen to run in a container +- Clean up archive closing. This fixes and improves archive handling +- Fix engine tests on systems where temp directories are symlinked +- Add test methods for save and load +- Avoid temporarily unmounting the container when restarting it. This fixes a race for devicemapper during restart +- Support submodules when building from a GitHub repository +- Quote volume path to allow spaces +- Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs ## 0.8.0 (2014-02-04) #### Notable features since 0.7.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ee40b1d3b2..c03c5d0d9c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -88,7 +88,7 @@ curl -o .git/hooks/pre-commit https://raw.github.com/edsrzf/gofmt-git-hook/maste Pull request descriptions should be as clear as possible and include a reference to all the issues that they address.
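For reference, the gofmt pre-commit hook referenced in the hunk header above boils down to something like the following minimal sketch; the linked script may differ in detail, this only illustrates the idea:

```bash
#!/bin/sh
# Minimal gofmt pre-commit hook (sketch): reject the commit if any
# staged .go file is not gofmt-formatted.
files=$(git diff --cached --name-only --diff-filter=ACM | grep '\.go$')
[ -z "$files" ] && exit 0
unformatted=$(gofmt -l $files)
if [ -n "$unformatted" ]; then
    echo "The following files need gofmt before committing:"
    echo "$unformatted"
    exit 1
fi
```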
-Pull requests mustn't contain commits from other users or branches. +Pull requests must not contain commits from other users or branches. Code review comments may be added to your pull request. Discuss, then make the suggested modifications and push additional commits to your feature branch. Be @@ -109,6 +109,18 @@ name and email address match your git configuration. The AUTHORS file is regenerated occasionally from the git commit history, so a mismatch may result in your changes being overwritten. +### Merge approval + +Docker maintainers use LGTM (looks good to me) in comments on the code review +to indicate acceptance. + +A change requires LGTMs from an absolute majority of the maintainers of each +component affected. For example, if a change affects docs/ and registry/, it +needs an absolute majority from the maintainers of docs/ AND, separately, an +absolute majority of the maintainers of registry/. + +For more details, see [MAINTAINERS.md](hack/MAINTAINERS.md). + ### Sign your work The sign-off is a simple line at the end of the explanation for the @@ -117,7 +129,7 @@ pass it on as an open-source patch. The rules are pretty simple: if you can certify the below: ``` -Docker Developer Grant and Certificate of Origin 1.1 +Docker Developer Certificate of Origin 1.1 By making a contribution to the Docker Project ("Project"), I represent and warrant that: @@ -158,16 +170,21 @@ curl -o .git/hooks/prepare-commit-msg https://raw.github.com/dotcloud/docker/mas * Note: the above script expects to find your GitHub user name in ``git config --get github.user`` +#### Small patch exception + +There are several exceptions to the signing requirement. Currently these are: + +* Your patch fixes spelling or grammar errors. +* Your patch is a single line change to documentation. + If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io). - - ### How can I become a maintainer? * Step 1: learn the component inside out * Step 2: make yourself useful by contributing code, bugfixes, support etc. * Step 3: volunteer on the irc channel (#docker@freenode) -* Step 4: propose yourself at a scheduled #docker-meeting +* Step 4: propose yourself at a scheduled docker meeting in #docker-dev Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available. You don't have to be a maintainer to make a difference on the project!
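To make the sign-off concrete: at the time of this change, the DCO sign-off was a single trailer line in the commit message, roughly of the shape below. The name, email, and handle are placeholders, and the prepare-commit-msg hook above fills in the GitHub user name for you:

```bash
# Hypothetical example commit; the trailer line is what the DCO check looks for.
git commit -m "Fix typo in builder docs

Docker-DCO-1.1-Signed-off-by: Joe Smith <joe.smith@example.com> (github: joesmith)"
```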
diff --git a/Dockerfile b/Dockerfile index 8eb2459215..9929a10f3c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -62,7 +62,7 @@ RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL # Install Go -RUN curl -s https://go.googlecode.com/files/go1.2.src.tar.gz | tar -v -C /usr/local -xz +RUN curl -s https://go.googlecode.com/files/go1.2.1.src.tar.gz | tar -v -C /usr/local -xz ENV PATH /usr/local/go/bin:$PATH ENV GOPATH /go:/go/src/github.com/dotcloud/docker/vendor RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 @@ -87,6 +87,7 @@ RUN git config --global user.email 'docker-dummy@example.com' VOLUME /var/lib/docker WORKDIR /go/src/github.com/dotcloud/docker +ENV DOCKER_BUILDTAGS apparmor # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] diff --git a/FIXME b/FIXME index 91c8d92835..4f27d36856 100644 --- a/FIXME +++ b/FIXME @@ -11,20 +11,14 @@ They are just like FIXME comments in the source code, except we're not sure where to put them - so we put them here :) -* Merge Runtime, Server and Builder into Runtime * Run linter on codebase * Unify build commands and regular commands * Move source code into src/ subdir for clarity * docker build: on non-existent local path for ADD, don't show full absolute path on the host -* docker tag foo REPO:TAG * use size header for progress bar in pull * Clean up context upload in build!!! * Parallel pull -* Always generate a resolv.conf per container, to avoid changing resolv.conf under thne container's feet -* Save metadata with import/export (#1974) * Upgrade dockerd without stopping containers * Simple command to remove all untagged images (`docker rmi $(docker images | awk '/^<none>/ { print $3 }')`) * Simple command to clean up containers for disk space -* Caching after an ADD (#880) * Clean up the ProgressReader api, it's a PITA to use -* Use netlink instead of iproute2/iptables (#925) diff --git a/MAINTAINERS b/MAINTAINERS index 895fba563a..49d14ba0bd 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1,9 +1,7 @@ Solomon Hykes (@shykes) Guillaume Charmes (@creack) -Victor Vieux (@vieux) +Victor Vieux (@vieux) Michael Crosby (@crosbymichael) .travis.yml: Tianon Gravi (@tianon) -api.go: Victor Vieux (@vieux) Dockerfile: Tianon Gravi (@tianon) Makefile: Tianon Gravi (@tianon) -Vagrantfile: Cristian Staretu (@unclejack) diff --git a/Makefile b/Makefile index 168707a80f..b3bea8a31f 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD) DOCKER_IMAGE := docker:$(GIT_BRANCH) DOCKER_DOCS_IMAGE := docker-docs:$(GIT_BRANCH) -DOCKER_RUN_DOCKER := docker run -rm -i -t -privileged -e TESTFLAGS -v $(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles "$(DOCKER_IMAGE)" +DOCKER_RUN_DOCKER := docker run --rm -i -t --privileged -e TESTFLAGS -v "$(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles" "$(DOCKER_IMAGE)" default: binary @@ -17,10 +17,10 @@ cross: build $(DOCKER_RUN_DOCKER) hack/make.sh binary cross docs: docs-build - docker run -rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" + docker run --rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" docs-shell: docs-build - docker run -rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" bash + docker run --rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" bash test: build $(DOCKER_RUN_DOCKER) hack/make.sh test test-integration @@ -32,10 +32,10 @@ shell: build $(DOCKER_RUN_DOCKER) bash build: bundles - docker build -rm -t "$(DOCKER_IMAGE)" .
+ docker build -t "$(DOCKER_IMAGE)" . docs-build: - docker build -rm -t "$(DOCKER_DOCS_IMAGE)" docs + docker build -t "$(DOCKER_DOCS_IMAGE)" docs bundles: mkdir bundles diff --git a/README.md b/README.md index b6b77d6e61..1922be5d8a 100644 --- a/README.md +++ b/README.md @@ -4,19 +4,19 @@ Docker: the Linux container engine Docker is an open source project to pack, ship and run any application as a lightweight container -Docker containers are both *hardware-agnostic* and -*platform-agnostic*. This means that they can run anywhere, from your -laptop to the largest EC2 compute instance and everything in between - -and they don't require that you use a particular language, framework -or packaging system. That makes them great building blocks for -deploying and scaling web apps, databases and backend services without -depending on a particular stack or provider. +Docker containers are both *hardware-agnostic* and *platform-agnostic*. +This means that they can run anywhere, from your laptop to the largest +EC2 compute instance and everything in between - and they don't require +that you use a particular language, framework or packaging system. That +makes them great building blocks for deploying and scaling web apps, +databases and backend services without depending on a particular stack +or provider. Docker is an open-source implementation of the deployment engine which -powers [dotCloud](http://dotcloud.com), a popular -Platform-as-a-Service. It benefits directly from the experience -accumulated over several years of large-scale operation and support of -hundreds of thousands of applications and databases. +powers [dotCloud](http://dotcloud.com), a popular Platform-as-a-Service. +It benefits directly from the experience accumulated over several years +of large-scale operation and support of hundreds of thousands of +applications and databases. ![Docker L](docs/theme/docker/static/img/dockerlogo-h.png "Docker") @@ -24,10 +24,10 @@ hundreds of thousands of applications and databases. A common method for distributing applications and sandboxing their execution is to use virtual machines, or VMs. Typical VM formats are -VMWare's vmdk, Oracle Virtualbox's vdi, and Amazon EC2's ami. In -theory these formats should allow every developer to automatically -package their application into a "machine" for easy distribution and -deployment. In practice, that almost never happens, for a few reasons: +VMWare's vmdk, Oracle Virtualbox's vdi, and Amazon EC2's ami. In theory +these formats should allow every developer to automatically package +their application into a "machine" for easy distribution and deployment. +In practice, that almost never happens, for a few reasons: * *Size*: VMs are very large which makes them impractical to store and transfer. @@ -47,39 +47,37 @@ deployment. In practice, that almost never happens, for a few reasons: service discovery. By contrast, Docker relies on a different sandboxing method known as -*containerization*. Unlike traditional virtualization, -containerization takes place at the kernel level. Most modern -operating system kernels now support the primitives necessary for -containerization, including Linux with [openvz](http://openvz.org), +*containerization*. Unlike traditional virtualization, containerization +takes place at the kernel level. 
Most modern operating system kernels +now support the primitives necessary for containerization, including +Linux with [openvz](http://openvz.org), [vserver](http://linux-vserver.org) and more recently [lxc](http://lxc.sourceforge.net), Solaris with [zones](http://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc) and FreeBSD with [Jails](http://www.freebsd.org/doc/handbook/jails.html). -Docker builds on top of these low-level primitives to offer developers -a portable format and runtime environment that solves all 4 -problems. Docker containers are small (and their transfer can be -optimized with layers), they have basically zero memory and cpu -overhead, they are completely portable and are designed from the -ground up with an application-centric design. +Docker builds on top of these low-level primitives to offer developers a +portable format and runtime environment that solves all 4 problems. +Docker containers are small (and their transfer can be optimized with +layers), they have basically zero memory and cpu overhead, they are +completely portable and are designed from the ground up with an +application-centric design. -The best part: because ``docker`` operates at the OS level, it can -still be run inside a VM! +The best part: because Docker operates at the OS level, it can still be +run inside a VM! ## Plays well with others Docker does not require that you buy into a particular programming language, framework, packaging system or configuration language. -Is your application a Unix process? Does it use files, tcp -connections, environment variables, standard Unix streams and -command-line arguments as inputs and outputs? Then ``docker`` can run -it. +Is your application a Unix process? Does it use files, tcp connections, +environment variables, standard Unix streams and command-line arguments +as inputs and outputs? Then Docker can run it. Can your application's build be expressed as a sequence of such -commands? Then ``docker`` can build it. - +commands? Then Docker can build it. ## Escape dependency hell @@ -126,14 +124,11 @@ build command inherits the result of the previous commands, the Here's a typical Docker build process: ```bash -from ubuntu:12.10 -run apt-get update -run DEBIAN_FRONTEND=noninteractive apt-get install -q -y python -run DEBIAN_FRONTEND=noninteractive apt-get install -q -y python-pip -run pip install django -run DEBIAN_FRONTEND=noninteractive apt-get install -q -y curl -run curl -L https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv -run cd helloflask-master && pip install -r requirements.txt +FROM ubuntu:12.04 +RUN apt-get update +RUN apt-get install -q -y python python-pip curl +RUN curl -L https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv +RUN cd helloflask-master && pip install -r requirements.txt ``` Note that Docker doesn't care *how* dependencies are built - as long @@ -143,22 +138,25 @@ as they can be built by running a Unix command in a container. Getting started =============== -Docker can be installed on your local machine as well as servers - both bare metal and virtualized. -It is available as a binary on most modern Linux systems, or as a VM on Windows, Mac and other systems. +Docker can be installed on your local machine as well as servers - both +bare metal and virtualized. It is available as a binary on most modern +Linux systems, or as a VM on Windows, Mac and other systems. -We also offer an interactive tutorial for quickly learning the basics of using Docker. 
- - -For up-to-date install instructions and online tutorials, see the [Getting Started page](http://www.docker.io/gettingstarted/). +We also offer an interactive tutorial for quickly learning the basics of +using Docker. +For up-to-date install instructions and online tutorials, see the +[Getting Started page](http://www.docker.io/gettingstarted/). Usage examples ============== -Docker can be used to run short-lived commands, long-running daemons (app servers, databases etc.), -interactive shell sessions, etc. +Docker can be used to run short-lived commands, long-running daemons +(app servers, databases etc.), interactive shell sessions, etc. -You can find a [list of real-world examples](http://docs.docker.io/en/latest/examples/) in the documentation. +You can find a [list of real-world +examples](http://docs.docker.io/en/latest/examples/) in the +documentation. Under the hood -------------- @@ -170,13 +168,7 @@ Under the hood, Docker is built on the following components: and [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) capabilities of the Linux kernel; -* [AUFS](http://aufs.sourceforge.net/aufs.html), a powerful union - filesystem with copy-on-write capabilities; -* The [Go](http://golang.org) programming language; -* [lxc](http://lxc.sourceforge.net/), a set of convenience scripts to - simplify the creation of Linux containers. - - +* The [Go](http://golang.org) programming language. Contributing to Docker ====================== @@ -187,7 +179,6 @@ started [here](CONTRIBUTING.md). They are probably not perfect, please let us know if anything feels wrong or incomplete. - ### Legal *Brought to you courtesy of our legal counsel. For more context, diff --git a/VERSION b/VERSION index f8d71478f5..d182dc9160 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.8.0-dev +0.8.1-dev diff --git a/Vagrantfile b/Vagrantfile deleted file mode 100644 index 23f262020a..0000000000 --- a/Vagrantfile +++ /dev/null @@ -1,206 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -BOX_NAME = ENV['BOX_NAME'] || "ubuntu" -BOX_URI = ENV['BOX_URI'] || "http://files.vagrantup.com/precise64.box" -VF_BOX_URI = ENV['BOX_URI'] || "http://files.vagrantup.com/precise64_vmware_fusion.box" -AWS_BOX_URI = ENV['BOX_URI'] || "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box" -AWS_REGION = ENV['AWS_REGION'] || "us-east-1" -AWS_AMI = ENV['AWS_AMI'] || "ami-69f5a900" -AWS_INSTANCE_TYPE = ENV['AWS_INSTANCE_TYPE'] || 't1.micro' -SSH_PRIVKEY_PATH = ENV['SSH_PRIVKEY_PATH'] -PRIVATE_NETWORK = ENV['PRIVATE_NETWORK'] - -# Boolean that forwards the Docker dynamic ports 49000-49900 -# See http://docs.docker.io/en/latest/use/port_redirection/ for more -# $ FORWARD_DOCKER_PORTS=1 vagrant [up|reload] -FORWARD_DOCKER_PORTS = ENV['FORWARD_DOCKER_PORTS'] -VAGRANT_RAM = ENV['VAGRANT_RAM'] || 512 -VAGRANT_CORES = ENV['VAGRANT_CORES'] || 1 - -# You may also provide a comma-separated list of ports -# for Vagrant to forward. For example: -# $ FORWARD_PORTS=8080,27017 vagrant [up|reload] -FORWARD_PORTS = ENV['FORWARD_PORTS'] - -# A script to upgrade from the 12.04 kernel to the raring backport kernel (3.8) -# and install docker. -$script = < [... the rest of the deleted Vagrantfile and the start of the next documentation diff were lost in extraction; only the tail of an asciinema embed update survives ...] + src="https://asciinema.org/a/7658.js" + id="asciicast-7658" async></script> @@ -82,7 +78,7 @@ See the example in action .. _hello_world_daemon: Hello World Daemon -================== +------------------ .. include:: example_header.inc
@@ -172,14 +168,14 @@ See the example in action id="asciicast-2562" async></script> -The next example in the series is a :ref:`python_web_app` example, or +The next example in the series is a :ref:`nodejs_web_app` example, or you could skip to any of the other examples: -* :ref:`python_web_app` * :ref:`nodejs_web_app` * :ref:`running_redis_service` * :ref:`running_ssh_service` * :ref:`running_couchdb_service` * :ref:`postgresql_service` * :ref:`mongodb_image` +* :ref:`python_web_app` diff --git a/docs/sources/examples/index.rst b/docs/sources/examples/index.rst index cd08107e7a..cf9ed9340a 100644 --- a/docs/sources/examples/index.rst +++ b/docs/sources/examples/index.rst @@ -16,7 +16,6 @@ to more substantial services like those which you might find in production. :maxdepth: 1 hello_world - python_web_app nodejs_web_app running_redis_service running_ssh_service @@ -26,3 +25,4 @@ to more substantial services like those which you might find in production. running_riak_service using_supervisord cfengine_process_management + python_web_app diff --git a/docs/sources/examples/postgresql_service.Dockerfile b/docs/sources/examples/postgresql_service.Dockerfile new file mode 100644 index 0000000000..af1423f258 --- /dev/null +++ b/docs/sources/examples/postgresql_service.Dockerfile @@ -0,0 +1,53 @@ +# +# example Dockerfile for http://docs.docker.io/en/latest/examples/postgresql_service/ +# + +FROM ubuntu +MAINTAINER SvenDowideit@docker.com + +# Add the PostgreSQL PGP key to verify their Debian packages. +# It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc +RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 + +# Add PostgreSQL's repository. It contains the most recent stable release +# of PostgreSQL, ``9.3``. +RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list + +# Update the Ubuntu and PostgreSQL repository indexes +RUN apt-get update + +# Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3 +# There are some warnings (in red) that show up during the build. You can hide +# them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive +RUN apt-get -y -q install python-software-properties software-properties-common +RUN apt-get -y -q install postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3 + +# Note: The official Debian and Ubuntu images automatically ``apt-get clean`` +# after each ``apt-get`` + +# Run the rest of the commands as the ``postgres`` user created by the ``postgresql-9.3`` package when it was ``apt-get installed`` +USER postgres + +# Create a PostgreSQL role named ``docker`` with ``docker`` as the password and +# then create a database `docker` owned by the ``docker`` role. +# Note: here we use ``&&\`` to run commands one after the other - the ``\`` +# allows the RUN command to span multiple lines. +RUN /etc/init.d/postgresql start &&\ + psql --command "CREATE USER docker WITH SUPERUSER PASSWORD 'docker';" &&\ + createdb -O docker docker + +# Adjust PostgreSQL configuration so that remote connections to the +# database are possible.
+RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf + +# And add ``listen_addresses`` to ``/etc/postgresql/9.3/main/postgresql.conf`` +RUN echo "listen_addresses='*'" >> /etc/postgresql/9.3/main/postgresql.conf + +# Expose the PostgreSQL port +EXPOSE 5432 + +# Add VOLUMEs to allow backup of config, logs and databases +VOLUME ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"] + +# Set the default command to run when starting the container +CMD ["/usr/lib/postgresql/9.3/bin/postgres", "-D", "/var/lib/postgresql/9.3/main", "-c", "config_file=/etc/postgresql/9.3/main/postgresql.conf"] diff --git a/docs/sources/examples/postgresql_service.rst b/docs/sources/examples/postgresql_service.rst index 1c427563e7..5a2323471b 100644 --- a/docs/sources/examples/postgresql_service.rst +++ b/docs/sources/examples/postgresql_service.rst @@ -9,152 +9,109 @@ PostgreSQL Service .. include:: example_header.inc -.. note:: - - A shorter version of `this blog post`_. - -.. _this blog post: http://zaiste.net/2013/08/docker_postgresql_how_to/ - Installing PostgreSQL on Docker ------------------------------- -Run an interactive shell in a Docker container. +Assuming there is no Docker image that suits your needs in `the index`_, you +can create one yourself. -.. code-block:: bash +.. _the index: http://index.docker.io - sudo docker run -i -t ubuntu /bin/bash - -Update its dependencies. - -.. code-block:: bash - - apt-get update - -Install ``python-software-properties``, ``software-properties-common``, ``wget`` and ``vim``. - -.. code-block:: bash - - apt-get -y install python-software-properties software-properties-common wget vim - -Add PostgreSQL's repository. It contains the most recent stable release -of PostgreSQL, ``9.3``. - -.. code-block:: bash - - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - - echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list - apt-get update - -Finally, install PostgreSQL 9.3 - -.. code-block:: bash - - apt-get -y install postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3 - -Now, create a PostgreSQL superuser role that can create databases and -other roles. Following Vagrant's convention the role will be named -``docker`` with ``docker`` password assigned to it. - -.. code-block:: bash - - su postgres -c "createuser -P -d -r -s docker" - -Create a test database also named ``docker`` owned by previously created ``docker`` -role. - -.. code-block:: bash - - su postgres -c "createdb -O docker docker" - -Adjust PostgreSQL configuration so that remote connections to the -database are possible. Make sure that inside -``/etc/postgresql/9.3/main/pg_hba.conf`` you have following line: - -.. code-block:: bash - - host all all 0.0.0.0/0 md5 - -Additionaly, inside ``/etc/postgresql/9.3/main/postgresql.conf`` -uncomment ``listen_addresses`` like so: - -.. code-block:: bash - - listen_addresses='*' +Start by creating a new Dockerfile: .. note:: This PostgreSQL setup is for development only purposes. Refer - to PostgreSQL documentation how to fine-tune these settings so that it - is secure enough. + to the PostgreSQL documentation to fine-tune these settings so that it + is suitably secure. -Exit. +.. literalinclude:: postgresql_service.Dockerfile + +Build an image from the Dockerfile assign it a name. .. code-block:: bash - exit + $ sudo docker build -t eg_postgresql . -Create an image from our container and assign it a name. 
The ``<container_id>`` -is in the Bash prompt; you can also locate it using ``docker ps -a``. +And run the PostgreSQL server container (in the foreground): .. code-block:: bash - sudo docker commit <container_id> <your username>/postgresql + $ sudo docker run -rm -P -name pg_test eg_postgresql -Finally, run the PostgreSQL server via ``docker``. +There are 2 ways to connect to the PostgreSQL server. We can use +:ref:`working_with_links_names`, or we can access it from our host (or the network). + +.. note:: The ``-rm`` flag removes the container when the container + exits successfully. + +Using container linking +^^^^^^^^^^^^^^^^^^^^^^^ + +Containers can be linked to another container's ports directly using +``-link remote_name:local_alias`` in the client's ``docker run``. This will +set a number of environment variables that can then be used to connect: .. code-block:: bash - CONTAINER=$(sudo docker run -d -p 5432 \ - -t <your username>/postgresql \ - /bin/su postgres -c '/usr/lib/postgresql/9.3/bin/postgres \ - -D /var/lib/postgresql/9.3/main \ - -c config_file=/etc/postgresql/9.3/main/postgresql.conf') + $ sudo docker run -rm -t -i -link pg_test:pg eg_postgresql bash -Connect the PostgreSQL server using ``psql`` (You will need the -postgresql client installed on the machine. For ubuntu, use something -like ``sudo apt-get install postgresql-client``). + postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password + +Connecting from your host system +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Assuming you have the postgresql-client installed, you can use the host-mapped port +to test as well. You need to use ``docker ps`` to find out what local host port the +container is mapped to first: .. code-block:: bash - CONTAINER_IP=$(sudo docker inspect -format='{{.NetworkSettings.IPAddress}}' $CONTAINER) - psql -h $CONTAINER_IP -p 5432 -d docker -U docker -W + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 5e24362f27f6 eg_postgresql:latest /usr/lib/postgresql/ About an hour ago Up About an hour 0.0.0.0:49153->5432/tcp pg_test + $ psql -h localhost -p 49153 -d docker -U docker --password -As before, create roles or databases if needed. +Testing the database +^^^^^^^^^^^^^^^^^^^^ + +Once you have authenticated and have a ``docker =#`` prompt, you can +create a table and populate it. .. code-block:: bash psql (9.3.1) Type "help" for help. - docker=# CREATE DATABASE foo OWNER=docker; - CREATE DATABASE + docker=# CREATE TABLE cities ( + docker(# name varchar(80), + docker(# location point + docker(# ); + CREATE TABLE + docker=# INSERT INTO cities VALUES ('San Francisco', '(-194.0, 53.0)'); + INSERT 0 1 + docker=# select * from cities; + name | location + ---------------+----------- + San Francisco | (-194,53) + (1 row) -Additionally, publish your newly created image on the Docker Index. +Using the container volumes +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can use the defined volumes to inspect the PostgreSQL log files and to backup your +configuration and data: .. code-block:: bash - docker run -rm --volumes-from pg_test -t -i busybox sh - sudo docker login - Username: - [...] + / # ls + bin etc lib linuxrc mnt proc run sys usr + dev home lib64 media opt root sbin tmp var + / # ls /etc/postgresql/9.3/main/ + environment pg_hba.conf postgresql.conf + pg_ctl.conf pg_ident.conf start.conf + /tmp # ls /var/log + ldconfig postgresql - sudo docker push <your username>/postgresql - -PostgreSQL service auto-launch ------------------------------- - -Running our image seems complicated.
We have to specify the whole command with -``docker run``. Let's simplify it so the service starts automatically when the -container starts. - -.. code-block:: bash - - sudo docker commit -run='{"Cmd": \ - ["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.3/bin/postgres -D \ - /var/lib/postgresql/9.3/main -c \ - config_file=/etc/postgresql/9.3/main/postgresql.conf"], "PortSpecs": ["5432"]}' \ - <container_id> <your username>/postgresql - -From now on, just type ``docker run <your username>/postgresql`` and -PostgreSQL should automatically start. diff --git a/docs/sources/examples/python_web_app.rst b/docs/sources/examples/python_web_app.rst index f31b31b7d2..5b8e3f6b4b 100644 --- a/docs/sources/examples/python_web_app.rst +++ b/docs/sources/examples/python_web_app.rst @@ -9,109 +9,137 @@ Python Web App .. include:: example_header.inc -The goal of this example is to show you how you can author your own -Docker images using a parent image, making changes to it, and then -saving the results as a new image. We will do that by making a simple -hello Flask web application image. +While using Dockerfiles is the preferred way to create maintainable +and repeatable images, it's useful to know how you can try things out +and then commit your live changes to an image. -**Steps:** +The goal of this example is to show you how you can modify your own +Docker images by making changes to a running +container, and then saving the results as a new image. We will do +that by making a simple 'hello world' Flask web application image. + +Download the initial image +-------------------------- + +Download the ``shykes/pybuilder`` Docker image from the ``http://index.docker.io`` +registry. + +This image contains a ``buildapp`` script to download the web app and then ``pip install`` +any required modules, and a ``runapp`` script that finds the ``app.py`` and runs it. + +.. _`shykes/pybuilder`: https://github.com/shykes/pybuilder .. code-block:: bash - sudo docker pull shykes/pybuilder + $ sudo docker pull shykes/pybuilder -We are downloading the ``shykes/pybuilder`` Docker image +.. note:: This container was built with a very old version of docker + (May 2013 - see `shykes/pybuilder`_ ), when the ``Dockerfile`` format was different, + but the image can still be used now. + +Interactively make some modifications +------------------------------------- + +We then start a new container running interactively using the image. +First, we set a ``URL`` variable that points to a tarball of a simple +helloflask web app, and then we run a command contained in the image called +``buildapp``, passing it the ``$URL`` variable. The container is +given a name ``pybuilder_run`` which we will use in the next steps. + +While this example is simple, you could run any number of interactive commands, +try things out, and then exit when you're done. .. code-block:: bash - URL=http://github.com/shykes/helloflask/archive/master.tar.gz + $ sudo docker run -i -t -name pybuilder_run shykes/pybuilder bash -We set a ``URL`` variable that points to a tarball of a simple helloflask web app - -.. code-block:: bash - - BUILD_JOB=$(sudo docker run -d -t shykes/pybuilder:latest /usr/local/bin/buildapp $URL) - -Inside of the ``shykes/pybuilder`` image there is a command called -``buildapp``, we are running that command and passing the ``$URL`` variable -from step 2 to it, and running the whole thing inside of a new -container. The ``BUILD_JOB`` environment variable will be set with the new container ID. -
code-block:: bash - - sudo docker attach -sig-proxy=false $BUILD_JOB + $$ URL=http://github.com/shykes/helloflask/archive/master.tar.gz + $$ /usr/local/bin/buildapp $URL [...] + $$ exit -While this container is running, we can attach to the new container to -see what is going on. The flag ``--sig-proxy`` set as ``false`` allows you to connect and -disconnect (Ctrl-C) to it without stopping the container. - -.. code-block:: bash - - sudo docker ps -a - -List all Docker containers. If this container has already finished -running, it will still be listed here. - -.. code-block:: bash - - BUILD_IMG=$(sudo docker commit $BUILD_JOB _/builds/github.com/shykes/helloflask/master) +Commit the container to create a new image +------------------------------------------ Save the changes we just made in the container to a new image called -``_/builds/github.com/hykes/helloflask/master`` and save the image ID in -the ``BUILD_IMG`` variable name. +``/builds/github.com/shykes/helloflask/master``. You now have 3 different +ways to refer to the container: name ``pybuilder_run``, short-id ``c8b2e8228f11``, or +long-id ``c8b2e8228f11b8b3e492cbf9a49923ae66496230056d61e07880dc74c5f495f9``. .. code-block:: bash - WEB_WORKER=$(sudo docker run -d -p 5000 $BUILD_IMG /usr/local/bin/runapp) + $ sudo docker commit pybuilder_run /builds/github.com/shykes/helloflask/master + c8b2e8228f11b8b3e492cbf9a49923ae66496230056d61e07880dc74c5f495f9 + + +Run the new image to start the web worker +----------------------------------------- + +Use the new image to create a new container with +network port 5000 mapped to a local port + +.. code-block:: bash + + $ sudo docker run -d -p 5000 --name web_worker /builds/github.com/shykes/helloflask/master /usr/local/bin/runapp + - **"docker run -d "** run a command in a new container. We pass "-d" so it runs as a daemon. - **"-p 5000"** the web app is going to listen on this port, so it must be mapped from the container to the host system. -- **"$BUILD_IMG"** is the image we want to run the command inside of. - **/usr/local/bin/runapp** is the command which starts the web app. -Use the new image we just created and create a new container with -network port 5000, and return the container ID and store in the -``WEB_WORKER`` variable. -.. code-block:: bash +View the container logs +----------------------- - sudo docker logs $WEB_WORKER - * Running on http://0.0.0.0:5000/ - -View the logs for the new container using the ``WEB_WORKER`` variable, and +View the logs for the new ``web_worker`` container and if everything worked as planned you should see the line ``Running on http://0.0.0.0:5000/`` in the log output. +To exit the view without stopping the container, hit Ctrl-C, or open another +terminal and continue with the example while watching the result in the logs. + .. code-block:: bash - WEB_PORT=$(sudo docker port $WEB_WORKER 5000 | awk -F: '{ print $2 }') + $ sudo docker logs -f web_worker + * Running on http://0.0.0.0:5000/ + + +See the webapp output +--------------------- Look up the public-facing port which is NAT-ed. Find the private port used by the container and store it inside of the ``WEB_PORT`` variable. -.. code-block:: bash - - # install curl if necessary, then ... - curl http://127.0.0.1:$WEB_PORT - Hello world! - Access the web app using the ``curl`` binary. If everything worked as planned you should see the line ``Hello world!`` inside of your console. -**Video:** +.. 
code-block:: bash -See the example in action + $ WEB_PORT=$(sudo docker port web_worker 5000 | awk -F: '{ print $2 }') -.. raw:: html + # install curl if necessary, then ... + $ curl http://127.0.0.1:$WEB_PORT + Hello world! - -Continue to :ref:`running_ssh_service`. +Clean up example containers and images +-------------------------------------- + +.. code-block:: bash + + $ sudo docker ps --all + +List ``--all`` the Docker containers. If this container has already finished +running, it will still be listed here with a status of 'Exit 0'. + +.. code-block:: bash + + $ sudo docker stop web_worker + $ sudo docker rm web_worker pybuilder_run + $ sudo docker rmi /builds/github.com/shykes/helloflask/master shykes/pybuilder:latest + +And now stop the running web worker, and delete the containers, so that we can +then delete the images that we used. + diff --git a/docs/sources/examples/running_redis_service.rst b/docs/sources/examples/running_redis_service.rst index 9687f0cfa8..c9424867a4 100644 --- a/docs/sources/examples/running_redis_service.rst +++ b/docs/sources/examples/running_redis_service.rst @@ -67,14 +67,14 @@ Once inside our freshly created container we need to install Redis to get the apt-get -y install redis-server service redis-server stop -Now we can test the connection. Firstly, let's look at the available environmental -variables in our web application container. We can use these to get the IP and port -of our ``redis`` container. +As we've used the ``--link redis:db`` option, Docker has created some environment +variables in our web application container. .. code-block:: bash - env - . . . + env | grep DB_ + + # Should return something similar to this with your values DB_NAME=/violet_wolf/db DB_PORT_6379_TCP_PORT=6379 DB_PORT=tcp://172.17.0.33:6379 diff --git a/docs/sources/examples/running_ssh_service.Dockerfile b/docs/sources/examples/running_ssh_service.Dockerfile new file mode 100644 index 0000000000..dd2acb7a4b --- /dev/null +++ b/docs/sources/examples/running_ssh_service.Dockerfile @@ -0,0 +1,17 @@ +# sshd +# +# VERSION 0.0.1 + +FROM ubuntu +MAINTAINER Thatcher R. Peskens "thatcher@dotcloud.com" + +# make sure the package repository is up to date +RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list +RUN apt-get update + +RUN apt-get install -y openssh-server +RUN mkdir /var/run/sshd +RUN echo 'root:screencast' |chpasswd + +EXPOSE 22 +CMD /usr/sbin/sshd -D diff --git a/docs/sources/examples/running_ssh_service.rst b/docs/sources/examples/running_ssh_service.rst index 52fe1f5914..d27799bee7 100644 --- a/docs/sources/examples/running_ssh_service.rst +++ b/docs/sources/examples/running_ssh_service.rst @@ -1,5 +1,5 @@ :title: Running an SSH service -:description: A screencast of installing and running an sshd service +:description: Installing and running an sshd service :keywords: docker, example, package installation, networking .. _running_ssh_service: @@ -9,101 +9,41 @@ SSH Daemon Service .. include:: example_header.inc +The following Dockerfile sets up an sshd service in a container that you can use +to connect to and inspect other containers' volumes, or to get quick access to a +test container. -**Video:** +.. literalinclude:: running_ssh_service.Dockerfile -I've created a little screencast to show how to create an SSHd service -and connect to it. It is something like 11 minutes and not entirely -smooth, but it gives you a good idea. -..
note:: - This screencast was created before Docker version 0.5.2, so the - daemon is unprotected and available via a TCP port. When you run - through the same steps in a newer version of Docker, you will - need to add ``sudo`` in front of each ``docker`` command in order - to reach the daemon over its protected Unix socket. - -.. raw:: html - - - -You can also get this sshd container by using: +Build the image using: .. code-block:: bash - sudo docker pull dhrp/sshd + $ sudo docker build -rm -t eg_sshd . - -The password is ``screencast``. - -**Video's Transcription:** +Then run it. You can then use ``docker port`` to find out what host port the container's +port 22 is mapped to: .. code-block:: bash - # Hello! We are going to try and install openssh on a container and run it as a service - # let's pull ubuntu to get a base ubuntu image. - $ docker pull ubuntu - # I had it so it was quick - # now let's connect using -i for interactive and with -t for terminal - # we execute /bin/bash to get a prompt. - $ docker run -i -t ubuntu /bin/bash - # yes! we are in! - # now lets install openssh - $ apt-get update - $ apt-get install openssh-server - # ok. lets see if we can run it. - $ which sshd - # we need to create privilege separation directory - $ mkdir /var/run/sshd - $ /usr/sbin/sshd - $ exit - # now let's commit it - # which container was it? - $ docker ps -a |more - $ docker commit a30a3a2f2b130749995f5902f079dc6ad31ea0621fac595128ec59c6da07feea dhrp/sshd - # I gave the name dhrp/sshd for the container - # now we can run it again - $ docker run -d dhrp/sshd /usr/sbin/sshd -D # D for daemon mode - # is it running? - $ docker ps - # yes! - # let's stop it - $ docker stop 0ebf7cec294755399d063f4b1627980d4cbff7d999f0bc82b59c300f8536a562 - $ docker ps - # and reconnect, but now open a port to it - $ docker run -d -p 22 dhrp/sshd /usr/sbin/sshd -D - $ docker port b2b407cf22cf8e7fa3736fa8852713571074536b1d31def3fdfcd9fa4fd8c8c5 22 - # it has now given us a port to connect to - # we have to connect using a public ip of our host - $ hostname - # *ifconfig* is deprecated, better use *ip addr show* now - $ ifconfig - $ ssh root@192.168.33.10 -p 49153 - # Ah! forgot to set root passwd - $ docker commit b2b407cf22cf8e7fa3736fa8852713571074536b1d31def3fdfcd9fa4fd8c8c5 dhrp/sshd - $ docker ps -a - $ docker run -i -t dhrp/sshd /bin/bash - $ passwd - $ exit - $ docker commit 9e863f0ca0af31c8b951048ba87641d67c382d08d655c2e4879c51410e0fedc1 dhrp/sshd - $ docker run -d -p 22 dhrp/sshd /usr/sbin/sshd -D - $ docker port a0aaa9558c90cf5c7782648df904a82365ebacce523e4acc085ac1213bfe2206 22 - # *ifconfig* is deprecated, better use *ip addr show* now - $ ifconfig - $ ssh root@192.168.33.10 -p 49154 - # Thanks for watching, Thatcher thatcher@dotcloud.com - -Update: -------- + $ sudo docker run -d -P -name test_sshd eg_sshd + $ sudo docker port test_sshd 22 + 0.0.0.0:49154 -For Ubuntu 13.10 using stackbrew/ubuntu, you may need do these additional steps: +And now you can ssh to port ``49154`` on the Docker daemon's host IP address +(``ip address`` or ``ifconfig`` can tell you that): -1. change /etc/pam.d/sshd, pam_loginuid line 'required' to 'optional' -2. echo LANG=\"en_US.UTF-8\" > /etc/default/locale +.. code-block:: bash + $ ssh root@192.168.1.2 -p 49154 + # The password is ``screencast``. + $$ +Finally, clean up after your test by stopping and removing the container, and +then removing the image. + +.. 
code-block:: bash + + $ sudo docker stop test_sshd + $ sudo docker rm test_sshd + $ sudo docker rmi eg_sshd diff --git a/docs/sources/examples/using_supervisord.rst b/docs/sources/examples/using_supervisord.rst index eed063292d..750b6c2334 100644 --- a/docs/sources/examples/using_supervisord.rst +++ b/docs/sources/examples/using_supervisord.rst @@ -112,7 +112,7 @@ Once we've got a built image we can launch a container from it. .. code-block:: bash - sudo docker run -p 22 -p 80 -t -i /supervisor + sudo docker run -p 22 -p 80 -t -i /supervisord 2013-11-25 18:53:22,312 CRIT Supervisor running as root (no user in config file) 2013-11-25 18:53:22,312 WARN Included extra file "/etc/supervisor/conf.d/supervisord.conf" during parsing 2013-11-25 18:53:22,342 INFO supervisord started with pid 1 diff --git a/docs/sources/faq.rst b/docs/sources/faq.rst index cf072f34a7..07055941bd 100644 --- a/docs/sources/faq.rst +++ b/docs/sources/faq.rst @@ -25,9 +25,9 @@ Does Docker run on Mac OS X or Windows? Not at this time, Docker currently only runs on Linux, but you can use VirtualBox to run Docker in a virtual machine on your box, and - get the best of both worlds. Check out the - :ref:`macosx` and :ref:`windows` installation - guides. + get the best of both worlds. Check out the :ref:`macosx` and + :ref:`windows` installation guides. The small Linux distribution boot2docker + can be run inside virtual machines on these two operating systems. How do containers compare to virtual machines? .............................................. @@ -183,10 +183,21 @@ Cloud: - Google Compute Engine - Rackspace +How do I report a security issue with Docker? +............................................. + +You can learn about the project's security policy `here `_ +and report security issues to this `mailbox `_. + +Why do I need to sign my commits to Docker with the DCO? +........................................................ + +Please read `our blog post `_ on the introduction of the DCO. + Can I help by adding some questions and answers? ................................................ - Definitely! You can fork `the repo`_ and edit the documentation sources. +Definitely! You can fork `the repo`_ and edit the documentation sources. Where can I find more answers? @@ -210,5 +221,4 @@ Where can I find more answers? .. _Ask questions on Stackoverflow: http://stackoverflow.com/search?q=docker .. _Join the conversation on Twitter: http://twitter.com/docker - Looking for something else to read? Check out the :ref:`hello_world` example. diff --git a/docs/sources/index.rst b/docs/sources/index.rst index 346a6619c5..a89349b2bb 100644 --- a/docs/sources/index.rst +++ b/docs/sources/index.rst @@ -17,13 +17,13 @@ Common use cases for Docker include: - Deploying and scaling databases and backend services in a service-oriented environment. - Building custom PaaS environments, either from scratch or as an extension of off-the-shelf platforms like OpenShift or Cloud Foundry. -Please note Docker is currently under heavy developement. It should not be used in production (yet). +Please note Docker is currently under heavy development. It should not be used in production (yet). For a high-level overview of Docker, please see the `Introduction `_. When you're ready to start working with Docker, we have a `quick start `_ and a more in-depth guide to :ref:`ubuntu_linux` and other :ref:`installation_list` paths including prebuilt binaries, -Vagrant-created VMs, Rackspace and Amazon instances. +Rackspace and Amazon instances. Enough reading!
:ref:`Try it out! ` diff --git a/docs/sources/installation/amazon.rst b/docs/sources/installation/amazon.rst index e8fdc2c1ca..b5465e25f8 100644 --- a/docs/sources/installation/amazon.rst +++ b/docs/sources/installation/amazon.rst @@ -1,5 +1,5 @@ :title: Installation on Amazon EC2 -:description: Docker installation on Amazon EC2 +:description: Please note this project is currently under heavy development. It should not be used in production. :keywords: amazon ec2, virtualization, cloud, docker, documentation, installation Amazon EC2 @@ -10,8 +10,7 @@ Amazon EC2 There are several ways to install Docker on AWS EC2: * :ref:`amazonquickstart` or -* :ref:`amazonstandard` or -* :ref:`amazonvagrant` +* :ref:`amazonstandard` **You'll need an** `AWS account `_ **first, of course.** @@ -73,112 +72,4 @@ running Ubuntu. Just follow Step 1 from :ref:`amazonquickstart` to pick an image (or use one of your own) and skip the step with the *User Data*. Then continue with the :ref:`ubuntu_linux` instructions. -.. _amazonvagrant: - -Use Vagrant ------------ - -.. include:: install_unofficial.inc - -And finally, if you prefer to work through Vagrant, you can install -Docker that way too. Vagrant 1.1 or higher is required. - -1. Install vagrant from http://www.vagrantup.com/ (or use your package manager) -2. Install the vagrant aws plugin - - :: - - vagrant plugin install vagrant-aws - - -3. Get the docker sources, this will give you the latest Vagrantfile. - - :: - - git clone https://github.com/dotcloud/docker.git - - -4. Check your AWS environment. - - Create a keypair specifically for EC2, give it a name and save it - to your disk. *I usually store these in my ~/.ssh/ folder*. - - Check that your default security group has an inbound rule to - accept SSH (port 22) connections. - -5. Inform Vagrant of your settings - - Vagrant will read your access credentials from your environment, so - we need to set them there first. Make sure you have everything on - amazon aws setup so you can (manually) deploy a new image to EC2. - - Note that where possible these variables are the same as those honored by - the ec2 api tools. - :: - - export AWS_ACCESS_KEY=xxx - export AWS_SECRET_KEY=xxx - export AWS_KEYPAIR_NAME=xxx - export SSH_PRIVKEY_PATH=xxx - - export BOX_NAME=xxx - export AWS_REGION=xxx - export AWS_AMI=xxx - export AWS_INSTANCE_TYPE=xxx - - The required environment variables are: - - * ``AWS_ACCESS_KEY`` - The API key used to make requests to AWS - * ``AWS_SECRET_KEY`` - The secret key to make AWS API requests - * ``AWS_KEYPAIR_NAME`` - The name of the keypair used for this EC2 instance - * ``SSH_PRIVKEY_PATH`` - The path to the private key for the named - keypair, for example ``~/.ssh/docker.pem`` - - There are a number of optional environment variables: - - * ``BOX_NAME`` - The name of the vagrant box to use. Defaults to - ``ubuntu``. - * ``AWS_REGION`` - The aws region to spawn the vm in. Defaults to - ``us-east-1``. - * ``AWS_AMI`` - The aws AMI to start with as a base. This must be - be an ubuntu 12.04 precise image. You must change this value if - ``AWS_REGION`` is set to a value other than ``us-east-1``. - This is because AMIs are region specific. Defaults to ``ami-69f5a900``. - * ``AWS_INSTANCE_TYPE`` - The aws instance type. Defaults to ``t1.micro``. - - You can check if they are set correctly by doing something like - - :: - - echo $AWS_ACCESS_KEY - -6. Do the magic! 
- - :: - - vagrant up --provider=aws - - - If it stalls indefinitely on ``[default] Waiting for SSH to become - available...``, Double check your default security zone on AWS - includes rights to SSH (port 22) to your container. - - If you have an advanced AWS setup, you might want to have a look at - `vagrant-aws `_. - -7. Connect to your machine - - .. code-block:: bash - - vagrant ssh - -8. Your first command - - Now you are in the VM, run docker - - .. code-block:: bash - - sudo docker - - Continue with the :ref:`hello_world` example. diff --git a/docs/sources/installation/archlinux.rst b/docs/sources/installation/archlinux.rst index 2d823bfd46..c9b4c1d2c5 100644 --- a/docs/sources/installation/archlinux.rst +++ b/docs/sources/installation/archlinux.rst @@ -1,5 +1,5 @@ :title: Installation on Arch Linux -:description: Docker installation on Arch Linux. +:description: Please note this project is currently under heavy development. It should not be used in production. :keywords: arch linux, virtualization, docker, documentation, installation .. _arch_linux: diff --git a/docs/sources/installation/binaries.rst b/docs/sources/installation/binaries.rst index 976e94e344..bfdfbe211f 100644 --- a/docs/sources/installation/binaries.rst +++ b/docs/sources/installation/binaries.rst @@ -26,10 +26,7 @@ Check runtime dependencies To run properly, docker needs the following software to be installed at runtime: -- iproute2 version 3.5 or later (build after 2012-05-21), and - specifically the "ip" utility - iptables version 1.4 or later -- The LXC utility scripts (http://lxc.sourceforge.net) version 0.8 or later - Git version 1.7 or later - XZ Utils 4.9 or later @@ -41,7 +38,7 @@ Docker in daemon mode has specific kernel requirements. For details, check your distribution in :ref:`installation_list`. Note that Docker also has a client mode, which can run on virtually -any linux kernel (it even builds on OSX!). +any Linux kernel (it even builds on OSX!). Get the docker binary: diff --git a/docs/sources/installation/fedora.rst b/docs/sources/installation/fedora.rst index 6dd2bf91d9..7e0aee78fd 100644 --- a/docs/sources/installation/fedora.rst +++ b/docs/sources/installation/fedora.rst @@ -1,4 +1,4 @@ -:title: Requirements and Installation on Fedora +:title: Installation on Fedora :description: Please note this project is currently under heavy development. It should not be used in production. :keywords: Docker, Docker documentation, Fedora, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux diff --git a/docs/sources/installation/frugalware.rst b/docs/sources/installation/frugalware.rst index de2b92ae10..ed9bb2bfaa 100644 --- a/docs/sources/installation/frugalware.rst +++ b/docs/sources/installation/frugalware.rst @@ -1,5 +1,5 @@ :title: Installation on FrugalWare -:description: Docker installation on FrugalWare. +:description: Please note this project is currently under heavy development. It should not be used in production. :keywords: frugalware linux, virtualization, docker, documentation, installation .. _frugalware: diff --git a/docs/sources/installation/gentoolinux.rst b/docs/sources/installation/gentoolinux.rst index 421af0a1e7..5abfddeb91 100644 --- a/docs/sources/installation/gentoolinux.rst +++ b/docs/sources/installation/gentoolinux.rst @@ -1,5 +1,5 @@ -:title: Installation on Gentoo Linux -:description: Docker installation instructions and nuances for Gentoo Linux. +:title: Installation on Gentoo +:description: Please note this project is currently under heavy development. 
It should not be used in production. :keywords: gentoo linux, virtualization, docker, documentation, installation .. _gentoo_linux: diff --git a/docs/sources/installation/mac.rst b/docs/sources/installation/mac.rst index 4f0550ad3d..5139324d0b 100644 --- a/docs/sources/installation/mac.rst +++ b/docs/sources/installation/mac.rst @@ -1,4 +1,4 @@ -:title: Requirements and Installation on Mac OS X 10.6 Snow Leopard +:title: Installation on Mac OS X 10.6 Snow Leopard :description: Please note this project is currently under heavy development. It should not be used in production. :keywords: Docker, Docker documentation, requirements, virtualbox, ssh, linux, os x, osx, mac @@ -39,7 +39,7 @@ boot2docker ``docker`` daemon. It also takes care of the installation for the OS image that is used for the job. -.. _GitHub page: https://github.com/steeve/boot2docker +.. _GitHub page: https://github.com/boot2docker/boot2docker Open up a new terminal window, if you have not already. @@ -49,10 +49,10 @@ Run the following commands to get boot2docker: # Enter the installation directory cd ~/bin - + # Get the file - curl https://raw.github.com/steeve/boot2docker/master/boot2docker > boot2docker - + curl https://raw.github.com/boot2docker/boot2docker/master/boot2docker > boot2docker + # Mark it executable chmod +x boot2docker @@ -67,13 +67,13 @@ Run the following commands to get it downloaded and set up: # Get the file curl -o docker https://get.docker.io/builds/Darwin/x86_64/docker-latest - + # Mark it executable chmod +x docker # Set the environment variable for the docker daemon - export DOCKER_HOST=tcp:// - + export DOCKER_HOST=tcp://127.0.0.1:4243 + # Copy the executable file sudo cp docker /usr/local/bin/ @@ -94,7 +94,7 @@ Inside the ``~/bin`` directory, run the following commands: # Run the VM (the docker daemon) ./boot2docker up - + # To see all available commands: ./boot2docker @@ -116,6 +116,21 @@ client just like any other application. # Git commit (server): c348c04 # Go version (server): go1.2 +Forwarding VM Port Range to Host +-------------------------------- + +If we take the port range that docker uses by default with the -P option +(49000-49900), and forward the same range from the host to the VM, we'll be able to interact +with our containers as if they were running locally: + +.. code-block:: bash + + # vm must be powered off + for i in {49000..49900}; do + VBoxManage modifyvm "boot2docker-vm" --natpf1 "tcp-port$i,tcp,,$i,,$i"; + VBoxManage modifyvm "boot2docker-vm" --natpf1 "udp-port$i,udp,,$i,,$i"; + done + SSH-ing The VM -------------- @@ -138,7 +153,7 @@ boot2docker: See the GitHub page for `boot2docker`_. -.. _boot2docker: https://github.com/steeve/boot2docker +.. _boot2docker: https://github.com/boot2docker/boot2docker If SSH complains about keys: ---------------------------- ssh-keygen -R '[localhost]:2022' +Upgrading to a newer release of boot2docker +------------------------------------------- + +To upgrade an initialised VM, you can use the following 3 commands. Your persistent +disk will not be changed, so you won't lose your images and containers: ..
code-block:: bash + + ./boot2docker stop + ./boot2docker download + ./boot2docker start + About the way Docker works on Mac OS X: --------------------------------------- diff --git a/docs/sources/installation/openSUSE.rst b/docs/sources/installation/openSUSE.rst index ded5de44a4..c791beacbf 100644 --- a/docs/sources/installation/openSUSE.rst +++ b/docs/sources/installation/openSUSE.rst @@ -1,5 +1,5 @@ :title: Installation on openSUSE -:description: Docker installation on openSUSE. +:description: Please note this project is currently under heavy development. It should not be used in production. :keywords: openSUSE, virtualbox, docker, documentation, installation .. _openSUSE: diff --git a/docs/sources/installation/rackspace.rst b/docs/sources/installation/rackspace.rst index d0005a14bc..687131a413 100644 --- a/docs/sources/installation/rackspace.rst +++ b/docs/sources/installation/rackspace.rst @@ -1,5 +1,5 @@ -:title: Rackspace Cloud Installation -:description: Installing Docker on Ubuntu proviced by Rackspace +:title: Installation on Rackspace Cloud +:description: Please note this project is currently under heavy development. It should not be used in production. :keywords: Rackspace Cloud, installation, docker, linux, ubuntu Rackspace Cloud diff --git a/docs/sources/installation/rhel.rst b/docs/sources/installation/rhel.rst index 9036fb79ea..7930da6309 100644 --- a/docs/sources/installation/rhel.rst +++ b/docs/sources/installation/rhel.rst @@ -1,4 +1,4 @@ -:title: Requirements and Installation on Red Hat Enterprise Linux +:title: Installation on Red Hat Enterprise Linux :description: Please note this project is currently under heavy development. It should not be used in production. :keywords: Docker, Docker documentation, requirements, linux, rhel, centos diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index 3d6ee6415d..c459f33d3c 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -1,4 +1,4 @@ -:title: Requirements and Installation on Ubuntu Linux +:title: Installation on Ubuntu :description: Please note this project is currently under heavy development. It should not be used in production. :keywords: Docker, Docker documentation, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux @@ -64,7 +64,7 @@ Installation an earlier version, you will need to follow them again. Docker is available as a Debian package, which makes installation -easy. **See the :ref:`installmirrors` section below if you are not in +easy. **See the** :ref:`installmirrors` **section below if you are not in the United States.** Other sources of the Debian packages may be faster for you to install. @@ -182,9 +182,12 @@ daemon will make the ownership of the Unix socket read/writable by the *docker* group when the daemon starts. The ``docker`` daemon must always run as the root user, but if you run the ``docker`` client as a user in the *docker* group then you don't need to add ``sudo`` to all the -client commands. +client commands. As of 0.9.0, you can specify that a group other than ``docker`` +should own the Unix socket with the ``-G`` option. + +.. warning:: The *docker* group (or the group specified with ``-G``) is + root-equivalent. -.. warning:: The *docker* group is root-equivalent. 
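+For instance, a minimal sketch of the ``-G`` option (assuming Docker 0.9.0 or
+later; ``dockerusers`` is a hypothetical group name):
+
+.. code-block:: bash
+
+    # Create a custom group, then start the daemon so that the
+    # Unix socket is owned by that group instead of *docker*
+    sudo groupadd dockerusers
+    sudo docker -d -G dockerusers
+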
**Example:**

@@ -217,15 +220,35 @@ To install the latest version of docker, use the standard ``apt-get`` method:

     # install the latest
     sudo apt-get install lxc-docker

+Memory and Swap Accounting
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you want to enable memory and swap accounting, you must add the following
+command-line parameters to your kernel::
+
+    cgroup_enable=memory swapaccount=1
+
+On systems using GRUB (which is the default for Ubuntu), you can add those
+parameters by editing ``/etc/default/grub`` and extending
+``GRUB_CMDLINE_LINUX``. Look for the following line::
+
+    GRUB_CMDLINE_LINUX=""
+
+And replace it with the following one::
+
+    GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
+
+Then run ``update-grub``, and reboot.
+
 Troubleshooting
 ^^^^^^^^^^^^^^^

-On Linux Mint, the ``cgroups-lite`` package is not installed by default.
+On Linux Mint, the ``cgroup-lite`` package is not installed by default.
 Before Docker will work correctly, you will need to install this via:

 .. code-block:: bash

-    sudo apt-get update && sudo apt-get install cgroups-lite
+    sudo apt-get update && sudo apt-get install cgroup-lite

 .. _ufw:

@@ -261,6 +284,64 @@ incoming connections on the Docker port (default 4243):

 .. _installmirrors:

+Docker and local DNS server warnings
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Systems which are running Ubuntu or an Ubuntu derivative on the desktop will
+use `127.0.0.1` as the default nameserver in `/etc/resolv.conf`. NetworkManager
+sets up dnsmasq to use the real DNS servers of the connection and sets up
+`nameserver 127.0.0.1` in `/etc/resolv.conf`.
+
+When starting containers on these desktop machines, users will see a warning:
+
+.. code-block:: bash
+
+    WARNING: Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : [8.8.8.8 8.8.4.4]
+
+This warning is shown because the containers can't use the local DNS nameserver
+and Docker will default to using an external nameserver.
+
+This can be worked around by specifying a DNS server to be used by the Docker
+daemon for the containers:
+
+.. code-block:: bash
+
+    sudo nano /etc/default/docker
+    ---
+    # Add:
+    DOCKER_OPTS="-dns 8.8.8.8"
+    # 8.8.8.8 could be replaced with a local DNS server, such as 192.168.1.1
+    # multiple DNS servers can be specified: -dns 8.8.8.8 -dns 192.168.1.1
+
+The Docker daemon has to be restarted:
+
+.. code-block:: bash
+
+    sudo restart docker
+
+.. warning:: If you're doing this on a laptop which connects to various networks, make sure to choose a public DNS server.
+
+An alternative solution involves disabling dnsmasq in NetworkManager by
+following these steps:
+
+.. code-block:: bash
+
+    sudo nano /etc/NetworkManager/NetworkManager.conf
+    ----
+    # Change:
+    dns=dnsmasq
+    # to
+    #dns=dnsmasq
+
+NetworkManager and Docker need to be restarted afterwards:
+
+.. code-block:: bash
+
+    sudo restart network-manager
+    sudo restart docker
+
+.. warning:: This might make DNS resolution slower on some networks.
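+To verify that containers picked up the configured DNS server, a quick check
+(assuming the small ``busybox`` image used elsewhere in these docs) is to
+print a container's ``/etc/resolv.conf``:
+
+.. code-block:: bash
+
+    sudo docker run busybox cat /etc/resolv.conf
+    # should show the server passed via -dns, e.g. "nameserver 8.8.8.8"
+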
+ Mirrors ^^^^^^^ diff --git a/docs/sources/installation/windows.rst b/docs/sources/installation/windows.rst index c980a32df9..d00b012e6c 100644 --- a/docs/sources/installation/windows.rst +++ b/docs/sources/installation/windows.rst @@ -1,223 +1,72 @@ -:title: Requirements and Installation on Windows -:description: Docker's tutorial to run docker on Windows -:keywords: Docker, Docker documentation, Windows, requirements, virtualbox, vagrant, git, ssh, putty, cygwin +:title: Installation on Windows +:description: Please note this project is currently under heavy development. It should not be used in production. +:keywords: Docker, Docker documentation, Windows, requirements, virtualbox, boot2docker .. _windows: -Installing Docker on Windows -============================ +Windows +======= -Docker can run on Windows using a VM like VirtualBox. You then run -Linux within the VM. +Docker can run on Windows using a virtualization platform like VirtualBox. A Linux +distribution is run inside a virtual machine and that's where Docker will run. Installation ------------ .. include:: install_header.inc -.. include:: install_unofficial.inc +1. Install virtualbox from https://www.virtualbox.org - or follow this `tutorial `_. -1. Install virtualbox from https://www.virtualbox.org - or follow this tutorial__ +2. Download the latest boot2docker.iso from https://github.com/boot2docker/boot2docker/releases. -.. __: http://www.slideshare.net/julienbarbier42/install-virtualbox-on-windows-7 +3. Start VirtualBox. -2. Install vagrant from http://www.vagrantup.com - or follow this tutorial__ +4. Create a new Virtual machine with the following settings: -.. __: http://www.slideshare.net/julienbarbier42/install-vagrant-on-windows-7 + - `Name: boot2docker` + - `Type: Linux` + - `Version: Linux 2.6 (64 bit)` + - `Memory size: 1024 MB` + - `Hard drive: Do not add a virtual hard drive` -3. Install git with ssh from http://git-scm.com/downloads - or follow this tutorial__ +5. Open the settings of the virtual machine: -.. __: http://www.slideshare.net/julienbarbier42/install-git-with-ssh-on-windows-7 + 5.1. go to Storage + 5.2. click the empty slot below `Controller: IDE` -We recommend having at least 2Gb of free disk space and 2Gb of RAM (or more). + 5.3. click the disc icon on the right of `IDE Secondary Master` -Opening a command prompt ------------------------- + 5.4. click `Choose a virtual CD/DVD disk file` -First open a cmd prompt. Press Windows key and then press “R” -key. This will open the RUN dialog box for you. Type “cmd” and press -Enter. Or you can click on Start, type “cmd” in the “Search programs -and files” field, and click on cmd.exe. +6. Browse to the path where you've saved the `boot2docker.iso`, select the `boot2docker.iso` and click open. -.. image:: images/win/_01.gif - :alt: Git install - :align: center +7. Click OK on the Settings dialog to save the changes and close the window. -This should open a cmd prompt window. +8. Start the virtual machine by clicking the green start button. -.. image:: images/win/_02.gif - :alt: run docker - :align: center - -Alternatively, you can also use a Cygwin terminal, or Git Bash (or any -other command line program you are usually using). The next steps -would be the same. - -.. _launch_ubuntu: - -Launch an Ubuntu virtual server -------------------------------- - -Let’s download and run an Ubuntu image with docker binaries already -installed. - -.. code-block:: bash - - git clone https://github.com/dotcloud/docker.git - cd docker - vagrant up - -.. 
image:: images/win/run_02_.gif - :alt: run docker - :align: center - -Congratulations! You are running an Ubuntu server with docker -installed on it. You do not see it though, because it is running in -the background. - -Log onto your Ubuntu server ---------------------------- - -Let’s log into your Ubuntu server now. To do so you have two choices: - -- Use Vagrant on Windows command prompt OR -- Use SSH - -Using Vagrant on Windows Command Prompt -``````````````````````````````````````` - -Run the following command - -.. code-block:: bash - - vagrant ssh - -You may see an error message starting with “`ssh` executable not -found”. In this case it means that you do not have SSH in your -PATH. If you do not have SSH in your PATH you can set it up with the -“set” command. For instance, if your ssh.exe is in the folder named -“C:\Program Files (x86)\Git\bin”, then you can run the following -command: - -.. code-block:: bash - - set PATH=%PATH%;C:\Program Files (x86)\Git\bin - -.. image:: images/win/run_03.gif - :alt: run docker - :align: center - -Using SSH -````````` - -First step is to get the IP and port of your Ubuntu server. Simply run: - -.. code-block:: bash - - vagrant ssh-config - -You should see an output with HostName and Port information. In this -example, HostName is 127.0.0.1 and port is 2222. And the User is -“vagrant”. The password is not shown, but it is also “vagrant”. - -.. image:: images/win/ssh-config.gif - :alt: run docker - :align: center - -You can now use this information for connecting via SSH to your -server. To do so you can: - -- Use putty.exe OR -- Use SSH from a terminal - -Use putty.exe -''''''''''''' - -You can download putty.exe from this page -http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html Launch -putty.exe and simply enter the information you got from last step. - -.. image:: images/win/putty.gif - :alt: run docker - :align: center - -Open, and enter user = vagrant and password = vagrant. - -.. image:: images/win/putty_2.gif - :alt: run docker - :align: center - -SSH from a terminal -''''''''''''''''''' - -You can also run this command on your favorite terminal (windows -prompt, cygwin, git-bash, …). Make sure to adapt the IP and port from -what you got from the vagrant ssh-config command. - -.. code-block:: bash - - ssh vagrant@127.0.0.1 –p 2222 - -Enter user = vagrant and password = vagrant. - -.. image:: images/win/cygwin.gif - :alt: run docker - :align: center - -Congratulations, you are now logged onto your Ubuntu Server, running -on top of your Windows machine ! +9. The boot2docker virtual machine should boot now. Running Docker -------------- -First you have to be root in order to run docker. Simply run the -following command: +boot2docker will log you in automatically so you can start using Docker right +away. -.. code-block:: bash - - sudo su - -You are now ready for the docker’s “hello world” example. Run +Let's try the “hello world” example. Run .. code-block:: bash docker run busybox echo hello world -.. image:: images/win/run_04.gif - :alt: run docker - :align: center +This will download the small busybox image and print hello world. -All done! -Now you can continue with the :ref:`hello_world` example. +Observations +------------ -Troubleshooting ---------------- +Persistent storage +`````````````````` -VM does not boot -```````````````` - -.. 
image:: images/win/ts_go_bios.JPG - -If you run into this error message "The VM failed to remain in the -'running' state while attempting to boot", please check that your -computer has virtualization technology available and activated by -going to the BIOS. Here's an example for an HP computer (System -configuration / Device configuration) - -.. image:: images/win/hp_bios_vm.JPG - -On some machines the BIOS menu can only be accessed before startup. -To access BIOS in this scenario you should restart your computer and -press ESC/Enter when prompted to access the boot and BIOS controls. Typically -the option to allow virtualization is contained within the BIOS/Security menu. - -Docker is not installed -``````````````````````` - -.. image:: images/win/ts_no_docker.JPG - -If you run into this error message "The program 'docker' is currently -not installed", try deleting the docker folder and restart from -:ref:`launch_ubuntu` +The virtual machine created above lacks any persistent data storage. All images +and containers will be lost when shutting down or rebooting the VM. diff --git a/docs/sources/reference/api/README.md b/docs/sources/reference/api/README.md index 10dede382b..ec42b89733 100644 --- a/docs/sources/reference/api/README.md +++ b/docs/sources/reference/api/README.md @@ -3,3 +3,4 @@ This directory holds the authoritative specifications of APIs defined and implem * The remote API by which a docker node can be queried over HTTP * The registry API by which a docker node can download and upload container images for storage and sharing * The index search API by which a docker node can search the public index for images to download +* The docker.io OAuth and accounts API which 3rd party services can use to access account information diff --git a/docs/sources/reference/api/_static/io_oauth_authorization_page.png b/docs/sources/reference/api/_static/io_oauth_authorization_page.png new file mode 100644 index 0000000000..798044ed04 Binary files /dev/null and b/docs/sources/reference/api/_static/io_oauth_authorization_page.png differ diff --git a/docs/sources/reference/api/docker_io_accounts_api.rst b/docs/sources/reference/api/docker_io_accounts_api.rst new file mode 100644 index 0000000000..7976f1fddf --- /dev/null +++ b/docs/sources/reference/api/docker_io_accounts_api.rst @@ -0,0 +1,308 @@ +:title: docker.io Accounts API +:description: API Documentation for docker.io accounts. +:keywords: API, Docker, accounts, REST, documentation + + +====================== +docker.io Accounts API +====================== + +.. contents:: Table of Contents + + +1. Endpoints +============ + + +1.1 Get a single user +^^^^^^^^^^^^^^^^^^^^^ + +.. http:get:: /api/v1.1/users/:username/ + + Get profile info for the specified user. + + :param username: username of the user whose profile info is being requested. + + :reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token. + + :statuscode 200: success, user data returned. + :statuscode 401: authentication error. + :statuscode 403: permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have ``profile_read`` scope. + :statuscode 404: the specified username does not exist. + + **Example request**: + + .. sourcecode:: http + + GET /api/v1.1/users/janedoe/ HTTP/1.1 + Host: www.docker.io + Accept: application/json + Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ= + + **Example response**: + + .. 
sourcecode:: http

+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      {
+          "id": 2,
+          "username": "janedoe",
+          "url": "",
+          "date_joined": "2014-02-12T17:58:01.431312Z",
+          "type": "User",
+          "full_name": "Jane Doe",
+          "location": "San Francisco, CA",
+          "company": "Success, Inc.",
+          "profile_url": "https://docker.io/",
+          "gravatar_email": "jane.doe+gravatar@example.com",
+          "email": "jane.doe@example.com",
+          "is_active": true
+      }
+
+
+1.2 Update a single user
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. http:patch:: /api/v1.1/users/:username/
+
+   Update profile info for the specified user.
+
+   :param username: username of the user whose profile info is being updated.
+
+   :jsonparam string full_name: (optional) the new name of the user.
+   :jsonparam string location: (optional) the new location.
+   :jsonparam string company: (optional) the new company of the user.
+   :jsonparam string profile_url: (optional) the new profile url.
+   :jsonparam string gravatar_email: (optional) the new Gravatar email address.
+
+   :reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token.
+   :reqheader Content-Type: MIME Type of post data. JSON, url-encoded form data, etc.
+
+   :statuscode 200: success, user data updated.
+   :statuscode 400: post data validation error.
+   :statuscode 401: authentication error.
+   :statuscode 403: permission error, authenticated user must be the user whose data is being updated, OAuth access tokens must have ``profile_write`` scope.
+   :statuscode 404: the specified username does not exist.
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      PATCH /api/v1.1/users/janedoe/ HTTP/1.1
+      Host: www.docker.io
+      Accept: application/json
+      Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ=
+
+      {
+          "location": "Private Island",
+          "profile_url": "http://janedoe.com/",
+          "company": "Retired"
+      }
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      {
+          "id": 2,
+          "username": "janedoe",
+          "url": "",
+          "date_joined": "2014-02-12T17:58:01.431312Z",
+          "type": "User",
+          "full_name": "Jane Doe",
+          "location": "Private Island",
+          "company": "Retired",
+          "profile_url": "http://janedoe.com/",
+          "gravatar_email": "jane.doe+gravatar@example.com",
+          "email": "jane.doe@example.com",
+          "is_active": true
+      }
+
+
+1.3 List email addresses for a user
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. http:get:: /api/v1.1/users/:username/emails/
+
+   List email info for the specified user.
+
+   :param username: username of the user whose email info is being requested.
+
+   :reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token
+
+   :statuscode 200: success, email list returned.
+   :statuscode 401: authentication error.
+   :statuscode 403: permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have ``email_read`` scope.
+   :statuscode 404: the specified username does not exist.
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      GET /api/v1.1/users/janedoe/emails/ HTTP/1.1
+      Host: www.docker.io
+      Accept: application/json
+      Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      [
+        {
+            "email": "jane.doe@example.com",
+            "verified": true,
+            "primary": true
+        }
+      ]
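+The same request can be exercised from the command line with ``curl`` (a
+sketch; the Bearer token below is the illustrative one used in the examples
+above):
+
+.. code-block:: bash
+
+    # List the email addresses attached to the janedoe account
+    curl -H "Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM" \
+         -H "Accept: application/json" \
+         https://www.docker.io/api/v1.1/users/janedoe/emails/
+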
+
+1.4 Add email address for a user
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. http:post:: /api/v1.1/users/:username/emails/
+
+   Add a new email address to the specified user's account. The email address
+   must be verified separately; a confirmation email is not automatically sent.
+
+   :jsonparam string email: email address to be added.
+
+   :reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token.
+   :reqheader Content-Type: MIME Type of post data. JSON, url-encoded form data, etc.
+
+   :statuscode 201: success, new email added.
+   :statuscode 400: data validation error.
+   :statuscode 401: authentication error.
+   :statuscode 403: permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have ``email_write`` scope.
+   :statuscode 404: the specified username does not exist.
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /api/v1.1/users/janedoe/emails/ HTTP/1.1
+      Host: www.docker.io
+      Accept: application/json
+      Content-Type: application/json
+      Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM
+
+      {
+          "email": "jane.doe+other@example.com"
+      }
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 201 Created
+      Content-Type: application/json
+
+      {
+          "email": "jane.doe+other@example.com",
+          "verified": false,
+          "primary": false
+      }
+
+
+1.5 Update an email address for a user
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. http:patch:: /api/v1.1/users/:username/emails/
+
+   Update an email address for the specified user to either verify an email
+   address or set it as the primary email for the user. You cannot use this
+   endpoint to un-verify an email address. You cannot use this endpoint to
+   unset the primary email, only set another as the primary.
+
+   :param username: username of the user whose email info is being updated.
+
+   :jsonparam string email: the email address to be updated.
+   :jsonparam boolean verified: (optional) whether the email address is verified, must be ``true`` or absent.
+   :jsonparam boolean primary: (optional) whether to set the email address as the primary email, must be ``true`` or absent.
+
+   :reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token.
+   :reqheader Content-Type: MIME Type of post data. JSON, url-encoded form data, etc.
+
+   :statuscode 200: success, user's email updated.
+   :statuscode 400: data validation error.
+   :statuscode 401: authentication error.
+   :statuscode 403: permission error, authenticated user must be the user whose data is being updated, OAuth access tokens must have ``email_write`` scope.
+   :statuscode 404: the specified username or email address does not exist.
+
+   **Example request**:
+
+   Once you have independently verified an email address.
+
+   .. sourcecode:: http
+
+      PATCH /api/v1.1/users/janedoe/emails/ HTTP/1.1
+      Host: www.docker.io
+      Accept: application/json
+      Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ=
+
+      {
+          "email": "jane.doe+other@example.com",
+          "verified": true
+      }
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      {
+          "email": "jane.doe+other@example.com",
+          "verified": true,
+          "primary": false
+      }
+
+
+1.6 Delete email address for a user
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. http:delete:: /api/v1.1/users/:username/emails/
+
+   Delete an email address from the specified user's account. You cannot
+   delete a user's primary email address.
+
+   :jsonparam string email: email address to be deleted.
+ + :reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token. + :reqheader Content-Type: MIME Type of post data. JSON, url-encoded form data, etc. + + :statuscode 204: success, email address removed. + :statuscode 400: validation error. + :statuscode 401: authentication error. + :statuscode 403: permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have ``email_write`` scope. + :statuscode 404: the specified username or email address does not exist. + + **Example request**: + + .. sourcecode:: http + + DELETE /api/v1.1/users/janedoe/emails/ HTTP/1.1 + Host: www.docker.io + Accept: application/json + Content-Type: application/json + Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM + + { + "email": "jane.doe+other@example.com" + } + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 204 NO CONTENT + Content-Length: 0 diff --git a/docs/sources/reference/api/docker_io_oauth_api.rst b/docs/sources/reference/api/docker_io_oauth_api.rst new file mode 100644 index 0000000000..d68dd8d36c --- /dev/null +++ b/docs/sources/reference/api/docker_io_oauth_api.rst @@ -0,0 +1,253 @@ +:title: docker.io OAuth API +:description: API Documentation for docker.io's OAuth flow. +:keywords: API, Docker, oauth, REST, documentation + + +=================== +docker.io OAuth API +=================== + +.. contents:: Table of Contents + + +1. Brief introduction +===================== + +Some docker.io API requests will require an access token to authenticate. To +get an access token for a user, that user must first grant your application +access to their docker.io account. In order for them to grant your application +access you must first register your application. + +Before continuing, we encourage you to familiarize yourself with +`The OAuth 2.0 Authorization Framework `_. + +*Also note that all OAuth interactions must take place over https connections* + + +2. Register Your Application +============================ + +You will need to register your application with docker.io before users will +be able to grant your application access to their account information. We +are currently only allowing applications selectively. To request registration +of your application send an email to support-accounts@docker.com with the +following information: + +- The name of your application +- A description of your application and the service it will provide + to docker.io users. +- A callback URI that we will use for redirecting authorization requests to + your application. These are used in the step of getting an Authorization + Code. The domain name of the callback URI will be visible to the user when + they are requested to authorize your application. + +When your application is approved you will receive a response from the +docker.io team with your ``client_id`` and ``client_secret`` which your +application will use in the steps of getting an Authorization Code and getting +an Access Token. + + +3. Endpoints +============ + +3.1 Get an Authorization Code +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Once You have registered you are ready to start integrating docker.io accounts +into your application! The process is usually started by a user following a +link in your application to an OAuth Authorization endpoint. + +.. http:get:: /api/v1.1/o/authorize/ + + Request that a docker.io user authorize your application. If the user is + not already logged in, they will be prompted to login. 
The user is then + presented with a form to authorize your application for the requested + access scope. On submission, the user will be redirected to the specified + ``redirect_uri`` with an Authorization Code. + + :query client_id: The ``client_id`` given to your application at + registration. + :query response_type: MUST be set to ``code``. This specifies that you + would like an Authorization Code returned. + :query redirect_uri: The URI to redirect back to after the user has + authorized your application. If omitted, the first of your registered + ``response_uris`` is used. If included, it must be one of the URIs + which were submitted when registering your application. + :query scope: The extent of access permissions you are requesting. + Currently, the scope options are ``profile_read``, ``profile_write``, + ``email_read``, and ``email_write``. Scopes must be separated by a + space. If omitted, the default scopes ``profile_read email_read`` are + used. + :query state: (Recommended) Used by your application to maintain state + between the authorization request and callback to protect against CSRF + attacks. + + **Example Request** + + Asking the user for authorization. + + .. sourcecode:: http + + GET /api/v1.1/o/authorize/?client_id=TestClientID&response_type=code&redirect_uri=https%3A//my.app/auth_complete/&scope=profile_read%20email_read&state=abc123 HTTP/1.1 + Host: www.docker.io + + **Authorization Page** + + When the user follows a link, making the above GET request, they will be + asked to login to their docker.io account if they are not already and then + be presented with the following authorization prompt which asks the user + to authorize your application with a description of the requested scopes. + + .. image:: _static/io_oauth_authorization_page.png + + Once the user allows or denies your Authorization Request the user will be + redirected back to your application. Included in that request will be the + following query parameters: + + ``code`` + The Authorization code generated by the docker.io authorization server. + Present it again to request an Access Token. This code expires in 60 + seconds. + + ``state`` + If the ``state`` parameter was present in the authorization request this + will be the exact value received from that request. + + ``error`` + An error message in the event of the user denying the authorization or + some other kind of error with the request. + + +3.2 Get an Access Token +^^^^^^^^^^^^^^^^^^^^^^^ + +Once the user has authorized your application, a request will be made to your +application's specified ``redirect_uri`` which includes a ``code`` parameter +that you must then use to get an Access Token. + +.. http:post:: /api/v1.1/o/token/ + + Submit your newly granted Authorization Code and your application's + credentials to receive an Access Token and Refresh Token. The code is valid + for 60 seconds and cannot be used more than once. + + :reqheader Authorization: HTTP basic authentication using your + application's ``client_id`` and ``client_secret`` + + :form grant_type: MUST be set to ``authorization_code`` + :form code: The authorization code received from the user's redirect + request. + :form redirect_uri: The same ``redirect_uri`` used in the authentication + request. + + **Example Request** + + Using an authorization code to get an access token. + + .. 
sourcecode:: http

+      POST /api/v1.1/o/token/ HTTP/1.1
+      Host: www.docker.io
+      Authorization: Basic VGVzdENsaWVudElEOlRlc3RDbGllbnRTZWNyZXQ=
+      Accept: application/json
+      Content-Type: application/json
+
+      {
+          "grant_type": "authorization_code",
+          "code": "YXV0aG9yaXphdGlvbl9jb2Rl",
+          "redirect_uri": "https://my.app/auth_complete/"
+      }
+
+   **Example Response**
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json;charset=UTF-8
+
+      {
+          "username": "janedoe",
+          "user_id": 42,
+          "access_token": "t6k2BqgRw59hphQBsbBoPPWLqu6FmS",
+          "expires_in": 15552000,
+          "token_type": "Bearer",
+          "scope": "profile_read email_read",
+          "refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc"
+      }
+
+   In the case of an error, there will be a non-200 HTTP Status and data
+   detailing the error.
+
+
+3.3 Refresh a Token
+^^^^^^^^^^^^^^^^^^^
+
+Once the Access Token expires you can use your ``refresh_token`` to have
+docker.io issue your application a new Access Token, if the user has not
+revoked access from your application.
+
+.. http:post:: /api/v1.1/o/token/
+
+   Submit your ``refresh_token`` and application's credentials to receive a
+   new Access Token and Refresh Token. The ``refresh_token`` can be used
+   only once.
+
+   :reqheader Authorization: HTTP basic authentication using your
+      application's ``client_id`` and ``client_secret``
+
+   :form grant_type: MUST be set to ``refresh_token``
+   :form refresh_token: The ``refresh_token`` which was issued to your
+      application.
+   :form scope: (optional) The scope of the access token to be returned.
+      Must not include any scope not originally granted by the user and if
+      omitted is treated as equal to the scope originally granted.
+
+   **Example Request**
+
+   Refreshing an access token.
+
+   .. sourcecode:: http
+
+      POST /api/v1.1/o/token/ HTTP/1.1
+      Host: www.docker.io
+      Authorization: Basic VGVzdENsaWVudElEOlRlc3RDbGllbnRTZWNyZXQ=
+      Accept: application/json
+      Content-Type: application/json
+
+      {
+          "grant_type": "refresh_token",
+          "refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc"
+      }
+
+   **Example Response**
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json;charset=UTF-8
+
+      {
+          "username": "janedoe",
+          "user_id": 42,
+          "access_token": "t6k2BqgRw59hphQBsbBoPPWLqu6FmS",
+          "expires_in": 15552000,
+          "token_type": "Bearer",
+          "scope": "profile_read email_read",
+          "refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc"
+      }
+
+   In the case of an error, there will be a non-200 HTTP Status and data
+   detailing the error.
+
+
+4. Use an Access Token with the API
+===================================
+
+Many of the docker.io API requests will require an ``Authorization`` request
+header field. Simply ensure you add this header with "Bearer <``access_token``>":
+
+.. sourcecode:: http
+
+   GET /api/v1.1/resource HTTP/1.1
+   Host: docker.io
+   Authorization: Bearer 2YotnFZFEjr1zCsicMWpAA
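+The equivalent request with ``curl`` (a sketch; ``/api/v1.1/resource`` is an
+illustrative path, not a real endpoint):
+
+.. code-block:: bash
+
+    curl -H "Authorization: Bearer 2YotnFZFEjr1zCsicMWpAA" \
+         https://docker.io/api/v1.1/resource
+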
diff --git a/docs/sources/reference/api/docker_remote_api.rst b/docs/sources/reference/api/docker_remote_api.rst
index f7cd7faf4f..e1071bf085 100644
--- a/docs/sources/reference/api/docker_remote_api.rst
+++ b/docs/sources/reference/api/docker_remote_api.rst
@@ -2,7 +2,7 @@
 :description: API Documentation for Docker
 :keywords: API, Docker, rcli, REST, documentation

-.. COMMENT use http://pythonhosted.org/sphinxcontrib-httpdomain/ to
+.. COMMENT use https://pythonhosted.org/sphinxcontrib-httpdomain/ to
 .. document the REST API.

 =================
@@ -26,15 +26,36 @@ Docker Remote API
 2. Versions
 ===========

-The current version of the API is 1.9
+The current version of the API is 1.10

 Calling /images//insert is the same as calling
-/v1.9/images//insert
+/v1.10/images//insert

 You can still call an old version of the api using
 /v1.0/images//insert

+v1.10
+*****
+
+Full Documentation
+------------------
+
+:doc:`docker_remote_api_v1.10`
+
+What's new
+----------
+
+.. http:delete:: /images/(name)
+
+   **New!** You can now use the ``force`` parameter to force the deletion of an
+   image, even if it's tagged in multiple repositories.
+
+.. http:delete:: /containers/(id)
+
+   **New!** You can now use the ``force`` parameter to force the deletion of a
+   container, even if it is currently running.
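+For example, force-removing a running container over the remote API (a
+sketch, assuming the daemon has been bound to TCP port 4243 and using ids
+and names from the examples below):
+
+.. code-block:: bash
+
+    # force delete a running container
+    curl -X DELETE "http://127.0.0.1:4243/v1.10/containers/4fa6e0f0c678?force=1"
+
+    # force delete an image that is tagged in multiple repositories
+    curl -X DELETE "http://127.0.0.1:4243/v1.10/images/test?force=1"
+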
 v1.9
 ****
diff --git a/docs/sources/reference/api/docker_remote_api_v1.0.rst b/docs/sources/reference/api/docker_remote_api_v1.0.rst
index dc06a27fc0..fa4b969758 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.0.rst
+++ b/docs/sources/reference/api/docker_remote_api_v1.0.rst
@@ -732,11 +732,11 @@ Tag an image into a repository

    .. sourcecode:: http

-      HTTP/1.1 200 OK
+      HTTP/1.1 201 OK

    :query repo: The repository to tag in
    :query force: 1/True/true or 0/False/false, default false
-   :statuscode 200: no error
+   :statuscode 201: no error
    :statuscode 400: bad parameter
    :statuscode 404: no such image
    :statuscode 500: server error
diff --git a/docs/sources/reference/api/docker_remote_api_v1.1.rst b/docs/sources/reference/api/docker_remote_api_v1.1.rst
index 31b34caf5a..92b5039aa6 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.1.rst
+++ b/docs/sources/reference/api/docker_remote_api_v1.1.rst
@@ -742,11 +742,11 @@ Tag an image into a repository

    .. sourcecode:: http

-      HTTP/1.1 200 OK
+      HTTP/1.1 201 OK

    :query repo: The repository to tag in
    :query force: 1/True/true or 0/False/false, default false
-   :statuscode 200: no error
+   :statuscode 201: no error
    :statuscode 400: bad parameter
    :statuscode 404: no such image
    :statuscode 409: conflict
diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.rst b/docs/sources/reference/api/docker_remote_api_v1.10.rst
new file mode 100644
index 0000000000..ed63525e7e
--- /dev/null
+++ b/docs/sources/reference/api/docker_remote_api_v1.10.rst
@@ -0,0 +1,1283 @@
+:title: Remote API v1.10
+:description: API Documentation for Docker
+:keywords: API, Docker, rcli, REST, documentation
+
+:orphan:
+
+=======================
+Docker Remote API v1.10
+=======================
+
+.. contents:: Table of Contents
+
+1. Brief introduction
+=====================
+
+- The Remote API has replaced rcli
+- The daemon listens on ``unix:///var/run/docker.sock``, but you can
+  :ref:`bind_docker`.
+- The API tends to be REST, but for some complex commands, like
+  ``attach`` or ``pull``, the HTTP connection is hijacked to transport
+  ``stdout, stdin`` and ``stderr``
+
+2. Endpoints
+============
+
+2.1 Containers
+--------------
+
+List containers
+***************
+
+.. http:get:: /containers/json
+
+   List containers
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      [
+        {
+           "Id": "8dfafdbc3a40",
+           "Image": "base:latest",
+           "Command": "echo 1",
+           "Created": 1367854155,
+           "Status": "Exit 0",
+           "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
+           "SizeRw":12288,
+           "SizeRootFs":0
+        },
+        {
+           "Id": "9cd87474be90",
+           "Image": "base:latest",
+           "Command": "echo 222222",
+           "Created": 1367854155,
+           "Status": "Exit 0",
+           "Ports":[],
+           "SizeRw":12288,
+           "SizeRootFs":0
+        },
+        {
+           "Id": "3176a2479c92",
+           "Image": "base:latest",
+           "Command": "echo 3333333333333333",
+           "Created": 1367854154,
+           "Status": "Exit 0",
+           "Ports":[],
+           "SizeRw":12288,
+           "SizeRootFs":0
+        },
+        {
+           "Id": "4cb07b47f9fb",
+           "Image": "base:latest",
+           "Command": "echo 444444444444444444444444444444444",
+           "Created": 1367854152,
+           "Status": "Exit 0",
+           "Ports":[],
+           "SizeRw":12288,
+           "SizeRootFs":0
+        }
+      ]
+
+   :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default
+   :query limit: Show ``limit`` last created containers, include non-running ones.
+   :query since: Show only containers created since Id, include non-running ones.
+   :query before: Show only containers created before Id, include non-running ones.
+   :query size: 1/True/true or 0/False/false, Show the containers sizes
+   :statuscode 200: no error
+   :statuscode 400: bad parameter
+   :statuscode 500: server error
+
+
+Create a container
+******************
+
+.. http:post:: /containers/create
+
+   Create a container
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /containers/create HTTP/1.1
+      Content-Type: application/json
+
+      {
+           "Hostname":"",
+           "User":"",
+           "Memory":0,
+           "MemorySwap":0,
+           "AttachStdin":false,
+           "AttachStdout":true,
+           "AttachStderr":true,
+           "PortSpecs":null,
+           "Tty":false,
+           "OpenStdin":false,
+           "StdinOnce":false,
+           "Env":null,
+           "Cmd":[
+                   "date"
+           ],
+           "Dns":null,
+           "Image":"base",
+           "Volumes":{
+                   "/tmp": {}
+           },
+           "VolumesFrom":"",
+           "WorkingDir":"",
+           "ExposedPorts":{
+                   "22/tcp": {}
+           }
+      }
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 201 OK
+      Content-Type: application/json
+
+      {
+           "Id":"e90e34656806",
+           "Warnings":[]
+      }
+
+   :jsonparam config: the container's configuration
+   :query name: Assign the specified name to the container. Must match ``/?[a-zA-Z0-9_-]+``.
+   :statuscode 201: no error
+   :statuscode 404: no such container
+   :statuscode 406: impossible to attach (container not running)
+   :statuscode 500: server error
+
+
+Inspect a container
+*******************
+
+.. http:get:: /containers/(id)/json
+
+   Return low-level information on the container ``id``
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      GET /containers/4fa6e0f0c678/json HTTP/1.1
+
+   **Example response**:
+
+   ..
sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "base", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir":"" + + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": null, + "PublishAllPorts": false + } + } + + :statuscode 200: no error + :statuscode 404: no such container + :statuscode 500: server error + + +List processes running inside a container +***************************************** + +.. http:get:: /containers/(id)/top + + List processes running inside the container ``id`` + + **Example request**: + + .. sourcecode:: http + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles":[ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes":[ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + + :query ps_args: ps arguments to use (eg. aux) + :statuscode 200: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Inspect changes on a container's filesystem +******************************************* + +.. http:get:: /containers/(id)/changes + + Inspect changes on container ``id`` 's filesystem + + **Example request**: + + .. sourcecode:: http + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path":"/dev", + "Kind":0 + }, + { + "Path":"/dev/kmsg", + "Kind":1 + }, + { + "Path":"/test", + "Kind":1 + } + ] + + :statuscode 200: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Export a container +****************** + +.. http:get:: /containers/(id)/export + + Export the contents of container ``id`` + + **Example request**: + + .. sourcecode:: http + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ STREAM }} + + :statuscode 200: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Start a container +***************** + +.. http:post:: /containers/(id)/start + + Start the container ``id`` + + **Example request**: + + .. 
sourcecode:: http

+      POST /containers/(id)/start HTTP/1.1
+      Content-Type: application/json
+
+      {
+           "Binds":["/tmp:/tmp"],
+           "LxcConf":{"lxc.utsname":"docker"},
+           "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
+           "PublishAllPorts":false,
+           "Privileged":false
+      }
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 204 No Content
+      Content-Type: text/plain
+
+   :jsonparam hostConfig: the container's host configuration (optional)
+   :statuscode 204: no error
+   :statuscode 404: no such container
+   :statuscode 500: server error
+
+
+Stop a container
+****************
+
+.. http:post:: /containers/(id)/stop
+
+   Stop the container ``id``
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /containers/e90e34656806/stop?t=5 HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 204 OK
+
+   :query t: number of seconds to wait before killing the container
+   :statuscode 204: no error
+   :statuscode 404: no such container
+   :statuscode 500: server error
+
+
+Restart a container
+*******************
+
+.. http:post:: /containers/(id)/restart
+
+   Restart the container ``id``
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /containers/e90e34656806/restart?t=5 HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 204 OK
+
+   :query t: number of seconds to wait before killing the container
+   :statuscode 204: no error
+   :statuscode 404: no such container
+   :statuscode 500: server error
+
+
+Kill a container
+****************
+
+.. http:post:: /containers/(id)/kill
+
+   Kill the container ``id``
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /containers/e90e34656806/kill HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 204 OK
+
+   :statuscode 204: no error
+   :statuscode 404: no such container
+   :statuscode 500: server error
+
+
+Attach to a container
+*********************
+
+.. http:post:: /containers/(id)/attach
+
+   Attach to the container ``id``
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/vnd.docker.raw-stream
+
+      {{ STREAM }}
+
+   :query logs: 1/True/true or 0/False/false, return logs. Default false
+   :query stream: 1/True/true or 0/False/false, return stream. Default false
+   :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false
+   :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false
+   :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false
+   :statuscode 200: no error
+   :statuscode 400: bad parameter
+   :statuscode 404: no such container
+   :statuscode 500: server error
+
+   **Stream details**:
+
+   When the TTY setting is enabled in :http:post:`/containers/create`,
+   the stream is the raw data from the process PTY and client's stdin.
+   When the TTY is disabled, then the stream is multiplexed to separate
+   stdout and stderr.
+
+   The format is a **Header** and a **Payload** (frame).
+
+   **HEADER**
+
+   The header indicates which stream the payload belongs to (stdout or
+   stderr). It also contains the size of the associated frame, encoded in
+   the last 4 bytes (uint32).
+
+   It is encoded on the first 8 bytes like this::
+
+       header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+   ``STREAM_TYPE`` can be:
+
+   - 0: stdin (will be written on stdout)
+   - 1: stdout
+   - 2: stderr
+
+   ``SIZE1, SIZE2, SIZE3, SIZE4`` are the 4 bytes of the uint32 size encoded as big endian.
+
+   **PAYLOAD**
+
+   The payload is the raw stream.
+
+   **IMPLEMENTATION**
+
+   The simplest way to implement the Attach protocol is the following:
+
+   1) Read 8 bytes
+   2) Choose stdout or stderr depending on the first byte
+   3) Extract the frame size from the last 4 bytes
+   4) Read the extracted size and output it on the correct output
+   5) Goto 1)
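+A rough way to see the framing from the shell (a sketch; output is
+illustrative and assumes the daemon is bound to TCP port 4243 and the
+container id from the example above, with the TTY disabled):
+
+.. code-block:: bash
+
+    # fetch the container's logs and dump the first 8-byte frame header
+    curl -s -X POST "http://127.0.0.1:4243/v1.10/containers/16253994b7c4/attach?logs=1&stdout=1" \
+      | head -c 8 | od -An -tu1
+    # e.g. "1 0 0 0 0 0 0 12" -> a stdout frame with a 12-byte payload
+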
+
+
+Wait for a container
+********************
+
+.. http:post:: /containers/(id)/wait
+
+   Block until container ``id`` stops, then return the exit code
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /containers/16253994b7c4/wait HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      {"StatusCode":0}
+
+   :statuscode 200: no error
+   :statuscode 404: no such container
+   :statuscode 500: server error
+
+
+Remove a container
+******************
+
+.. http:delete:: /containers/(id)
+
+   Remove the container ``id`` from the filesystem
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      DELETE /containers/16253994b7c4?v=1 HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 204 OK
+
+   :query v: 1/True/true or 0/False/false, Remove the volumes associated with the container. Default false
+   :query force: 1/True/true or 0/False/false, Removes the container even if it was running. Default false
+   :statuscode 204: no error
+   :statuscode 400: bad parameter
+   :statuscode 404: no such container
+   :statuscode 500: server error
+
+
+Copy files or folders from a container
+**************************************
+
+.. http:post:: /containers/(id)/copy
+
+   Copy files or folders of container ``id``
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /containers/4fa6e0f0c678/copy HTTP/1.1
+      Content-Type: application/json
+
+      {
+           "Resource":"test.txt"
+      }
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/octet-stream
+
+      {{ STREAM }}
+
+   :statuscode 200: no error
+   :statuscode 404: no such container
+   :statuscode 500: server error
+
+
+2.2 Images
+----------
+
+List Images
+***********
+
+.. http:get:: /images/json
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      GET /images/json?all=0 HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      [
+        {
+           "RepoTags": [
+             "ubuntu:12.04",
+             "ubuntu:precise",
+             "ubuntu:latest"
+           ],
+           "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
+           "Created": 1365714795,
+           "Size": 131506275,
+           "VirtualSize": 131506275
+        },
+        {
+           "RepoTags": [
+             "ubuntu:12.10",
+             "ubuntu:quantal"
+           ],
+           "ParentId": "27cf784147099545",
+           "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+           "Created": 1364102658,
+           "Size": 24653,
+           "VirtualSize": 180116135
+        }
+      ]
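+The same listing can be retrieved with ``curl`` (a sketch, assuming the
+daemon is bound to TCP port 4243):
+
+.. code-block:: bash
+
+    # list top-level images, as JSON
+    curl "http://127.0.0.1:4243/v1.10/images/json?all=0"
+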
+
+Create an image
+***************
+
+.. http:post:: /images/create
+
+   Create an image, either by pulling it from the registry or by importing it
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /images/create?fromImage=base HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      {"status":"Pulling..."}
+      {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}}
+      {"error":"Invalid..."}
+      ...
+
+   When using this endpoint to pull an image from the registry,
+   the ``X-Registry-Auth`` header can be used to include a
+   base64-encoded AuthConfig object.
+
+   :query fromImage: name of the image to pull
+   :query fromSrc: source to import, - means stdin
+   :query repo: repository
+   :query tag: tag
+   :query registry: the registry to pull from
+   :reqheader X-Registry-Auth: base64-encoded AuthConfig object
+   :statuscode 200: no error
+   :statuscode 500: server error
+
+
+
+Insert a file in an image
+*************************
+
+.. http:post:: /images/(name)/insert
+
+   Insert a file from ``url`` in the image ``name`` at ``path``
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /images/test/insert?path=/usr&url=myurl HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      {"status":"Inserting..."}
+      {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}}
+      {"error":"Invalid..."}
+      ...
+
+   :statuscode 200: no error
+   :statuscode 500: server error
+
+
+Inspect an image
+****************
+
+.. http:get:: /images/(name)/json
+
+   Return low-level information on the image ``name``
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      GET /images/base/json HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      {
+           "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+           "parent":"27cf784147099545",
+           "created":"2013-03-23T22:24:18.818426-07:00",
+           "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0",
+           "container_config":
+                   {
+                           "Hostname":"",
+                           "User":"",
+                           "Memory":0,
+                           "MemorySwap":0,
+                           "AttachStdin":false,
+                           "AttachStdout":false,
+                           "AttachStderr":false,
+                           "PortSpecs":null,
+                           "Tty":true,
+                           "OpenStdin":true,
+                           "StdinOnce":false,
+                           "Env":null,
+                           "Cmd": ["/bin/bash"],
+                           "Dns":null,
+                           "Image":"base",
+                           "Volumes":null,
+                           "VolumesFrom":"",
+                           "WorkingDir":""
+                   },
+           "Size": 6824592
+      }
+
+   :statuscode 200: no error
+   :statuscode 404: no such image
+   :statuscode 500: server error
+
+
+Get the history of an image
+***************************
+
+.. http:get:: /images/(name)/history
+
+   Return the history of the image ``name``
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      GET /images/base/history HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      [
+           {
+                   "Id":"b750fe79269d",
+                   "Created":1364102658,
+                   "CreatedBy":"/bin/bash"
+           },
+           {
+                   "Id":"27cf78414709",
+                   "Created":1364068391,
+                   "CreatedBy":""
+           }
+      ]
+
+   :statuscode 200: no error
+   :statuscode 404: no such image
+   :statuscode 500: server error
+
+
+Push an image on the registry
+*****************************
+
+.. http:post:: /images/(name)/push
+
+   Push the image ``name`` on the registry
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /images/test/push HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      {"status":"Pushing..."}
+      {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}
+      {"error":"Invalid..."}
+      ...
+
+   :query registry: the registry you want to push to, optional
+   :reqheader X-Registry-Auth: include a base64-encoded AuthConfig object.
+ :statuscode 200: no error + :statuscode 404: no such image + :statuscode 500: server error + + +Tag an image into a repository +****************************** + +.. http:post:: /images/(name)/tag + + Tag the image ``name`` into a repository + + **Example request**: + + .. sourcecode:: http + + POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 201 OK + + :query repo: The repository to tag in + :query force: 1/True/true or 0/False/false, default false + :statuscode 201: no error + :statuscode 400: bad parameter + :statuscode 404: no such image + :statuscode 409: conflict + :statuscode 500: server error + + +Remove an image +*************** + +.. http:delete:: /images/(name) + + Remove the image ``name`` from the filesystem + + **Example request**: + + .. sourcecode:: http + + DELETE /images/test HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged":"3e2f21a89f"}, + {"Deleted":"3e2f21a89f"}, + {"Deleted":"53b4f83ac9"} + ] + + :query force: 1/True/true or 0/False/false, default false + :statuscode 200: no error + :statuscode 404: no such image + :statuscode 409: conflict + :statuscode 500: server error + + +Search images +************* + +.. http:get:: /images/search + + Search for an image in the docker index. + + .. note:: + + The response keys have changed from API v1.6 to reflect the JSON + sent by the registry server to the docker daemon's request. + + **Example request**: + + .. sourcecode:: http + + GET /images/search?term=sshd HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + + :query term: term to search + :statuscode 200: no error + :statuscode 500: server error + + +2.3 Misc +-------- + +Build an image from Dockerfile via stdin +**************************************** + +.. http:post:: /build + + Build an image from Dockerfile via stdin + + **Example request**: + + .. sourcecode:: http + + POST /build HTTP/1.1 + + {{ STREAM }} + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream":"Step 1..."} + {"stream":"..."} + {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}} + + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, + xz. + + The archive must include a file called ``Dockerfile`` at its + root. It may include any number of other files, which will be + accessible in the build context (See the :ref:`ADD build command + `). + + :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success + :query q: suppress verbose build output + :query nocache: do not use the cache when building the image + :reqheader Content-type: should be set to ``"application/tar"``. + :reqheader X-Registry-Config: base64-encoded ConfigFile object + :statuscode 200: no error + :statuscode 500: server error + + + +Check auth configuration +************************ + +.. 
+Check auth configuration
+************************
+
+.. http:post:: /auth
+
+   Get the default username and email
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /auth HTTP/1.1
+      Content-Type: application/json
+
+      {
+           "username":"hannibal",
+           "password":"xxxx",
+           "email":"hannibal@a-team.com",
+           "serveraddress":"https://index.docker.io/v1/"
+      }
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+
+   :statuscode 200: no error
+   :statuscode 204: no error
+   :statuscode 500: server error
+
+
+Display system-wide information
+*******************************
+
+.. http:get:: /info
+
+   Display system-wide information
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      GET /info HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      {
+           "Containers":11,
+           "Images":16,
+           "Debug":false,
+           "NFd": 11,
+           "NGoroutines":21,
+           "MemoryLimit":true,
+           "SwapLimit":false,
+           "IPv4Forwarding":true
+      }
+
+   :statuscode 200: no error
+   :statuscode 500: server error
+
+
+Show the docker version information
+***********************************
+
+.. http:get:: /version
+
+   Show the docker version information
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      GET /version HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      {
+           "Version":"0.2.2",
+           "GitCommit":"5a2a5cc+CHANGES",
+           "GoVersion":"go1.0.3"
+      }
+
+   :statuscode 200: no error
+   :statuscode 500: server error
+
+
+Create a new image from a container's changes
+*********************************************
+
+.. http:post:: /commit
+
+   Create a new image from a container's changes
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 201 OK
+      Content-Type: application/vnd.docker.raw-stream
+
+      {"Id":"596069db4bf5"}
+
+   :query container: source container
+   :query repo: repository
+   :query tag: tag
+   :query m: commit message
+   :query author: author (e.g. "John Hannibal Smith ")
+   :query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]})
+   :statuscode 201: no error
+   :statuscode 404: no such container
+   :statuscode 500: server error
+
+
+Monitor Docker's events
+***********************
+
+.. http:get:: /events
+
+   Get events from docker, either in real time via streaming, or via polling (using `since`)
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      GET /events?since=1374067924
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
+      {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
+      {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966}
+      {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970}
+
+   :query since: timestamp used for polling
+   :statuscode 200: no error
+   :statuscode 500: server error
+
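Both the streaming and polling forms of ``/events`` deliver the same newline-separated JSON records shown above, so one decoder loop handles both. A hypothetical Go consumer, not part of this patch; the daemon address is an assumption:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// event mirrors the records shown in the /events example above.
type event struct {
	Status string `json:"status"`
	ID     string `json:"id"`
	From   string `json:"from"`
	Time   int64  `json:"time"`
}

func main() {
	// Poll for everything that happened since the given timestamp;
	// omit ?since=... to stream events in real time instead.
	resp, err := http.Get("http://127.0.0.1:4243/events?since=1374067924")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	dec := json.NewDecoder(resp.Body)
	for {
		var e event
		if err := dec.Decode(&e); err != nil {
			break // io.EOF once the server closes the stream
		}
		fmt.Printf("%d %s %s (%s)\n", e.Time, e.Status, e.ID, e.From)
	}
}
```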
+Get a tarball containing all images and tags in a repository
+************************************************************
+
+.. http:get:: /images/(name)/get
+
+   Get a tarball containing all images and metadata for the repository specified by ``name``.
+
+   **Example request**
+
+   .. sourcecode:: http
+
+      GET /images/ubuntu/get
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/x-tar
+
+      Binary data stream
+
+   :statuscode 200: no error
+   :statuscode 500: server error
+
+Load a tarball with a set of images and tags into docker
+********************************************************
+
+.. http:post:: /images/load
+
+   Load a set of images and tags into the docker repository.
+
+   **Example request**
+
+   .. sourcecode:: http
+
+      POST /images/load
+
+      Tarball in body
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+
+   :statuscode 200: no error
+   :statuscode 500: server error
+
+3. Going further
+================
+
+3.1 Inside 'docker run'
+-----------------------
+
+Here are the steps of 'docker run':
+
+* Create the container
+* If the status code is 404, it means the image doesn't exist:
+  * Try to pull it
+  * Then retry to create the container
+* Start the container
+* If you are not in detached mode:
+  * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1
+* If in detached mode or only stdin is attached:
+  * Display the container's id
+
+
+3.2 Hijacking
+-------------
+
+In this version of the API, /attach uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future.
+
+3.3 CORS Requests
+-----------------
+
+To enable cross-origin requests to the remote API, add the flag "-api-enable-cors" when running docker in daemon mode.
+
+.. code-block:: bash
+
+   docker -d -H="192.168.1.9:4243" -api-enable-cors
diff --git a/docs/sources/reference/api/docker_remote_api_v1.2.rst b/docs/sources/reference/api/docker_remote_api_v1.2.rst
index 555ec14b75..1ae2db696f 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.2.rst
+++ b/docs/sources/reference/api/docker_remote_api_v1.2.rst
@@ -761,11 +761,11 @@ Tag an image into a repository
 
    .. sourcecode:: http
 
-      HTTP/1.1 200 OK
+      HTTP/1.1 201 OK
 
    :query repo: The repository to tag in
    :query force: 1/True/true or 0/False/false, default false
-   :statuscode 200: no error
+   :statuscode 201: no error
    :statuscode 400: bad parameter
    :statuscode 404: no such image
    :statuscode 409: conflict
diff --git a/docs/sources/reference/api/docker_remote_api_v1.3.rst b/docs/sources/reference/api/docker_remote_api_v1.3.rst
index ab452798b9..cb4c54642d 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.3.rst
+++ b/docs/sources/reference/api/docker_remote_api_v1.3.rst
@@ -808,11 +808,11 @@ Tag an image into a repository
 
    .. sourcecode:: http
 
-      HTTP/1.1 200 OK
+      HTTP/1.1 201 OK
 
    :query repo: The repository to tag in
    :query force: 1/True/true or 0/False/false, default false
-   :statuscode 200: no error
+   :statuscode 201: no error
    :statuscode 400: bad parameter
    :statuscode 404: no such image
    :statuscode 409: conflict
diff --git a/docs/sources/reference/api/docker_remote_api_v1.4.rst b/docs/sources/reference/api/docker_remote_api_v1.4.rst
index 5c8884b16f..39c8839653 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.4.rst
+++ b/docs/sources/reference/api/docker_remote_api_v1.4.rst
@@ -852,11 +852,11 @@ Tag an image into a repository
 
    ..
sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 409: conflict diff --git a/docs/sources/reference/api/docker_remote_api_v1.5.rst b/docs/sources/reference/api/docker_remote_api_v1.5.rst index 609fc6b056..0cdbaf747a 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.5.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.5.rst @@ -831,11 +831,11 @@ Tag an image into a repository .. sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 409: conflict diff --git a/docs/sources/reference/api/docker_remote_api_v1.6.rst b/docs/sources/reference/api/docker_remote_api_v1.6.rst index df53275a4f..a9ddfb2c13 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.6.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.6.rst @@ -958,11 +958,11 @@ Tag an image into a repository .. sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 409: conflict diff --git a/docs/sources/reference/api/docker_remote_api_v1.7.rst b/docs/sources/reference/api/docker_remote_api_v1.7.rst index 28c5ba30f2..cacd7ab6f7 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.7.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.7.rst @@ -877,11 +877,11 @@ Tag an image into a repository .. sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 409: conflict diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.rst b/docs/sources/reference/api/docker_remote_api_v1.8.rst index 6ccc6eca94..b752f2f8a4 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.8.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.8.rst @@ -118,6 +118,7 @@ Create a container "User":"", "Memory":0, "MemorySwap":0, + "CpuShares":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, @@ -153,7 +154,15 @@ Create a container "Warnings":[] } - :jsonparam config: the container's configuration + :jsonparam Hostname: Container host name + :jsonparam User: Username or UID + :jsonparam Memory: Memory Limit in bytes + :jsonparam CpuShares: CPU shares (relative weight) + :jsonparam AttachStdin: 1/True/true or 0/False/false, attach to standard input. Default false + :jsonparam AttachStdout: 1/True/true or 0/False/false, attach to standard output. Default false + :jsonparam AttachStderr: 1/True/true or 0/False/false, attach to standard error. Default false + :jsonparam Tty: 1/True/true or 0/False/false, allocate a pseudo-tty. Default false + :jsonparam OpenStdin: 1/True/true or 0/False/false, keep stdin open even if not attached. Default false :query name: Assign the specified name to the container. Must match ``/?[a-zA-Z0-9_-]+``. 
:statuscode 201: no error :statuscode 404: no such container @@ -394,7 +403,11 @@ Start a container HTTP/1.1 204 No Content Content-Type: text/plain - :jsonparam hostConfig: the container's host configuration (optional) + :jsonparam Binds: Create a bind mount to a directory or file with [host-path]:[container-path]:[rw|ro]. If a directory "container-path" is missing, then docker creates a new volume. + :jsonparam LxcConf: Map of custom lxc options + :jsonparam PortBindings: Expose ports from the container, optionally publishing them via the HostPort flag + :jsonparam PublishAllPorts: 1/True/true or 0/False/false, publish all exposed ports to the host interfaces. Default false + :jsonparam Privileged: 1/True/true or 0/False/false, give extended privileges to this container. Default false :statuscode 204: no error :statuscode 404: no such container :statuscode 500: server error @@ -892,11 +905,11 @@ Tag an image into a repository .. sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 409: conflict diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.rst b/docs/sources/reference/api/docker_remote_api_v1.9.rst index cb406da82b..9430ff370d 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.9.rst @@ -118,6 +118,7 @@ Create a container "User":"", "Memory":0, "MemorySwap":0, + "CpuShares":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, @@ -153,7 +154,15 @@ Create a container "Warnings":[] } - :jsonparam config: the container's configuration + :jsonparam Hostname: Container host name + :jsonparam User: Username or UID + :jsonparam Memory: Memory Limit in bytes + :jsonparam CpuShares: CPU shares (relative weight) + :jsonparam AttachStdin: 1/True/true or 0/False/false, attach to standard input. Default false + :jsonparam AttachStdout: 1/True/true or 0/False/false, attach to standard output. Default false + :jsonparam AttachStderr: 1/True/true or 0/False/false, attach to standard error. Default false + :jsonparam Tty: 1/True/true or 0/False/false, allocate a pseudo-tty. Default false + :jsonparam OpenStdin: 1/True/true or 0/False/false, keep stdin open even if not attached. Default false :query name: Assign the specified name to the container. Must match ``/?[a-zA-Z0-9_-]+``. :statuscode 201: no error :statuscode 404: no such container @@ -394,7 +403,11 @@ Start a container HTTP/1.1 204 No Content Content-Type: text/plain - :jsonparam hostConfig: the container's host configuration (optional) + :jsonparam Binds: Create a bind mount to a directory or file with [host-path]:[container-path]:[rw|ro]. If a directory "container-path" is missing, then docker creates a new volume. + :jsonparam LxcConf: Map of custom lxc options + :jsonparam PortBindings: Expose ports from the container, optionally publishing them via the HostPort flag + :jsonparam PublishAllPorts: 1/True/true or 0/False/false, publish all exposed ports to the host interfaces. Default false + :jsonparam Privileged: 1/True/true or 0/False/false, give extended privileges to this container. Default false :statuscode 204: no error :statuscode 404: no such container :statuscode 500: server error @@ -892,11 +905,11 @@ Tag an image into a repository .. 
sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 409: conflict @@ -993,12 +1006,12 @@ Search images 2.3 Misc -------- -Build an image from Dockerfile via stdin -**************************************** +Build an image from Dockerfile +****************************** .. http:post:: /build - Build an image from Dockerfile via stdin + Build an image from Dockerfile using a POST body. **Example request**: @@ -1032,6 +1045,7 @@ Build an image from Dockerfile via stdin :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success :query q: suppress verbose build output :query nocache: do not use the cache when building the image + :query rm: Remove intermediate containers after a successful build :reqheader Content-type: should be set to ``"application/tar"``. :reqheader X-Registry-Config: base64-encoded ConfigFile object :statuscode 200: no error diff --git a/docs/sources/reference/api/index.rst b/docs/sources/reference/api/index.rst index 017369143c..3c84a505c6 100644 --- a/docs/sources/reference/api/index.rst +++ b/docs/sources/reference/api/index.rst @@ -15,4 +15,6 @@ Your programs and scripts can access Docker's functionality via these interfaces index_api docker_remote_api remote_api_client_libraries + docker_io_oauth_api + docker_io_accounts_api diff --git a/docs/sources/reference/api/remote_api_client_libraries.rst b/docs/sources/reference/api/remote_api_client_libraries.rst index 45ce8ff9d1..9bab343bf5 100644 --- a/docs/sources/reference/api/remote_api_client_libraries.rst +++ b/docs/sources/reference/api/remote_api_client_libraries.rst @@ -1,6 +1,6 @@ :title: Remote API Client Libraries :description: Various client libraries available to use with the Docker remote API -:keywords: API, Docker, index, registry, REST, documentation, clients, Python, Ruby, Javascript, Erlang, Go +:keywords: API, Docker, index, registry, REST, documentation, clients, Python, Ruby, JavaScript, Erlang, Go ================================== @@ -21,12 +21,18 @@ and we will add the libraries here. 
+----------------------+----------------+--------------------------------------------+----------+
 | Ruby                 | docker-api     | https://github.com/swipely/docker-api      | Active   |
 +----------------------+----------------+--------------------------------------------+----------+
-| Javascript (NodeJS)  | docker.io      | https://github.com/appersonlabs/docker.io  | Active   |
+| JavaScript (NodeJS)  | dockerode      | https://github.com/apocas/dockerode        | Active   |
+|                      |                | Install via NPM: `npm install dockerode`   |          |
++----------------------+----------------+--------------------------------------------+----------+
+| JavaScript (NodeJS)  | docker.io      | https://github.com/appersonlabs/docker.io  | Active   |
 |                      |                | Install via NPM: `npm install docker.io`   |          |
 +----------------------+----------------+--------------------------------------------+----------+
-| Javascript           | docker-js      | https://github.com/dgoujard/docker-js      | Active   |
+| JavaScript           | docker-js      | https://github.com/dgoujard/docker-js      | Outdated |
 +----------------------+----------------+--------------------------------------------+----------+
-| Javascript (Angular) | dockerui       | https://github.com/crosbymichael/dockerui  | Active   |
+| JavaScript (Angular) | docker-cp      | https://github.com/13W/docker-cp           | Active   |
+| **WebUI**            |                |                                            |          |
++----------------------+----------------+--------------------------------------------+----------+
+| JavaScript (Angular) | dockerui       | https://github.com/crosbymichael/dockerui  | Active   |
 | **WebUI**            |                |                                            |          |
 +----------------------+----------------+--------------------------------------------+----------+
 | Java                 | docker-java    | https://github.com/kpelykh/docker-java     | Active   |
@@ -37,3 +43,5 @@ and we will add the libraries here.
 +----------------------+----------------+--------------------------------------------+----------+
 | PHP                  | Alvine         | http://pear.alvine.io/ (alpha)             | Active   |
 +----------------------+----------------+--------------------------------------------+----------+
+| PHP                  | Docker-PHP     | http://stage1.github.io/docker-php/        | Active   |
++----------------------+----------------+--------------------------------------------+----------+
diff --git a/docs/sources/reference/builder.rst b/docs/sources/reference/builder.rst
index 571824c36c..9f7a816801 100644
--- a/docs/sources/reference/builder.rst
+++ b/docs/sources/reference/builder.rst
@@ -74,7 +74,7 @@ When you're done with your build, you're ready to look into
 2. Format
 =========
 
-The Dockerfile format is quite simple:
+Here is the format of the Dockerfile:
 
 ::
 
@@ -251,9 +251,14 @@ value ````. This value will be passed to all future ``RUN``
 instructions. This is functionally equivalent to prefixing the command
 with ``=``
 
+The environment variables set using ``ENV`` will persist when a container is run
+from the resulting image. You can view the values using ``docker inspect``, and change them using ``docker run --env =``.
+
 .. note::
-    The environment variables will persist when a container is run
-    from the resulting image.
+    One example where this can cause unexpected consequences is setting
+    ``ENV DEBIAN_FRONTEND noninteractive``, which will persist when the
+    container is run interactively; for example: ``docker run -t -i image bash``
 
 .. _dockerfile_add:
 
@@ -461,6 +466,8 @@ For example you might add something like this:
     ONBUILD RUN /usr/local/bin/python-build --dir /app/src
     [...]
 
+.. warning:: Chaining ONBUILD instructions using `ONBUILD ONBUILD` isn't allowed.
+.. warning:: ONBUILD may not trigger FROM or MAINTAINER instructions.
 
 ..
_dockerfile_examples: diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index ae77080309..2404e29b29 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -12,7 +12,7 @@ To list available commands, either run ``docker`` with no parameters or execute $ sudo docker Usage: docker [OPTIONS] COMMAND [arg...] - -H=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind/connect to or unix://[/path/to/socket] to use. When host=[0.0.0.0], port=[4243] or path=[/var/run/docker.sock] is omitted, default values are used. + -H=[unix:///var/run/docker.sock]: tcp://[host]:port to bind/connect to or unix://[/path/to/socket] to use. When host=[127.0.0.1] is omitted for tcp or path=[/var/run/docker.sock] is omitted for unix sockets, default values are used. A self-sufficient runtime for linux containers. @@ -20,8 +20,12 @@ To list available commands, either run ``docker`` with no parameters or execute .. _cli_options: -Types of Options ----------------- +Options +------- + +Single character commandline options can be combined, so rather than typing +``docker run -t -i --name test busybox sh``, you can write +``docker run -ti --name test busybox sh``. Boolean ~~~~~~~ @@ -67,6 +71,7 @@ Commands Usage of docker: -D, --debug=false: Enable debug mode -H, --host=[]: Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise. systemd socket activation can be used with fd://[socketfd]. + -G, --group="docker": Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group --api-enable-cors=false: Enable CORS headers in the remote API -b, --bridge="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking --bip="": Use this CIDR notation address for the network bridge's IP, not compatible with -b @@ -79,8 +84,9 @@ Commands -p, --pidfile="/var/run/docker.pid": Path to use for daemon PID file -r, --restart=true: Restart previously running containers -s, --storage-driver="": Force the docker runtime to use a specific storage driver + -e, --exec-driver="native": Force the docker runtime to use a specific exec driver -v, --version=false: Print version information and quit - -mtu, --mtu=0: Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if not default route is available + --mtu=0: Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available The Docker daemon is the persistent process that manages containers. Docker uses the same binary for both the daemon and client. To run the daemon you provide the ``-d`` flag. @@ -91,6 +97,8 @@ To set the DNS server for all Docker containers, use ``docker -d -dns 8.8.8.8``. To run the daemon with debug output, use ``docker -d -D``. +To use lxc as the execution driver, use ``docker -d -e lxc``. + The docker client will also honor the ``DOCKER_HOST`` environment variable to set the ``-H`` flag for the client. @@ -102,12 +110,21 @@ the ``-H`` flag for the client. docker ps # both are equal - To run the daemon with `systemd socket activation `_, use ``docker -d -H fd://``. Using ``fd://`` will work perfectly for most setups but you can also specify individual sockets too ``docker -d -H fd://3``. If the specified socket activated files aren't found then docker will exit. 
You can find examples of using systemd socket activation with docker and systemd in the `docker source tree `_. +Docker supports softlinks for the Docker data directory (``/var/lib/docker``) and for ``/tmp``. +TMPDIR and the data directory can be set like this: + +:: + + TMPDIR=/mnt/disk2/tmp /usr/local/bin/docker -d -D -g /var/lib/docker -H unix:// > /var/lib/boot2docker/docker.log 2>&1 + # or + export TMPDIR=/mnt/disk2/tmp + /usr/local/bin/docker -d -D -g /var/lib/docker -H unix:// > /var/lib/boot2docker/docker.log 2>&1 + .. _cli_attach: ``attach`` @@ -179,17 +196,18 @@ Examples: Usage: docker build [OPTIONS] PATH | URL | - Build a new container image from the source code at PATH - -t, --time="": Repository name (and optionally a tag) to be applied + -t, --tag="": Repository name (and optionally a tag) to be applied to the resulting image in case of success. - -q, --quiet=false: Suppress verbose build output. + -q, --quiet=false: Suppress the verbose output generated by the containers. --no-cache: Do not use the cache when building the image. - --rm: Remove intermediate containers after a successful build + --rm=true: Remove intermediate containers after a successful build The files at ``PATH`` or ``URL`` are called the "context" of the build. The build process may refer to any of the files in the context, for example when using an :ref:`ADD ` instruction. When a single ``Dockerfile`` is given as ``URL``, then no context is set. When a Git repository is set as -``URL``, then the repository is used as the context +``URL``, then the repository is used as the context. Git repositories are +cloned with their submodules (`git clone --recursive`). .. _cli_build_examples: @@ -223,6 +241,9 @@ Examples: ---> Running in 02071fceb21b ---> f52f38b7823e Successfully built f52f38b7823e + Removing intermediate container 9c9e81692ae9 + Removing intermediate container 02071fceb21b + This example specifies that the ``PATH`` is ``.``, and so all the files in the local directory get tar'd and sent to the Docker daemon. The ``PATH`` @@ -237,6 +258,9 @@ The transfer of context from the local machine to the Docker daemon is what the ``docker`` client means when you see the "Uploading context" message. +If you wish to keep the intermediate containers after the build is complete, +you must use ``--rm=false``. This does not affect the build cache. + .. code-block:: bash @@ -504,7 +528,7 @@ For example: Show the history of an image --no-trunc=false: Don't truncate output - -q, --quiet=false: only show numeric IDs + -q, --quiet=false: Only show numeric IDs To see how the ``docker:latest`` image was built: @@ -551,11 +575,11 @@ To see how the ``docker:latest`` image was built: List images - -a, --all=false: show all images (by default filter out the intermediate images used to build) + -a, --all=false: Show all images (by default filter out the intermediate images used to build) --no-trunc=false: Don't truncate output - -q, --quiet=false: only show numeric IDs - --tree=false: output graph in tree format - --viz=false: output graph in graphviz format + -q, --quiet=false: Only show numeric IDs + --tree=false: Output graph in tree format + --viz=false: Output graph in graphviz format Listing the most recently created images ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -792,6 +816,19 @@ we ask for the ``HostPort`` field to get the public address. $ sudo docker inspect -format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID +Get config +.......... 
+
+The ``.Field`` syntax doesn't work when the field contains JSON data,
+but the template language's custom ``json`` function does. The ``.config``
+section contains a complex JSON object, so to grab it as JSON, you use
+``json`` to convert the config object into JSON.
+
+.. code-block:: bash
+
+   $ sudo docker inspect -format='{{json .config}}' $INSTANCE_ID
+
+
 .. _cli_kill:
 
 ``kill``
 
@@ -838,9 +875,9 @@ Known Issues (kill)
 
     Register or Login to the docker registry server
 
-    -e, --email="": email
-    -p, --password="": password
-    -u, --username="": username
+    -e, --email="": Email
+    -p, --password="": Password
+    -u, --username="": Username
 
 If you want to login to a private registry you can
 specify this by adding the server name.
 
@@ -911,6 +948,8 @@ Running ``docker ps`` showing 2 linked containers.
 
 The last container is marked as a ``Ghost`` container. It is a container that was running when the docker daemon was restarted (upgraded, or ``-H`` settings changed). The container is still running, but as this docker daemon process is not able to manage it, you can't attach to it. To bring them out of ``Ghost`` Status, you need to use ``docker kill`` or ``docker restart``.
 
+``docker ps`` will show only running containers by default. To see all containers: ``docker ps -a``
+
 .. _cli_pull:
 
 ``pull``
 
@@ -956,7 +995,8 @@ The last container is marked as a ``Ghost`` container. It is a container that wa
     Usage: docker rm [OPTIONS] CONTAINER
 
     Remove one or more containers
-        --link="": Remove the link instead of the actual container
+        -l, --link="": Remove the link instead of the actual container
+        -f, --force=false: Force removal of running container
 
 Known Issues (rm)
 ~~~~~~~~~~~~~~~~~
 
@@ -1005,6 +1045,8 @@ containers will not be deleted.
     Usage: docker rmi IMAGE [IMAGE...]
 
     Remove one or more images
+
+    -f, --force=false: Force
 
 Removing tagged images
 ~~~~~~~~~~~~~~~~~~~~~~
 
@@ -1054,7 +1096,7 @@ image is removed.
       --cidfile="": Write the container ID to the file
       -d, --detach=false: Detached mode: Run container in the background, print new container id
       -e, --env=[]: Set environment variables
-      -h, --host="": Container host name
+      -h, --hostname="": Container host name
      -i, --interactive=false: Keep stdin open even if not attached
      --privileged=false: Give extended privileges to this container
      -m, --memory="": Memory limit (format: , where unit = b, k, m or g)
@@ -1079,10 +1121,15 @@ The ``docker run`` command first ``creates`` a writeable container layer over
 the specified image, and then ``starts`` it using the specified command. That
 is, ``docker run`` is equivalent to the API ``/containers/create`` then
 ``/containers/(id)/start``.
+Once the container is stopped it still exists and can be started back up. See ``docker ps -a`` to view a list of all containers.
 
 The ``docker run`` command can be used in combination with ``docker commit`` to
 :ref:`change the command that a container runs `.
 
+See :ref:`port_redirection` for more detailed information about the ``--expose``,
+``-p``, ``-P`` and ``--link`` parameters, and :ref:`working_with_links_names` for
+specific examples using ``--link``.
+
 Known Issues (run -volumes-from)
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -1202,7 +1249,7 @@ to the newly created container.
 
     $ sudo docker run --volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
 
 The ``--volumes-from`` flag mounts all the defined volumes from the
-referenced containers. Containers can be specified by a comma seperated
+referenced containers. Containers can be specified by a comma separated
 list or by repetitions of the ``--volumes-from`` argument. The container
 ID may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes
 in read-only or read-write mode, respectively. By default, the volumes are mounted
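To illustrate the ``json`` template function described under "Get config" above: ``docker inspect -format`` is driven by Go's ``text/template`` package, and a custom ``json`` function can be registered as shown in this minimal standalone sketch. This is not Docker's actual implementation, and the sample container data is invented:

```go
package main

import (
	"encoding/json"
	"os"
	"text/template"
)

func main() {
	// A stand-in for the container data that `docker inspect` exposes.
	container := map[string]interface{}{
		"ID": "b750fe79269d",
		"config": map[string]interface{}{
			"Hostname": "",
			"Tty":      true,
			"Cmd":      []string{"/bin/bash"},
		},
	}

	// Register a custom "json" function, then evaluate a -format style
	// template such as '{{json .config}}'.
	funcs := template.FuncMap{
		"json": func(v interface{}) (string, error) {
			b, err := json.Marshal(v)
			return string(b), err
		},
	}
	tmpl := template.Must(template.New("format").Funcs(funcs).
		Parse("{{json .config}}\n"))
	if err := tmpl.Execute(os.Stdout, container); err != nil {
		panic(err)
	}
}
```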
@@ -1291,7 +1338,7 @@ The main process inside the container will receive SIGTERM, and after a grace pe
 
 ::
 
-    Usage: docker tag [OPTIONS] IMAGE REPOSITORY[:TAG]
+    Usage: docker tag [OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]
 
     Tag an image into a repository
 
diff --git a/docs/sources/reference/run.rst b/docs/sources/reference/run.rst
index 307edace00..d8de280671 100644
--- a/docs/sources/reference/run.rst
+++ b/docs/sources/reference/run.rst
@@ -143,6 +143,7 @@ Network Settings
 ----------------
 
 ::
+
     -n=true : Enable networking for this container
     -dns=[] : Set custom dns servers for the container
 
diff --git a/docs/sources/terms/index.rst b/docs/sources/terms/index.rst
index 882d83f0d4..40851082b5 100644
--- a/docs/sources/terms/index.rst
+++ b/docs/sources/terms/index.rst
@@ -18,5 +18,7 @@ Contents:
    layer
    image
    container
+   registry
+   repository
 
diff --git a/docs/sources/terms/registry.rst b/docs/sources/terms/registry.rst
new file mode 100644
index 0000000000..90c3ee721c
--- /dev/null
+++ b/docs/sources/terms/registry.rst
@@ -0,0 +1,16 @@
+:title: Registry
+:description: Definition of a Registry
+:keywords: containers, lxc, concepts, explanation, image, repository, container
+
+.. _registry_def:
+
+Registry
+==========
+
+A Registry is a hosted service containing :ref:`repositories`
+of :ref:`images` which responds to the Registry API.
+
+The default registry can be accessed using a browser at http://images.docker.io
+or using the ``sudo docker search`` command.
+
+For more information see :ref:`Working with Repositories`
diff --git a/docs/sources/terms/repository.rst b/docs/sources/terms/repository.rst
new file mode 100644
index 0000000000..e4fe4b8fd1
--- /dev/null
+++ b/docs/sources/terms/repository.rst
@@ -0,0 +1,30 @@
+:title: Repository
+:description: Definition of a Repository
+:keywords: containers, lxc, concepts, explanation, image, repository, container
+
+.. _repository_def:
+
+Repository
+==========
+
+A repository is a set of images either on your local Docker server, or
+shared by pushing them to a :ref:`Registry` server.
+
+Images can be associated with a repository (or multiple repositories) by giving
+them an image name using one of three different commands:
+
+1. At build time (e.g. ``sudo docker build -t IMAGENAME``),
+2. When committing a container (e.g. ``sudo docker commit CONTAINERID IMAGENAME``) or
+3. When tagging an image id with an image name (e.g. ``sudo docker tag IMAGEID IMAGENAME``).
+
+A `Fully Qualified Image Name` (FQIN) can be made up of 3 parts:
+
+``[registry_hostname[:port]/][user_name/](repository_name[:version_tag])``
+
+``version_tag`` defaults to ``latest``; ``user_name`` and ``registry_hostname`` default to an empty string.
+When ``registry_hostname`` is an empty string, ``docker push`` will push to ``index.docker.io:80``.
+
+If you create a new repository which you want to share, you will need to set at least the
+``user_name``, as the 'default' blank ``user_name`` prefix is reserved for official Docker images.
+
+For more information see :ref:`Working with Repositories`
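The FQIN rules above are mechanical enough to sketch in code. A hypothetical parser, not part of Docker, applying the stated defaults plus the heuristic mentioned later in this patch that a registry hostname is recognized by containing a ``.`` or ``:``:

```go
package main

import (
	"fmt"
	"strings"
)

// fqin holds the parts of a Fully Qualified Image Name.
type fqin struct {
	Registry, UserName, RepoName, Tag string
}

// parseFQIN splits [registry_hostname[:port]/][user_name/]repo[:tag],
// applying the documented defaults (tag "latest", empty user and registry).
func parseFQIN(name string) fqin {
	f := fqin{Tag: "latest"}
	parts := strings.Split(name, "/")
	// A leading component containing '.' or ':' is treated as a registry host.
	if len(parts) > 2 || (len(parts) == 2 && strings.ContainsAny(parts[0], ".:")) {
		f.Registry, parts = parts[0], parts[1:]
	}
	if len(parts) == 2 {
		f.UserName, parts = parts[0], parts[1:]
	}
	f.RepoName = parts[0]
	if i := strings.LastIndex(f.RepoName, ":"); i >= 0 {
		f.RepoName, f.Tag = f.RepoName[:i], f.RepoName[i+1:]
	}
	return f
}

func main() {
	for _, n := range []string{"ubuntu", "crosbymichael/redis",
		"localhost:5000/jdoe/myrepo:1.0"} {
		fmt.Printf("%q -> %+v\n", n, parseFQIN(n))
	}
}
```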
diff --git a/docs/sources/use/basics.rst b/docs/sources/use/basics.rst
index 6bd1f0b7a0..24c22bba39 100644
--- a/docs/sources/use/basics.rst
+++ b/docs/sources/use/basics.rst
@@ -50,6 +50,7 @@ Running an interactive shell
   # allocate a tty, attach stdin and stdout
   # To detach the tty without exiting the shell,
   # use the escape sequence Ctrl-p + Ctrl-q
+  # note: The container will continue to exist in a stopped state once exited (see "docker ps -a")
   sudo docker run -i -t ubuntu /bin/bash
 
 .. _bind_docker:
 
@@ -59,10 +60,10 @@ Bind Docker to another host/port or a Unix socket
 
 .. warning:: Changing the default ``docker`` daemon binding to a TCP
    port or Unix *docker* user group will increase your security risks
-   by allowing non-root users to potentially gain *root* access on the
-   host (`e.g. #1369
-   `_). Make sure you
-   control access to ``docker``.
+   by allowing non-root users to gain *root* access on the
+   host. Make sure you control access to ``docker``. If you are binding
+   to a TCP port, anyone with access to that port has full Docker access,
+   so it is not advisable on an open network.
 
 With ``-H`` it is possible to make the Docker daemon listen on a
 specific IP and port. By default, it will listen on
 
@@ -121,12 +122,38 @@ Starting a long-running worker process
 
    sudo docker kill $JOB
 
-Listing all running containers
-------------------------------
+Listing containers
+------------------
 
 .. code-block:: bash
 
-    sudo docker ps
+    sudo docker ps           # Lists only running containers
+    sudo docker ps -a        # Lists all containers
+
+
+Controlling containers
+----------------------
+.. code-block:: bash
+
+    # Start a new container
+    JOB=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")
+
+    # Stop the container
+    docker stop $JOB
+
+    # Start the container
+    docker start $JOB
+
+    # Restart the container
+    docker restart $JOB
+
+    # SIGKILL a container
+    docker kill $JOB
+
+    # Remove a container
+    docker stop $JOB    # Container must be stopped to remove it
+    docker rm $JOB
+
 
 Bind a service on a TCP port
 ------------------------------
diff --git a/docs/sources/use/host_integration.rst b/docs/sources/use/host_integration.rst
index fb70195ffd..ed341cd4bc 100644
--- a/docs/sources/use/host_integration.rst
+++ b/docs/sources/use/host_integration.rst
@@ -18,10 +18,11 @@ the docker daemon with the ``-r=false`` so that docker will not
 automatically restart your containers when the host is restarted.
 
 When you have finished setting up your image and are happy with your
-running container, you may want to use a process manager to manage
+running container, you can then attach a process manager to manage
 it. When you run ``docker start -a`` docker will automatically attach
-to the process and forward all signals so that the process manager can
-detect when a container stops and correctly restart it.
+to the running container, or start it if needed and forward all signals
+so that the process manager can detect when a container stops and correctly
+restart it.
 
 Here are a few sample scripts for systemd and upstart to integrate with docker.
 
@@ -29,9 +30,10 @@ Here are a few sample scripts for systemd and upstart to integrate with docker.
 Sample Upstart Script
 ---------------------
 
-In this example we've already created a container to run Redis with an id of
-0a7e070b698b. To create an upstart script for our container, we create a file
-named ``/etc/init/redis.conf`` and place the following into it:
+In this example we've already created a container to run Redis with
+``--name redis_server``. To create an upstart script for our container,
+we create a file named ``/etc/init/redis.conf`` and place the following
+into it:
 
 .. code-block:: bash
 
@@ -46,7 +48,7 @@ named ``/etc/init/redis.conf`` and place the following into it:
     while [ ! -e $FILE ] ; do
        inotifywait -t 2 -e create $(dirname $FILE)
     done
-    /usr/bin/docker start -a 0a7e070b698b
+    /usr/bin/docker start -a redis_server
 end script
 
 Next, we have to configure docker so that it's run with the option ``-r=false``.
 
@@ -69,8 +71,8 @@ Sample systemd Script
 
 [Service]
 Restart=always
-ExecStart=/usr/bin/docker start -a 0a7e070b698b
-ExecStop=/usr/bin/docker stop -t 2 0a7e070b698b
+ExecStart=/usr/bin/docker start -a redis_server
+ExecStop=/usr/bin/docker stop -t 2 redis_server
 
 [Install]
 WantedBy=local.target
diff --git a/docs/sources/use/networking.rst b/docs/sources/use/networking.rst
index 431158cc39..c00c608550 100644
--- a/docs/sources/use/networking.rst
+++ b/docs/sources/use/networking.rst
@@ -148,6 +148,6 @@ ip link command) and the namespaces infrastructure.
 I want more
 ------------
 
-Jérôme Petazzoni has create ``pipework`` to connect together
+Jérôme Petazzoni has created ``pipework`` to connect together
 containers in arbitrarily complex scenarios:
 https://github.com/jpetazzo/pipework
diff --git a/docs/sources/use/port_redirection.rst b/docs/sources/use/port_redirection.rst
index 5cddb238e4..38d6b98841 100644
--- a/docs/sources/use/port_redirection.rst
+++ b/docs/sources/use/port_redirection.rst
@@ -31,6 +31,15 @@ container, Docker provides ways to bind the container port to an
 interface of the host system. To simplify communication between
 containers, Docker provides the linking mechanism.
 
+Auto map all exposed ports on the host
+--------------------------------------
+
+To bind all the exposed container ports to the host automatically, use
+``docker run -P ``. The mapped host ports will be auto-selected
+from a pool of unused ports (49000..49900), and you will need to use
+``docker ps``, ``docker inspect `` or
+``docker port `` to determine what they are.
+
 Binding a port to a host interface
 -----------------------------------
 
@@ -76,7 +85,7 @@ dynamically allocated ports:
 
 .. code-block:: bash
 
     # Bind to a dynamically allocated port
-    docker run -p 127.0.0.1::8080 -name dyn-bound
+    docker run -p 127.0.0.1::8080 --name dyn-bound
 
     # Lookup the actual port
     docker port dyn-bound 8080
 
@@ -112,7 +121,7 @@ Dockerfile:
 
 .. code-block:: bash
 
     # Expose port 80
-    docker run -expose 80 -name server
+    docker run -expose 80 --name server
 
 The ``client`` then links to the ``server``:
 
@@ -140,4 +149,4 @@ This tells ``client`` that a service is running on port 80 of
 ``server`` and that ``server`` is accessible at the IP address
 172.17.0.8
 
-Note: Using the ``-p`` parameter also exposes the port..
+Note: Using the ``-p`` parameter also exposes the port.
diff --git a/docs/sources/use/working_with_links_names.rst b/docs/sources/use/working_with_links_names.rst
index 8c654cbdbc..1b0e9f6914 100644
--- a/docs/sources/use/working_with_links_names.rst
+++ b/docs/sources/use/working_with_links_names.rst
@@ -59,7 +59,7 @@ inter-container communication is set to false.
 
 For example, there is an image called ``crosbymichael/redis`` that exposes the
 port 6379 and starts the Redis server.
Let's name the container as ``redis`` -based on that image and run it as daemon. +based on that image and run it as a daemon. .. code-block:: bash diff --git a/docs/sources/use/working_with_volumes.rst b/docs/sources/use/working_with_volumes.rst index 34728cbd3d..755be009e3 100644 --- a/docs/sources/use/working_with_volumes.rst +++ b/docs/sources/use/working_with_volumes.rst @@ -7,10 +7,6 @@ Share Directories via Volumes ============================= -.. versionadded:: v0.3.0 - Data volumes have been available since version 1 of the - :doc:`../reference/api/docker_remote_api` - A *data volume* is a specially-designated directory within one or more containers that bypasses the :ref:`ufs_def` to provide several useful features for persistent or shared data: @@ -24,9 +20,15 @@ features for persistent or shared data: * **Changes to a data volume will not be included at the next commit** because they are not recorded as regular filesystem changes in the top layer of the :ref:`ufs_def` +* **Volumes persist until no containers use them** as they are a reference + counted resource. The container does not need to be running to share its + volumes, but running it can help protect it against accidental removal + via ``docker rm``. Each container can have zero or more data volumes. +.. versionadded:: v0.3.0 + Getting Started ............... @@ -40,7 +42,7 @@ two new volumes:: This command will create the new container with two new volumes that exits instantly (``true`` is pretty much the smallest, simplest program that you can run). Once created you can mount its volumes in any other -container using the ``-volumes-from`` option; irrespecive of whether the +container using the ``-volumes-from`` option; irrespective of whether the container is running or not. Or, you can use the VOLUME instruction in a Dockerfile to add one or more new @@ -50,7 +52,7 @@ volumes to any container created from that image:: # RUN-USING: docker run -name DATA data FROM busybox VOLUME ["/var/volume1", "/var/volume2"] - CMD ["/usr/bin/true"] + CMD ["/bin/true"] Creating and mounting a Data Volume Container --------------------------------------------- @@ -80,7 +82,7 @@ similar to :ref:`ambassador_pattern_linking `. If you remove containers that mount volumes, including the initial DATA container, or the middleman, the volumes will not be deleted until there are no containers still -referencing those volumes. This allows you to upgrade, or effectivly migrate data volumes +referencing those volumes. This allows you to upgrade, or effectively migrate data volumes between containers. Mount a Host Directory as a Container Volume: @@ -90,6 +92,7 @@ Mount a Host Directory as a Container Volume: -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. +You must specify an absolute path for ``host-dir``. If ``host-dir`` is missing from the command, then docker creates a new volume. If ``host-dir`` is present but points to a non-existent directory on the host, Docker will automatically create this directory and use it as the source of the @@ -101,13 +104,55 @@ might not work on any other machine. For example:: - sudo docker run -v /var/logs:/var/host_logs:ro ubuntu bash + sudo docker run -t -i -v /var/logs:/var/host_logs:ro ubuntu bash The command above mounts the host directory ``/var/logs`` into the container with read only permissions as ``/var/host_logs``. .. 
versionadded:: v0.5.0
+
+Note for OS/X users and remote daemon users:
+--------------------------------------------
+
+OS/X users run ``boot2docker`` to create a minimalist virtual machine running the docker daemon. That
+virtual machine then launches docker commands on behalf of the OS/X command line. This means that ``host
+directories`` refer to directories in the ``boot2docker`` virtual machine, not the OS/X filesystem.
+
+Similarly, whenever the docker daemon is on a remote machine, the ``host directories`` always refer to directories on the daemon's machine.
+
+Backup, restore, or migrate data volumes
+----------------------------------------
+
+You cannot back up volumes using ``docker export``, ``docker save`` and ``docker cp``
+because they are external to images.
+Instead you can use ``--volumes-from`` to start a new container that can access the
+data-container's volume. For example::
+
+    $ sudo docker run -rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data
+
+* ``-rm`` - remove the container when it exits
+* ``--volumes-from DATA`` - attach to the volumes shared by the ``DATA`` container
+* ``-v $(pwd):/backup`` - bind mount the current directory into the container so the tar file can be written to it
+* ``busybox`` - a small, simple image - good for quick maintenance
+* ``tar cvf /backup/backup.tar /data`` - creates an uncompressed tar file of all the files in the ``/data`` directory
+
+Then to restore to the same container, or another that you've made elsewhere::
+
+    # create a new data container
+    $ sudo docker run -v /data -name DATA2 busybox true
+    # untar the backup files into the new container's data volume
+    $ sudo docker run -rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar
+    data/
+    data/sven.txt
+    # compare to the original container
+    $ sudo docker run -rm --volumes-from DATA -v `pwd`:/backup busybox ls /data
+    sven.txt
+
+
+You can use the basic techniques above to automate backup, migration and restore
+testing using your preferred tools.
+
 Known Issues
 ............
diff --git a/docs/sources/use/workingwithrepository.rst b/docs/sources/use/workingwithrepository.rst
index 38062556cb..cbde932cde 100644
--- a/docs/sources/use/workingwithrepository.rst
+++ b/docs/sources/use/workingwithrepository.rst
@@ -7,9 +7,9 @@
 Share Images via Repositories
 =============================
 
-A *repository* is a hosted collection of tagged :ref:`images
-` that together create the file system for a container. The
-repository's name is a tag that indicates the provenance of the
+A *repository* is a shareable collection of tagged :ref:`images`
+that together create the file systems for containers. The
+repository's name is a label that indicates the provenance of the
 repository, i.e. who created it and where the original copy is
 located.
 
@@ -19,7 +19,7 @@ tag. The implicit registry is located at ``index.docker.io``, the home of
 "top-level" repositories and the Central Index. This registry may also
 include public "user" repositories.
 
-So Docker is not only a tool for creating and managing your own
+Docker is not only a tool for creating and managing your own
 :ref:`containers ` -- **Docker is also a tool for sharing**. The Docker
 project provides a Central Registry to host public repositories,
 namespaced by user, and a Central Index which
@@ -28,6 +28,12 @@ repositories. You can host your own Registry too! Docker acts as a
 client for these services via ``docker search, pull, login`` and
 ``push``.
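``docker search`` is itself a client of the ``/images/search`` endpoint documented earlier in this patch. For illustration, a hypothetical Go client that mirrors it; the daemon address is an assumption, and the response field names are taken from the endpoint's example:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// searchResult mirrors the response keys of GET /images/search.
type searchResult struct {
	Description string `json:"description"`
	IsOfficial  bool   `json:"is_official"`
	IsTrusted   bool   `json:"is_trusted"`
	Name        string `json:"name"`
	StarCount   int    `json:"star_count"`
}

func main() {
	term := url.QueryEscape("sshd")
	resp, err := http.Get("http://127.0.0.1:4243/images/search?term=" + term)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var results []searchResult
	if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
		panic(err)
	}
	for _, r := range results {
		fmt.Printf("%-30s stars=%d official=%v\n", r.Name, r.StarCount, r.IsOfficial)
	}
}
```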
+Local Repositories
+------------------
+
+Docker images which have been created and labeled on your local Docker server
+need to be pushed to a Public or Private registry to be shared.
+
 .. _using_public_repositories:
 
 Public Repositories
 
@@ -58,8 +64,8 @@ Find Public Images on the Central Index
 ---------------------------------------
 
 You can search the Central Index `online `_
-or by the CLI. Searching can find images by name, user name or
-description:
+or using the command line interface. Searching can find images by name, user
+name or description:
 
 .. code-block:: bash
 
@@ -136,13 +142,13 @@ name for the image.
 
 .. _image_push:
 
-Pushing an image to its repository
-----------------------------------
+Pushing a repository to its registry
+------------------------------------
 
-In order to push an image to its repository you need to have committed
-your container to a named image (see above)
+In order to push a repository to its registry you need to have named an image,
+or committed your container to a named image (see above)
 
-Now you can commit this image to the repository designated by its name
+Now you can push this repository to the registry designated by its name
 or tag.
 
 .. code-block:: bash
 
@@ -156,7 +162,7 @@ Trusted Builds
 --------------
 
 Trusted Builds automate the building and updating of images from GitHub, directly
-on docker.io servers. It works by adding a commit hook to your selected repository,
+on ``docker.io`` servers. It works by adding a commit hook to your selected repository,
 triggering a build and update when you push a commit.
 
 To setup a trusted build
 
@@ -180,21 +186,22 @@ If you want to see the status of your Trusted Builds you can go to your
 `Trusted Builds page `_ on the Docker index,
 and it will show you the status of your builds, and the build history.
 
-Once you've created a Trusted Build you can deactive or delete it. You cannot
+Once you've created a Trusted Build you can deactivate or delete it. You cannot
 however push to a Trusted Build with the ``docker push`` command. You can only
 manage it by committing code to your GitHub repository.
 
 You can create multiple Trusted Builds per repository and configure them to
 point to specific ``Dockerfile``'s or Git branches.
 
-Private Repositories
---------------------
+Private Registry
+----------------
 
-Right now (version 0.6), private repositories are only possible by
-hosting `your own registry
+Private registries and private shared repositories are
+only possible by hosting `your own registry
 `_. To push or pull to a repository on your own
 registry, you must prefix the tag with the
-address of the registry's host, like this:
+address of the registry's host (a ``.`` or ``:`` is used to identify a host),
+like this:
 
 ..
code-block:: bash diff --git a/docs/theme/docker/layout.html b/docs/theme/docker/layout.html index a966556044..7d78fb9c3c 100755 --- a/docs/theme/docker/layout.html +++ b/docs/theme/docker/layout.html @@ -3,6 +3,7 @@ + {{ meta['title'] if meta and meta['title'] else title }} - Docker Documentation diff --git a/docs/theme/docker/static/css/main.css b/docs/theme/docker/static/css/main.css index 1195801542..ce4ba7b869 100755 --- a/docs/theme/docker/static/css/main.css +++ b/docs/theme/docker/static/css/main.css @@ -428,6 +428,9 @@ dt:hover > a.headerlink { float: right; visibility: hidden; } +h2, h3, h4, h5, h6 { + margin-top: 0.7em; +} /* ===================================== Miscellaneous information ====================================== */ diff --git a/docs/theme/docker/static/css/main.less b/docs/theme/docker/static/css/main.less index 8c9296d979..e248e21c08 100644 --- a/docs/theme/docker/static/css/main.less +++ b/docs/theme/docker/static/css/main.less @@ -631,6 +631,10 @@ dt:hover > a.headerlink { visibility: hidden; } +h2, h3, h4, h5, h6 { + margin-top: 0.7em; +} + /* ===================================== Miscellaneous information ====================================== */ diff --git a/engine/engine.go b/engine/engine.go index ec880b9c85..685924077c 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -1,13 +1,14 @@ package engine import ( + "bufio" "fmt" "github.com/dotcloud/docker/utils" "io" "log" "os" - "path/filepath" "runtime" + "sort" "strings" ) @@ -28,6 +29,10 @@ func Register(name string, handler Handler) error { return nil } +func unregister(name string) { + delete(globalHandlers, name) +} + // The Engine is the core of Docker. // It acts as a store for *containers*, and allows manipulation of these // containers by executing *jobs*. @@ -84,19 +89,6 @@ func New(root string) (*Engine, error) { return nil, err } - // Docker makes some assumptions about the "absoluteness" of root - // ... so let's make sure it has no symlinks - if p, err := filepath.Abs(root); err != nil { - log.Fatalf("Unable to get absolute root (%s): %s", root, err) - } else { - root = p - } - if p, err := filepath.EvalSymlinks(root); err != nil { - log.Fatalf("Unable to canonicalize root (%s): %s", root, err) - } else { - root = p - } - eng := &Engine{ root: root, handlers: make(map[string]Handler), @@ -105,6 +97,12 @@ func New(root string) (*Engine, error) { Stderr: os.Stderr, Stdin: os.Stdin, } + eng.Register("commands", func(job *Job) Status { + for _, name := range eng.commands() { + job.Printf("%s\n", name) + } + return StatusOK + }) // Copy existing global handlers for k, v := range globalHandlers { eng.handlers[k] = v @@ -116,6 +114,17 @@ func (eng *Engine) String() string { return fmt.Sprintf("%s|%s", eng.Root(), eng.id[:8]) } +// Commands returns a list of all currently registered commands, +// sorted alphabetically. +func (eng *Engine) commands() []string { + names := make([]string, 0, len(eng.handlers)) + for name := range eng.handlers { + names = append(names, name) + } + sort.Strings(names) + return names +} + // Job creates a new job which can later be executed. // This function mimics `Command` from the standard os/exec package. func (eng *Engine) Job(name string, args ...string) *Job { @@ -136,6 +145,48 @@ func (eng *Engine) Job(name string, args ...string) *Job { return job } +// ParseJob creates a new job from a text description using a shell-like syntax. +// +// The following syntax is used to parse `input`: +// +// * Words are separated using standard whitespaces as separators. 
+// * Quotes and backslashes are not interpreted. +// * Words of the form 'KEY=[VALUE]' are added to the job environment. +// * All other words are added to the job arguments. +// +// For example: +// +// job, _ := eng.ParseJob("VERBOSE=1 echo hello TEST=true world") +// +// The resulting job will have: +// job.Args={"echo", "hello", "world"} +// job.Env={"VERBOSE":"1", "TEST":"true"} +// +func (eng *Engine) ParseJob(input string) (*Job, error) { + // FIXME: use a full-featured command parser + scanner := bufio.NewScanner(strings.NewReader(input)) + scanner.Split(bufio.ScanWords) + var ( + cmd []string + env Env + ) + for scanner.Scan() { + word := scanner.Text() + kv := strings.SplitN(word, "=", 2) + if len(kv) == 2 { + env.Set(kv[0], kv[1]) + } else { + cmd = append(cmd, word) + } + } + if len(cmd) == 0 { + return nil, fmt.Errorf("empty command: '%s'", input) + } + job := eng.Job(cmd[0], cmd[1:]...) + job.Env().Init(&env) + return job, nil +} + func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) { if os.Getenv("TEST") == "" { prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n")) diff --git a/engine/engine_test.go b/engine/engine_test.go index 065a19f492..a16c352678 100644 --- a/engine/engine_test.go +++ b/engine/engine_test.go @@ -1,9 +1,12 @@ package engine import ( + "bytes" "io/ioutil" "os" "path" + "path/filepath" + "strings" "testing" ) @@ -15,6 +18,8 @@ func TestRegister(t *testing.T) { if err := Register("dummy1", nil); err == nil { t.Fatalf("Expecting error, got none") } + // Register is global so let's cleanup to avoid conflicts + defer unregister("dummy1") eng := newTestEngine(t) @@ -31,6 +36,7 @@ func TestRegister(t *testing.T) { if err := eng.Register("dummy2", nil); err == nil { t.Fatalf("Expecting error, got none") } + defer unregister("dummy2") } func TestJob(t *testing.T) { @@ -47,6 +53,7 @@ func TestJob(t *testing.T) { } eng.Register("dummy2", h) + defer unregister("dummy2") job2 := eng.Job("dummy2", "--level=awesome") if job2.handler == nil { @@ -58,12 +65,42 @@ func TestJob(t *testing.T) { } } +func TestEngineCommands(t *testing.T) { + eng := newTestEngine(t) + defer os.RemoveAll(eng.Root()) + handler := func(job *Job) Status { return StatusOK } + eng.Register("foo", handler) + eng.Register("bar", handler) + eng.Register("echo", handler) + eng.Register("die", handler) + var output bytes.Buffer + commands := eng.Job("commands") + commands.Stdout.Add(&output) + commands.Run() + expected := "bar\ncommands\ndie\necho\nfoo\n" + if result := output.String(); result != expected { + t.Fatalf("Unexpected output:\nExpected = %v\nResult = %v\n", expected, result) + } +} + func TestEngineRoot(t *testing.T) { tmp, err := ioutil.TempDir("", "docker-test-TestEngineCreateDir") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) + // We expect Root to resolve to an absolute path. + // FIXME: this should not be necessary. + // Until the above FIXME is implemented, let's check for the + // current behavior. 
+ tmp, err = filepath.EvalSymlinks(tmp) + if err != nil { + t.Fatal(err) + } + tmp, err = filepath.Abs(tmp) + if err != nil { + t.Fatal(err) + } dir := path.Join(tmp, "dir") eng, err := New(dir) if err != nil { @@ -101,3 +138,40 @@ func TestEngineLogf(t *testing.T) { t.Fatalf("Test: Logf() should print at least as much as the input\ninput=%d\nprinted=%d", len(input), n) } } + +func TestParseJob(t *testing.T) { + eng := newTestEngine(t) + defer os.RemoveAll(eng.Root()) + // Verify that the resulting job calls to the right place + var called bool + eng.Register("echo", func(job *Job) Status { + called = true + return StatusOK + }) + input := "echo DEBUG=1 hello world VERBOSITY=42" + job, err := eng.ParseJob(input) + if err != nil { + t.Fatal(err) + } + if job.Name != "echo" { + t.Fatalf("Invalid job name: %v", job.Name) + } + if strings.Join(job.Args, ":::") != "hello:::world" { + t.Fatalf("Invalid job args: %v", job.Args) + } + if job.Env().Get("DEBUG") != "1" { + t.Fatalf("Invalid job env: %v", job.Env) + } + if job.Env().Get("VERBOSITY") != "42" { + t.Fatalf("Invalid job env: %v", job.Env) + } + if len(job.Env().Map()) != 2 { + t.Fatalf("Invalid job env: %v", job.Env) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + if !called { + t.Fatalf("Job was not called") + } +} diff --git a/engine/env.go b/engine/env.go index ce8c34bb24..c43a5ec971 100644 --- a/engine/env.go +++ b/engine/env.go @@ -36,6 +36,13 @@ func (env *Env) Exists(key string) bool { return exists } +func (env *Env) Init(src *Env) { + (*env) = make([]string, 0, len(*src)) + for _, val := range *src { + (*env) = append((*env), val) + } +} + func (env *Env) GetBool(key string) (value bool) { s := strings.ToLower(strings.Trim(env.Get(key), " \t")) if s == "" || s == "0" || s == "no" || s == "false" || s == "none" { diff --git a/engine/job.go b/engine/job.go index 1f35ac85ff..e83e18e4d7 100644 --- a/engine/job.go +++ b/engine/job.go @@ -74,7 +74,7 @@ func (job *Job) Run() error { return err } if job.status != 0 { - return fmt.Errorf("%s: %s", job.Name, errorMessage) + return fmt.Errorf("%s", errorMessage) } return nil } @@ -102,6 +102,10 @@ func (job *Job) String() string { return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString()) } +func (job *Job) Env() *Env { + return job.env +} + func (job *Job) EnvExists(key string) (value bool) { return job.env.Exists(key) } @@ -197,11 +201,14 @@ func (job *Job) Printf(format string, args ...interface{}) (n int, err error) { } func (job *Job) Errorf(format string, args ...interface{}) Status { + if format[len(format)-1] != '\n' { + format = format + "\n" + } fmt.Fprintf(job.Stderr, format, args...) return StatusErr } func (job *Job) Error(err error) Status { - fmt.Fprintf(job.Stderr, "%s", err) + fmt.Fprintf(job.Stderr, "%s\n", err) return StatusErr } diff --git a/execdriver/chroot/driver.go b/execdriver/chroot/driver.go deleted file mode 100644 index 396df87bad..0000000000 --- a/execdriver/chroot/driver.go +++ /dev/null @@ -1,101 +0,0 @@ -package chroot - -import ( - "fmt" - "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/pkg/mount" - "os" - "os/exec" - "syscall" -) - -const ( - DriverName = "chroot" - Version = "0.1" -) - -func init() { - execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { - if err := mount.ForceMount("proc", "proc", "proc", ""); err != nil { - return err - } - defer mount.ForceUnmount("proc") - cmd := exec.Command(args.Args[0], args.Args[1:]...) 
-
-	cmd.Stderr = os.Stderr
-	cmd.Stdout = os.Stdout
-	cmd.Stdin = os.Stdin
-
-	return cmd.Run()
-	})
-}
-
-type driver struct {
-}
-
-func NewDriver() (*driver, error) {
-	return &driver{}, nil
-}
-
-func (d *driver) Run(c *execdriver.Command, startCallback execdriver.StartCallback) (int, error) {
-	params := []string{
-		"chroot",
-		c.Rootfs,
-		"/.dockerinit",
-		"-driver",
-		DriverName,
-	}
-	params = append(params, c.Entrypoint)
-	params = append(params, c.Arguments...)
-
-	var (
-		name = params[0]
-		arg  = params[1:]
-	)
-	aname, err := exec.LookPath(name)
-	if err != nil {
-		aname = name
-	}
-	c.Path = aname
-	c.Args = append([]string{name}, arg...)
-
-	if err := c.Start(); err != nil {
-		return -1, err
-	}
-
-	if startCallback != nil {
-		startCallback(c)
-	}
-
-	err = c.Wait()
-	return getExitCode(c), err
-}
-
-/// Return the exit code of the process
-// if the process has not exited -1 will be returned
-func getExitCode(c *execdriver.Command) int {
-	if c.ProcessState == nil {
-		return -1
-	}
-	return c.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
-}
-
-func (d *driver) Kill(p *execdriver.Command, sig int) error {
-	return p.Process.Kill()
-}
-
-func (d *driver) Restore(c *execdriver.Command) error {
-	panic("Not Implemented")
-}
-
-func (d *driver) Info(id string) execdriver.Info {
-	panic("Not implemented")
-}
-
-func (d *driver) Name() string {
-	return fmt.Sprintf("%s-%s", DriverName, Version)
-}
-
-func (d *driver) GetPidsForContainer(id string) ([]int, error) {
-	return nil, fmt.Errorf("Not supported")
-}
diff --git a/execdriver/driver.go b/execdriver/driver.go
index 1ea086075d..ec8f48f52d 100644
--- a/execdriver/driver.go
+++ b/execdriver/driver.go
@@ -2,6 +2,8 @@ package execdriver
 
 import (
 	"errors"
+	"io"
+	"os"
 	"os/exec"
 )
 
@@ -49,6 +51,9 @@ type InitArgs struct {
 	Args    []string
 	Mtu     int
 	Driver  string
+	Console string
+	Pipe    int
+	Root    string
 }
 
 // Driver specific information based on
@@ -57,10 +62,21 @@ type Info interface {
 	IsRunning() bool
 }
 
+// Terminal is an interface for drivers to implement
+// if they want to support Close and Resize calls from
+// the core
+type Terminal interface {
+	io.Closer
+	Resize(height, width int) error
+}
+
+type TtyTerminal interface {
+	Master() *os.File
+}
+
 type Driver interface {
-	Run(c *Command, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code
+	Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code
 	Kill(c *Command, sig int) error
-	Restore(c *Command) error // Wait and try to re-attach on an out of process command
 	Name() string                                 // Driver name
 	Info(id string) Info                          // "temporary" hack (until we move state from core to plugins)
 	GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container.
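The Terminal interface introduced above asks drivers for only two operations, Close and Resize. As a rough sketch of the smallest possible implementation, with the interface restated locally so the snippet compiles on its own (`nullTerminal` is hypothetical and not part of execdriver):

```go
package main

import (
	"fmt"
	"io"
)

// Terminal restated from execdriver/driver.go above so this
// sketch is self-contained.
type Terminal interface {
	io.Closer
	Resize(height, width int) error
}

// nullTerminal is a hypothetical no-op implementation: a driver
// whose command has no console attached can still satisfy the
// interface without doing any work.
type nullTerminal struct{}

func (nullTerminal) Close() error                   { return nil }
func (nullTerminal) Resize(height, width int) error { return nil }

func main() {
	var t Terminal = nullTerminal{}
	fmt.Println(t.Resize(24, 80), t.Close()) // <nil> <nil>
}
```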
@@ -82,7 +98,6 @@ type Resources struct { } // Process wrapps an os/exec.Cmd to add more metadata -// TODO: Rename to Command type Command struct { exec.Cmd `json:"-"` @@ -99,13 +114,14 @@ type Command struct { Network *Network `json:"network"` // if network is nil then networking is disabled Config []string `json:"config"` // generic values that specific drivers can consume Resources *Resources `json:"resources"` + + Terminal Terminal `json:"-"` // standard or tty terminal + Console string `json:"-"` // dev/console path + ContainerPid int `json:"container_pid"` // the pid for the process inside a container } // Return the pid of the process // If the process is nil -1 will be returned func (c *Command) Pid() int { - if c.Process == nil { - return -1 - } - return c.Process.Pid + return c.ContainerPid } diff --git a/execdriver/lxc/driver.go b/execdriver/lxc/driver.go index 4c3979e718..765a52ee43 100644 --- a/execdriver/lxc/driver.go +++ b/execdriver/lxc/driver.go @@ -76,7 +76,10 @@ func (d *driver) Name() string { return fmt.Sprintf("%s-%s", DriverName, version) } -func (d *driver) Run(c *execdriver.Command, startCallback execdriver.StartCallback) (int, error) { +func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + if err := execdriver.SetTerminal(c, pipes); err != nil { + return -1, err + } configPath, err := d.generateLXCConfig(c) if err != nil { return -1, err @@ -163,9 +166,11 @@ func (d *driver) Run(c *execdriver.Command, startCallback execdriver.StartCallba }() // Poll lxc for RUNNING status - if err := d.waitForStart(c, waitLock); err != nil { + pid, err := d.waitForStart(c, waitLock) + if err != nil { return -1, err } + c.ContainerPid = pid if startCallback != nil { startCallback(c) @@ -186,43 +191,39 @@ func getExitCode(c *execdriver.Command) int { } func (d *driver) Kill(c *execdriver.Command, sig int) error { - return d.kill(c, sig) -} - -func (d *driver) Restore(c *execdriver.Command) error { - for { - output, err := exec.Command("lxc-info", "-n", c.ID).CombinedOutput() - if err != nil { - return err - } - if !strings.Contains(string(output), "RUNNING") { - return nil - } - time.Sleep(500 * time.Millisecond) - } + return KillLxc(c.ID, sig) } func (d *driver) version() string { - version := "" - if output, err := exec.Command("lxc-version").CombinedOutput(); err == nil { - outputStr := string(output) - if len(strings.SplitN(outputStr, ":", 2)) == 2 { - version = strings.TrimSpace(strings.SplitN(outputStr, ":", 2)[1]) + var ( + version string + output []byte + err error + ) + if _, errPath := exec.LookPath("lxc-version"); errPath == nil { + output, err = exec.Command("lxc-version").CombinedOutput() + } else { + output, err = exec.Command("lxc-start", "--version").CombinedOutput() + } + if err == nil { + version = strings.TrimSpace(string(output)) + if parts := strings.SplitN(version, ":", 2); len(parts) == 2 { + version = strings.TrimSpace(parts[1]) } } return version } -func (d *driver) kill(c *execdriver.Command, sig int) error { +func KillLxc(id string, sig int) error { var ( err error output []byte ) _, err = exec.LookPath("lxc-kill") if err == nil { - output, err = exec.Command("lxc-kill", "-n", c.ID, strconv.Itoa(sig)).CombinedOutput() + output, err = exec.Command("lxc-kill", "-n", id, strconv.Itoa(sig)).CombinedOutput() } else { - output, err = exec.Command("lxc-stop", "-k", "-n", c.ID, strconv.Itoa(sig)).CombinedOutput() + output, err = exec.Command("lxc-stop", "-k", "-n", id, 
strconv.Itoa(sig)).CombinedOutput() } if err != nil { return fmt.Errorf("Err: %s Output: %s", err, output) @@ -230,7 +231,8 @@ func (d *driver) kill(c *execdriver.Command, sig int) error { return nil } -func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) error { +// wait for the process to start and return the pid for the process +func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) (int, error) { var ( err error output []byte @@ -243,10 +245,7 @@ func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) err select { case <-waitLock: // If the process dies while waiting for it, just return - return nil - if c.ProcessState != nil && c.ProcessState.Exited() { - return nil - } + return -1, nil default: } @@ -254,19 +253,23 @@ func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) err if err != nil { output, err = d.getInfo(c.ID) if err != nil { - return err + return -1, err } } - if strings.Contains(string(output), "RUNNING") { - return nil + info, err := parseLxcInfo(string(output)) + if err != nil { + return -1, err + } + if info.Running { + return info.Pid, nil } time.Sleep(50 * time.Millisecond) } - return execdriver.ErrNotRunning + return -1, execdriver.ErrNotRunning } func (d *driver) getInfo(id string) ([]byte, error) { - return exec.Command("lxc-info", "-s", "-n", id).CombinedOutput() + return exec.Command("lxc-info", "-n", id).CombinedOutput() } type info struct { @@ -279,7 +282,8 @@ func (i *info) IsRunning() bool { output, err := i.driver.getInfo(i.ID) if err != nil { - panic(err) + utils.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output) + return false } if strings.Contains(string(output), "RUNNING") { running = true @@ -297,9 +301,8 @@ func (d *driver) Info(id string) execdriver.Info { func (d *driver) GetPidsForContainer(id string) ([]int, error) { pids := []int{} - // memory is chosen randomly, any cgroup used by docker works - subsystem := "memory" - + // cpu is chosen because it is the only non optional subsystem in cgroups + subsystem := "cpu" cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) if err != nil { return pids, err diff --git a/execdriver/lxc/info.go b/execdriver/lxc/info.go new file mode 100644 index 0000000000..3b2ea0d07f --- /dev/null +++ b/execdriver/lxc/info.go @@ -0,0 +1,50 @@ +package lxc + +import ( + "bufio" + "errors" + "strconv" + "strings" +) + +var ( + ErrCannotParse = errors.New("cannot parse raw input") +) + +type lxcInfo struct { + Running bool + Pid int +} + +func parseLxcInfo(raw string) (*lxcInfo, error) { + if raw == "" { + return nil, ErrCannotParse + } + var ( + err error + s = bufio.NewScanner(strings.NewReader(raw)) + info = &lxcInfo{} + ) + for s.Scan() { + text := s.Text() + + if s.Err() != nil { + return nil, s.Err() + } + + parts := strings.Split(text, ":") + if len(parts) < 2 { + continue + } + switch strings.TrimSpace(parts[0]) { + case "state": + info.Running = strings.TrimSpace(parts[1]) == "RUNNING" + case "pid": + info.Pid, err = strconv.Atoi(strings.TrimSpace(parts[1])) + if err != nil { + return nil, err + } + } + } + return info, nil +} diff --git a/execdriver/lxc/info_test.go b/execdriver/lxc/info_test.go new file mode 100644 index 0000000000..edafc02511 --- /dev/null +++ b/execdriver/lxc/info_test.go @@ -0,0 +1,36 @@ +package lxc + +import ( + "testing" +) + +func TestParseRunningInfo(t *testing.T) { + raw := ` + state: RUNNING + pid: 50` + + info, err := parseLxcInfo(raw) + if err != nil { + 
t.Fatal(err) + } + if !info.Running { + t.Fatal("info should return a running state") + } + if info.Pid != 50 { + t.Fatalf("info should have pid 50 got %d", info.Pid) + } +} + +func TestEmptyInfo(t *testing.T) { + _, err := parseLxcInfo("") + if err == nil { + t.Fatal("error should not be nil") + } +} + +func TestBadInfo(t *testing.T) { + _, err := parseLxcInfo("state") + if err != nil { + t.Fatal(err) + } +} diff --git a/execdriver/lxc/init.go b/execdriver/lxc/init.go index 7c2b039c50..e138915212 100644 --- a/execdriver/lxc/init.go +++ b/execdriver/lxc/init.go @@ -4,11 +4,10 @@ import ( "fmt" "github.com/dotcloud/docker/execdriver" "github.com/dotcloud/docker/pkg/netlink" - "github.com/dotcloud/docker/utils" + "github.com/dotcloud/docker/pkg/user" "github.com/syndtr/gocapability/capability" "net" "os" - "strconv" "strings" "syscall" ) @@ -79,35 +78,28 @@ func setupWorkingDirectory(args *execdriver.InitArgs) error { // Takes care of dropping privileges to the desired user func changeUser(args *execdriver.InitArgs) error { - if args.User == "" { - return nil - } - userent, err := utils.UserLookup(args.User) + uid, gid, suppGids, err := user.GetUserGroupSupplementary( + args.User, + syscall.Getuid(), syscall.Getgid(), + ) if err != nil { - return fmt.Errorf("Unable to find user %v: %v", args.User, err) + return err } - uid, err := strconv.Atoi(userent.Uid) - if err != nil { - return fmt.Errorf("Invalid uid: %v", userent.Uid) + if err := syscall.Setgroups(suppGids); err != nil { + return fmt.Errorf("Setgroups failed: %v", err) } - gid, err := strconv.Atoi(userent.Gid) - if err != nil { - return fmt.Errorf("Invalid gid: %v", userent.Gid) - } - if err := syscall.Setgid(gid); err != nil { - return fmt.Errorf("setgid failed: %v", err) + return fmt.Errorf("Setgid failed: %v", err) } if err := syscall.Setuid(uid); err != nil { - return fmt.Errorf("setuid failed: %v", err) + return fmt.Errorf("Setuid failed: %v", err) } return nil } func setupCapabilities(args *execdriver.InitArgs) error { - if args.Privileged { return nil } @@ -127,6 +119,7 @@ func setupCapabilities(args *execdriver.InitArgs) error { capability.CAP_AUDIT_CONTROL, capability.CAP_MAC_OVERRIDE, capability.CAP_MAC_ADMIN, + capability.CAP_NET_ADMIN, } c, err := capability.NewPid(os.Getpid()) diff --git a/execdriver/lxc/lxc_template.go b/execdriver/lxc/lxc_template.go index 705bdf5363..1181396a18 100644 --- a/execdriver/lxc/lxc_template.go +++ b/execdriver/lxc/lxc_template.go @@ -12,9 +12,11 @@ const LxcTemplate = ` lxc.network.type = veth lxc.network.link = {{.Network.Bridge}} lxc.network.name = eth0 +lxc.network.mtu = {{.Network.Mtu}} {{else}} # network is disabled (-n=false) lxc.network.type = empty +lxc.network.flags = up {{end}} # root filesystem @@ -79,6 +81,10 @@ lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noex # if your userspace allows it. eg. 
see http://bit.ly/T9CkqJ lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0 +{{if .Tty}} +lxc.mount.entry = {{.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0 +{{end}} + lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0 lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs size=65536k,nosuid,nodev,noexec 0 0 diff --git a/execdriver/native/default_template.go b/execdriver/native/default_template.go new file mode 100644 index 0000000000..6e7d597b7b --- /dev/null +++ b/execdriver/native/default_template.go @@ -0,0 +1,90 @@ +package native + +import ( + "fmt" + "github.com/dotcloud/docker/execdriver" + "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer" + "os" +) + +// createContainer populates and configures the container type with the +// data provided by the execdriver.Command +func createContainer(c *execdriver.Command) *libcontainer.Container { + container := getDefaultTemplate() + + container.Hostname = getEnv("HOSTNAME", c.Env) + container.Tty = c.Tty + container.User = c.User + container.WorkingDir = c.WorkingDir + container.Env = c.Env + + if c.Network != nil { + container.Networks = []*libcontainer.Network{ + { + Mtu: c.Network.Mtu, + Address: fmt.Sprintf("%s/%d", c.Network.IPAddress, c.Network.IPPrefixLen), + Gateway: c.Network.Gateway, + Type: "veth", + Context: libcontainer.Context{ + "prefix": "veth", + "bridge": c.Network.Bridge, + }, + }, + } + } + + container.Cgroups.Name = c.ID + if c.Privileged { + container.Capabilities = nil + container.Cgroups.DeviceAccess = true + container.Context["apparmor_profile"] = "unconfined" + } + if c.Resources != nil { + container.Cgroups.CpuShares = c.Resources.CpuShares + container.Cgroups.Memory = c.Resources.Memory + container.Cgroups.MemorySwap = c.Resources.MemorySwap + } + // check to see if we are running in ramdisk to disable pivot root + container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" + + return container +} + +// getDefaultTemplate returns the docker default for +// the libcontainer configuration file +func getDefaultTemplate() *libcontainer.Container { + return &libcontainer.Container{ + Capabilities: libcontainer.Capabilities{ + libcontainer.GetCapability("SETPCAP"), + libcontainer.GetCapability("SYS_MODULE"), + libcontainer.GetCapability("SYS_RAWIO"), + libcontainer.GetCapability("SYS_PACCT"), + libcontainer.GetCapability("SYS_ADMIN"), + libcontainer.GetCapability("SYS_NICE"), + libcontainer.GetCapability("SYS_RESOURCE"), + libcontainer.GetCapability("SYS_TIME"), + libcontainer.GetCapability("SYS_TTY_CONFIG"), + libcontainer.GetCapability("MKNOD"), + libcontainer.GetCapability("AUDIT_WRITE"), + libcontainer.GetCapability("AUDIT_CONTROL"), + libcontainer.GetCapability("MAC_OVERRIDE"), + libcontainer.GetCapability("MAC_ADMIN"), + libcontainer.GetCapability("NET_ADMIN"), + }, + Namespaces: libcontainer.Namespaces{ + libcontainer.GetNamespace("NEWNS"), + libcontainer.GetNamespace("NEWUTS"), + libcontainer.GetNamespace("NEWIPC"), + libcontainer.GetNamespace("NEWPID"), + libcontainer.GetNamespace("NEWNET"), + }, + Cgroups: &cgroups.Cgroup{ + Parent: "docker", + DeviceAccess: false, + }, + Context: libcontainer.Context{ + "apparmor_profile": "docker-default", + }, + } +} diff --git a/execdriver/native/driver.go b/execdriver/native/driver.go new file mode 100644 index 0000000000..452e802523 --- /dev/null +++ b/execdriver/native/driver.go @@ -0,0 +1,251 @@ 
+package native + +import ( + "encoding/json" + "fmt" + "github.com/dotcloud/docker/execdriver" + "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/apparmor" + "github.com/dotcloud/docker/pkg/libcontainer/nsinit" + "github.com/dotcloud/docker/pkg/system" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "syscall" +) + +const ( + DriverName = "native" + Version = "0.1" +) + +func init() { + execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { + var ( + container *libcontainer.Container + ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}, &nsinit.DefaultStateWriter{args.Root}) + ) + f, err := os.Open(filepath.Join(args.Root, "container.json")) + if err != nil { + return err + } + if err := json.NewDecoder(f).Decode(&container); err != nil { + f.Close() + return err + } + f.Close() + + cwd, err := os.Getwd() + if err != nil { + return err + } + syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(args.Pipe)) + if err != nil { + return err + } + if err := ns.Init(container, cwd, args.Console, syncPipe, args.Args); err != nil { + return err + } + return nil + }) +} + +type driver struct { + root string +} + +func NewDriver(root string) (*driver, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + if err := apparmor.InstallDefaultProfile(); err != nil { + return nil, err + } + return &driver{ + root: root, + }, nil +} + +func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + if err := d.validateCommand(c); err != nil { + return -1, err + } + var ( + term nsinit.Terminal + container = createContainer(c) + factory = &dockerCommandFactory{c: c, driver: d} + stateWriter = &dockerStateWriter{ + callback: startCallback, + c: c, + dsw: &nsinit.DefaultStateWriter{filepath.Join(d.root, c.ID)}, + } + ns = nsinit.NewNsInit(factory, stateWriter) + args = append([]string{c.Entrypoint}, c.Arguments...) 
+ ) + if err := d.createContainerRoot(c.ID); err != nil { + return -1, err + } + defer d.removeContainerRoot(c.ID) + + if c.Tty { + term = &dockerTtyTerm{ + pipes: pipes, + } + } else { + term = &dockerStdTerm{ + pipes: pipes, + } + } + c.Terminal = term + if err := d.writeContainerFile(container, c.ID); err != nil { + return -1, err + } + return ns.Exec(container, term, args) +} + +func (d *driver) Kill(p *execdriver.Command, sig int) error { + err := syscall.Kill(p.Process.Pid, syscall.Signal(sig)) + d.removeContainerRoot(p.ID) + return err +} + +func (d *driver) Info(id string) execdriver.Info { + return &info{ + ID: id, + driver: d, + } +} + +func (d *driver) Name() string { + return fmt.Sprintf("%s-%s", DriverName, Version) +} + +// TODO: this can be improved with our driver +// there has to be a better way to do this +func (d *driver) GetPidsForContainer(id string) ([]int, error) { + pids := []int{} + + subsystem := "devices" + cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) + if err != nil { + return pids, err + } + cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) + if err != nil { + return pids, err + } + + filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") + if _, err := os.Stat(filename); os.IsNotExist(err) { + filename = filepath.Join(cgroupRoot, cgroupDir, "docker", id, "tasks") + } + + output, err := ioutil.ReadFile(filename) + if err != nil { + return pids, err + } + for _, p := range strings.Split(string(output), "\n") { + if len(p) == 0 { + continue + } + pid, err := strconv.Atoi(p) + if err != nil { + return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) + } + pids = append(pids, pid) + } + return pids, nil +} + +func (d *driver) writeContainerFile(container *libcontainer.Container, id string) error { + data, err := json.Marshal(container) + if err != nil { + return err + } + return ioutil.WriteFile(filepath.Join(d.root, id, "container.json"), data, 0655) +} + +func (d *driver) createContainerRoot(id string) error { + return os.MkdirAll(filepath.Join(d.root, id), 0655) +} + +func (d *driver) removeContainerRoot(id string) error { + return os.RemoveAll(filepath.Join(d.root, id)) +} + +func (d *driver) validateCommand(c *execdriver.Command) error { + // we need to check the Config of the command to make sure that we + // do not have any of the lxc-conf variables + for _, conf := range c.Config { + if strings.Contains(conf, "lxc") { + return fmt.Errorf("%s is not supported by the native driver", conf) + } + } + return nil +} + +func getEnv(key string, env []string) string { + for _, pair := range env { + parts := strings.Split(pair, "=") + if parts[0] == key { + return parts[1] + } + } + return "" +} + +type dockerCommandFactory struct { + c *execdriver.Command + driver *driver +} + +// createCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces +// defined on the container's configuration and use the current binary as the init with the +// args provided +func (d *dockerCommandFactory) Create(container *libcontainer.Container, console string, syncFile *os.File, args []string) *exec.Cmd { + // we need to join the rootfs because nsinit will setup the rootfs and chroot + initPath := filepath.Join(d.c.Rootfs, d.c.InitPath) + + d.c.Path = initPath + d.c.Args = append([]string{ + initPath, + "-driver", DriverName, + "-console", console, + "-pipe", "3", + "-root", filepath.Join(d.driver.root, d.c.ID), + "--", + }, args...) 
+
+	// set this to nil so that when we set the clone flags anything else is reset
+	d.c.SysProcAttr = nil
+	system.SetCloneFlags(&d.c.Cmd, uintptr(nsinit.GetNamespaceFlags(container.Namespaces)))
+	d.c.ExtraFiles = []*os.File{syncFile}
+
+	d.c.Env = container.Env
+	d.c.Dir = d.c.Rootfs
+
+	return &d.c.Cmd
+}
+
+type dockerStateWriter struct {
+	dsw      nsinit.StateWriter
+	c        *execdriver.Command
+	callback execdriver.StartCallback
+}
+
+func (d *dockerStateWriter) WritePid(pid int) error {
+	d.c.ContainerPid = pid
+	err := d.dsw.WritePid(pid)
+	if d.callback != nil {
+		d.callback(d.c)
+	}
+	return err
+}
+
+func (d *dockerStateWriter) DeletePid() error {
+	return d.dsw.DeletePid()
+}
diff --git a/execdriver/native/info.go b/execdriver/native/info.go
new file mode 100644
index 0000000000..aef2f85c6b
--- /dev/null
+++ b/execdriver/native/info.go
@@ -0,0 +1,21 @@
+package native
+
+import (
+	"os"
+	"path/filepath"
+)
+
+type info struct {
+	ID     string
+	driver *driver
+}
+
+// IsRunning is determined by looking for the
+// pid file for a container.  If the file exists then the
+// container is currently running
+func (i *info) IsRunning() bool {
+	if _, err := os.Stat(filepath.Join(i.driver.root, i.ID, "pid")); err == nil {
+		return true
+	}
+	return false
+}
diff --git a/execdriver/native/term.go b/execdriver/native/term.go
new file mode 100644
index 0000000000..ec69820f75
--- /dev/null
+++ b/execdriver/native/term.go
@@ -0,0 +1,42 @@
+/*
+    These types are wrappers around the libcontainer Terminal interface so that
+    we can reuse the docker implementations where possible.
+*/
+package native
+
+import (
+	"github.com/dotcloud/docker/execdriver"
+	"io"
+	"os"
+	"os/exec"
+)
+
+type dockerStdTerm struct {
+	execdriver.StdConsole
+	pipes *execdriver.Pipes
+}
+
+func (d *dockerStdTerm) Attach(cmd *exec.Cmd) error {
+	return d.AttachPipes(cmd, d.pipes)
+}
+
+func (d *dockerStdTerm) SetMaster(master *os.File) {
+	// do nothing
+}
+
+type dockerTtyTerm struct {
+	execdriver.TtyConsole
+	pipes *execdriver.Pipes
+}
+
+func (t *dockerTtyTerm) Attach(cmd *exec.Cmd) error {
+	go io.Copy(t.pipes.Stdout, t.MasterPty)
+	if t.pipes.Stdin != nil {
+		go io.Copy(t.MasterPty, t.pipes.Stdin)
+	}
+	return nil
+}
+
+func (t *dockerTtyTerm) SetMaster(master *os.File) {
+	t.MasterPty = master
+}
diff --git a/execdriver/pipes.go b/execdriver/pipes.go
new file mode 100644
index 0000000000..158219f0c5
--- /dev/null
+++ b/execdriver/pipes.go
@@ -0,0 +1,23 @@
+package execdriver
+
+import (
+	"io"
+)
+
+// Pipes is a wrapper around a container's output for
+// stdin, stdout, stderr
+type Pipes struct {
+	Stdin          io.ReadCloser
+	Stdout, Stderr io.Writer
+}
+
+func NewPipes(stdin io.ReadCloser, stdout, stderr io.Writer, useStdin bool) *Pipes {
+	p := &Pipes{
+		Stdout: stdout,
+		Stderr: stderr,
+	}
+	if useStdin {
+		p.Stdin = stdin
+	}
+	return p
+}
diff --git a/execdriver/termconsole.go b/execdriver/termconsole.go
new file mode 100644
index 0000000000..af6b88d3d1
--- /dev/null
+++ b/execdriver/termconsole.go
@@ -0,0 +1,126 @@
+package execdriver
+
+import (
+	"github.com/dotcloud/docker/pkg/term"
+	"github.com/kr/pty"
+	"io"
+	"os"
+	"os/exec"
+)
+
+func SetTerminal(command *Command, pipes *Pipes) error {
+	var (
+		term Terminal
+		err  error
+	)
+	if command.Tty {
+		term, err = NewTtyConsole(command, pipes)
+	} else {
+		term, err = NewStdConsole(command, pipes)
+	}
+	if err != nil {
+		return err
+	}
+	command.Terminal = term
+	return nil
+}
+
+type TtyConsole struct {
+	MasterPty *os.File
+	SlavePty  *os.File
+}
+
+func NewTtyConsole(command *Command, pipes *Pipes) (*TtyConsole, error) {
+	ptyMaster, ptySlave, err := pty.Open()
+	if err != nil {
+		return nil, err
+	}
+	tty := &TtyConsole{
+		MasterPty: ptyMaster,
+		SlavePty:  ptySlave,
+	}
+	if err := tty.AttachPipes(&command.Cmd, pipes); err != nil {
+		tty.Close()
+		return nil, err
+	}
+	command.Console = tty.SlavePty.Name()
+	return tty, nil
+}
+
+func (t *TtyConsole) Master() *os.File {
+	return t.MasterPty
+}
+
+func (t *TtyConsole) Resize(h, w int) error {
+	return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
+}
+
+func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error {
+	command.Stdout = t.SlavePty
+	command.Stderr = t.SlavePty
+
+	go func() {
+		if wb, ok := pipes.Stdout.(interface {
+			CloseWriters() error
+		}); ok {
+			defer wb.CloseWriters()
+		}
+		io.Copy(pipes.Stdout, t.MasterPty)
+	}()
+
+	if pipes.Stdin != nil {
+		command.Stdin = t.SlavePty
+		command.SysProcAttr.Setctty = true
+
+		go func() {
+			defer pipes.Stdin.Close()
+			io.Copy(t.MasterPty, pipes.Stdin)
+		}()
+	}
+	return nil
+}
+
+func (t *TtyConsole) Close() error {
+	t.SlavePty.Close()
+	return t.MasterPty.Close()
+}
+
+type StdConsole struct {
+}
+
+func NewStdConsole(command *Command, pipes *Pipes) (*StdConsole, error) {
+	std := &StdConsole{}
+
+	if err := std.AttachPipes(&command.Cmd, pipes); err != nil {
+		return nil, err
+	}
+	return std, nil
+}
+
+func (s *StdConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error {
+	command.Stdout = pipes.Stdout
+	command.Stderr = pipes.Stderr
+
+	if pipes.Stdin != nil {
+		stdin, err := command.StdinPipe()
+		if err != nil {
+			return err
+		}
+
+		go func() {
+			defer stdin.Close()
+			io.Copy(stdin, pipes.Stdin)
+		}()
+	}
+	return nil
+}
+
+func (s *StdConsole) Resize(h, w int) error {
+	// we do not need to resize a non-tty
+	return nil
+}
+
+func (s *StdConsole) Close() error {
+	// nothing to close here
+	return nil
+}
diff --git a/graph.go b/graph.go
index 42da42c8af..43af2c278a 100644
--- a/graph.go
+++ b/graph.go
@@ -3,7 +3,9 @@ package docker
 import (
 	"fmt"
 	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/dockerversion"
 	"github.com/dotcloud/docker/graphdriver"
+	"github.com/dotcloud/docker/runconfig"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
@@ -125,12 +127,12 @@ func (graph *Graph) Get(name string) (*Image, error) {
 }
 
 // Create creates a new image and registers it in the graph.
-func (graph *Graph) Create(layerData archive.Archive, container *Container, comment, author string, config *Config) (*Image, error) {
+func (graph *Graph) Create(layerData archive.ArchiveReader, container *Container, comment, author string, config *runconfig.Config) (*Image, error) {
 	img := &Image{
 		ID:            GenerateID(),
 		Comment:       comment,
 		Created:       time.Now().UTC(),
-		DockerVersion: VERSION,
+		DockerVersion: dockerversion.VERSION,
 		Author:        author,
 		Config:        config,
 		Architecture:  runtime.GOARCH,
@@ -149,7 +151,7 @@ func (graph *Graph) Create(layerData archive.Archive, container *Container, comm
 
 // Register imports a pre-existing image into the graph.
 // FIXME: pass img as first argument
-func (graph *Graph) Register(jsonData []byte, layerData archive.Archive, img *Image) (err error) {
+func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, img *Image) (err error) {
 	defer func() {
 		// If any error occurs, remove the new dir from the driver.
 		// Don't check for errors since the dir might not have been created.
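The StdConsole added in termconsole.go above amounts to pointing an exec.Cmd's stdout and stderr at the writers carried by Pipes and copying Stdin through the command's stdin pipe. Below is a simplified, self-contained sketch of that wiring (types renamed, error handling trimmed); it is an illustration under those assumptions, not the execdriver implementation:

```go
package main

import (
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"strings"
)

// pipes is a cut-down stand-in for execdriver.Pipes.
type pipes struct {
	Stdin          io.ReadCloser
	Stdout, Stderr io.Writer
}

// attach mirrors StdConsole.AttachPipes: stdout/stderr are wired
// directly, stdin is copied through the command's stdin pipe.
func attach(cmd *exec.Cmd, p *pipes) error {
	cmd.Stdout = p.Stdout
	cmd.Stderr = p.Stderr
	if p.Stdin != nil {
		stdin, err := cmd.StdinPipe()
		if err != nil {
			return err
		}
		go func() {
			defer stdin.Close()
			io.Copy(stdin, p.Stdin)
		}()
	}
	return nil
}

func main() {
	cmd := exec.Command("cat")
	p := &pipes{
		Stdin:  ioutil.NopCloser(strings.NewReader("hello\n")),
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	}
	if err := attach(cmd, p); err != nil {
		panic(err)
	}
	if err := cmd.Run(); err != nil { // prints "hello"
		panic(err)
	}
}
```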
@@ -224,7 +226,9 @@ func (graph *Graph) TempLayerArchive(id string, compression archive.Compression, if err != nil { return nil, err } - return archive.NewTempArchive(utils.ProgressReader(ioutil.NopCloser(a), 0, output, sf, false, utils.TruncateID(id), "Buffering to disk"), tmp) + progress := utils.ProgressReader(a, 0, output, sf, false, utils.TruncateID(id), "Buffering to disk") + defer progress.Close() + return archive.NewTempArchive(progress, tmp) } // Mktemp creates a temporary sub-directory inside the graph's filesystem. @@ -253,6 +257,7 @@ func setupInitLayer(initLayer string) error { "/etc/resolv.conf": "file", "/etc/hosts": "file", "/etc/hostname": "file", + "/dev/console": "file", // "var/run": "dir", // "var/lock": "dir", } { diff --git a/graphdriver/aufs/aufs.go b/graphdriver/aufs/aufs.go index d1cf87d1a0..a15cf6b273 100644 --- a/graphdriver/aufs/aufs.go +++ b/graphdriver/aufs/aufs.go @@ -34,6 +34,10 @@ import ( "sync" ) +var ( + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") +) + func init() { graphdriver.Register("aufs", Init) } @@ -100,7 +104,7 @@ func supportsAufs() error { return nil } } - return fmt.Errorf("AUFS was not found in /proc/filesystems") + return ErrAufsNotSupported } func (a Driver) rootPath() string { @@ -271,7 +275,7 @@ func (a *Driver) Diff(id string) (archive.Archive, error) { }) } -func (a *Driver) ApplyDiff(id string, diff archive.Archive) error { +func (a *Driver) ApplyDiff(id string, diff archive.ArchiveReader) error { return archive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil) } diff --git a/graphdriver/aufs/aufs_test.go b/graphdriver/aufs/aufs_test.go index c43bd74038..6002bec5a1 100644 --- a/graphdriver/aufs/aufs_test.go +++ b/graphdriver/aufs/aufs_test.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "fmt" "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/graphdriver" "io/ioutil" "os" "path" @@ -15,15 +16,24 @@ var ( tmp = path.Join(os.TempDir(), "aufs-tests", "aufs") ) +func testInit(dir string, t *testing.T) graphdriver.Driver { + d, err := Init(dir) + if err != nil { + if err == ErrAufsNotSupported { + t.Skip(err) + } else { + t.Fatal(err) + } + } + return d +} + func newDriver(t *testing.T) *Driver { if err := os.MkdirAll(tmp, 0755); err != nil { t.Fatal(err) } - d, err := Init(tmp) - if err != nil { - t.Fatal(err) - } + d := testInit(tmp, t) return d.(*Driver) } @@ -32,10 +42,7 @@ func TestNewDriver(t *testing.T) { t.Fatal(err) } - d, err := Init(tmp) - if err != nil { - t.Fatal(err) - } + d := testInit(tmp, t) defer os.RemoveAll(tmp) if d == nil { t.Fatalf("Driver should not be nil") @@ -74,12 +81,8 @@ func TestNewDriverFromExistingDir(t *testing.T) { t.Fatal(err) } - if _, err := Init(tmp); err != nil { - t.Fatal(err) - } - if _, err := Init(tmp); err != nil { - t.Fatal(err) - } + testInit(tmp, t) + testInit(tmp, t) os.RemoveAll(tmp) } diff --git a/graphdriver/devmapper/deviceset.go b/graphdriver/devmapper/deviceset.go index 8432d92a4e..303e363e92 100644 --- a/graphdriver/devmapper/deviceset.go +++ b/graphdriver/devmapper/deviceset.go @@ -12,6 +12,7 @@ import ( "path" "path/filepath" "strconv" + "strings" "sync" "time" ) @@ -29,6 +30,15 @@ type DevInfo struct { TransactionId uint64 `json:"transaction_id"` Initialized bool `json:"initialized"` devices *DeviceSet `json:"-"` + + mountCount int `json:"-"` + mountPath string `json:"-"` + // A floating mount means one reference is not owned and + // will be stolen by the next mount. 
This allows us to + // avoid unmounting directly after creation before the + // first get (since we need to mount to set up the device + // a bit first). + floating bool `json:"-"` } type MetaData struct { @@ -43,7 +53,7 @@ type DeviceSet struct { TransactionId uint64 NewTransactionId uint64 nextFreeDevice int - activeMounts map[string]int + sawBusy bool } type DiskUsage struct { @@ -69,6 +79,14 @@ type DevStatus struct { HighestMappedSector uint64 } +type UnmountMode int + +const ( + UnmountRegular UnmountMode = iota + UnmountFloat + UnmountSink +) + func getDevName(name string) string { return "/dev/mapper/" + name } @@ -290,7 +308,7 @@ func (devices *DeviceSet) setupBaseImage() error { if oldInfo != nil && !oldInfo.Initialized { utils.Debugf("Removing uninitialized base image") - if err := devices.removeDevice(""); err != nil { + if err := devices.deleteDevice(""); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } @@ -355,6 +373,10 @@ func (devices *DeviceSet) log(level int, file string, line int, dmError int, mes return // Ignore _LOG_DEBUG } + if strings.Contains(message, "busy") { + devices.sawBusy = true + } + utils.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) } @@ -562,7 +584,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { return nil } -func (devices *DeviceSet) removeDevice(hash string) error { +func (devices *DeviceSet) deleteDevice(hash string) error { info := devices.Devices[hash] if info == nil { return fmt.Errorf("hash %s doesn't exists", hash) @@ -579,7 +601,7 @@ func (devices *DeviceSet) removeDevice(hash string) error { devinfo, _ := getInfo(info.Name()) if devinfo != nil && devinfo.Exists != 0 { - if err := removeDevice(info.Name()); err != nil { + if err := devices.removeDeviceAndWait(info.Name()); err != nil { utils.Debugf("Error removing device: %s\n", err) return err } @@ -610,50 +632,87 @@ func (devices *DeviceSet) removeDevice(hash string) error { return nil } -func (devices *DeviceSet) RemoveDevice(hash string) error { +func (devices *DeviceSet) DeleteDevice(hash string) error { devices.Lock() defer devices.Unlock() - return devices.removeDevice(hash) + return devices.deleteDevice(hash) } -func (devices *DeviceSet) deactivateDevice(hash string) error { - utils.Debugf("[devmapper] deactivateDevice(%s)", hash) - defer utils.Debugf("[devmapper] deactivateDevice END") - var devname string - // FIXME: shouldn't we just register the pool into devices? 
-	devname, err := devices.byHash(hash)
-	if err != nil {
-		return err
-	}
+func (devices *DeviceSet) deactivatePool() error {
+	utils.Debugf("[devmapper] deactivatePool()")
+	defer utils.Debugf("[devmapper] deactivatePool END")
+	devname := devices.getPoolDevName()
 	devinfo, err := getInfo(devname)
 	if err != nil {
 		utils.Debugf("\n--->Err: %s\n", err)
 		return err
 	}
 	if devinfo.Exists != 0 {
-		if err := removeDevice(devname); err != nil {
+		return removeDevice(devname)
+	}
+
+	return nil
+}
+
+func (devices *DeviceSet) deactivateDevice(hash string) error {
+	utils.Debugf("[devmapper] deactivateDevice(%s)", hash)
+	defer utils.Debugf("[devmapper] deactivateDevice END")
+
+	info := devices.Devices[hash]
+	if info == nil {
+		return fmt.Errorf("Unknown device %s", hash)
+	}
+	devinfo, err := getInfo(info.Name())
+	if err != nil {
+		utils.Debugf("\n--->Err: %s\n", err)
+		return err
+	}
+	if devinfo.Exists != 0 {
+		if err := devices.removeDeviceAndWait(info.Name()); err != nil {
 			utils.Debugf("\n--->Err: %s\n", err)
 			return err
 		}
-		if err := devices.waitRemove(hash); err != nil {
-			return err
-		}
 	}
 
 	return nil
 }
 
-// waitRemove blocks until either:
-// a) the device registered at - is removed,
-// or b) the 1 second timeout expires.
-func (devices *DeviceSet) waitRemove(hash string) error {
-	utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, hash)
-	defer utils.Debugf("[deviceset %s] waitRemove(%) END", devices.devicePrefix, hash)
-	devname, err := devices.byHash(hash)
+// Issues the underlying dm remove operation and then waits
+// for it to finish.
+func (devices *DeviceSet) removeDeviceAndWait(devname string) error {
+	var err error
+
+	for i := 0; i < 10; i++ {
+		devices.sawBusy = false
+		err = removeDevice(devname)
+		if err == nil {
+			break
+		}
+		if !devices.sawBusy {
+			return err
+		}
+
+		// If we see EBUSY it may be a transient error,
+		// sleep a bit and retry a few times.
+		time.Sleep(5 * time.Millisecond)
+	}
 	if err != nil {
 		return err
 	}
+
+	if err := devices.waitRemove(devname); err != nil {
+		return err
+	}
+	return nil
+}
+
+// waitRemove blocks until either:
+// a) the device registered at - is removed,
+// or b) the 1 second timeout expires.
+func (devices *DeviceSet) waitRemove(devname string) error {
+	utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname)
+	defer utils.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname)
 	i := 0
 	for ; i < 1000; i += 1 {
 		devinfo, err := getInfo(devname)
@@ -681,18 +740,18 @@
 // a) the device registered at - is closed,
 // or b) the 1 second timeout expires.
 func (devices *DeviceSet) waitClose(hash string) error {
-	devname, err := devices.byHash(hash)
-	if err != nil {
-		return err
+	info := devices.Devices[hash]
+	if info == nil {
+		return fmt.Errorf("Unknown device %s", hash)
 	}
 	i := 0
 	for ; i < 1000; i += 1 {
-		devinfo, err := getInfo(devname)
+		devinfo, err := getInfo(info.Name())
 		if err != nil {
 			return err
 		}
 		if i%100 == 0 {
-			utils.Debugf("Waiting for unmount of %s: opencount=%d", devname, devinfo.OpenCount)
+			utils.Debugf("Waiting for unmount of %s: opencount=%d", hash, devinfo.OpenCount)
 		}
 		if devinfo.OpenCount == 0 {
 			break
@@ -700,26 +759,11 @@
 		time.Sleep(1 * time.Millisecond)
 	}
 	if i == 1000 {
-		return fmt.Errorf("Timeout while waiting for device %s to close", devname)
+		return fmt.Errorf("Timeout while waiting for device %s to close", hash)
 	}
 	return nil
 }
 
-// byHash is a hack to allow looking up the deviceset's pool by the hash "pool".
-// FIXME: it seems probably cleaner to register the pool in devices.Devices,
-// but I am afraid of arcane implications deep in the devicemapper code,
-// so this will do.
-func (devices *DeviceSet) byHash(hash string) (devname string, err error) {
-	if hash == "pool" {
-		return devices.getPoolDevName(), nil
-	}
-	info := devices.Devices[hash]
-	if info == nil {
-		return "", fmt.Errorf("hash %s doesn't exists", hash)
-	}
-	return info.Name(), nil
-}
-
 func (devices *DeviceSet) Shutdown() error {
 	devices.Lock()
 	defer devices.Unlock()
@@ -728,13 +772,12 @@
 	utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
 	defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
 
-	for path, count := range devices.activeMounts {
-		for i := count; i > 0; i-- {
-			if err := sysUnmount(path, 0); err != nil {
-				utils.Debugf("Shutdown unmounting %s, error: %s\n", path, err)
+	for _, info := range devices.Devices {
+		if info.mountCount > 0 {
+			if err := sysUnmount(info.mountPath, 0); err != nil {
+				utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err)
 			}
 		}
-		delete(devices.activeMounts, path)
 	}
 
 	for _, d := range devices.Devices {
@@ -746,32 +789,42 @@
 		}
 	}
 
-	pool := devices.getPoolDevName()
-	if devinfo, err := getInfo(pool); err == nil && devinfo.Exists != 0 {
-		if err := devices.deactivateDevice("pool"); err != nil {
-			utils.Debugf("Shutdown deactivate %s , error: %s\n", pool, err)
-		}
+	if err := devices.deactivatePool(); err != nil {
+		utils.Debugf("Shutdown deactivate pool , error: %s\n", err)
 	}
 
 	return nil
 }
 
-func (devices *DeviceSet) MountDevice(hash, path string, readOnly bool) error {
+func (devices *DeviceSet) MountDevice(hash, path string) error {
 	devices.Lock()
 	defer devices.Unlock()
 
+	info := devices.Devices[hash]
+	if info == nil {
+		return fmt.Errorf("Unknown device %s", hash)
+	}
+
+	if info.mountCount > 0 {
+		if path != info.mountPath {
+			return fmt.Errorf("Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path)
+		}
+
+		if info.floating {
+			// Steal floating ref
+			info.floating = false
+		} else {
+			info.mountCount++
+		}
+		return nil
+	}
+
 	if err := devices.activateDeviceIfNeeded(hash); err != nil {
 		return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err)
 	}
 
-	info := devices.Devices[hash]
-
 	var flags uintptr = sysMsMgcVal
 
-	if readOnly {
-		flags = flags | sysMsRdOnly
-	}
-
 	err := sysMount(info.DevName(), path, "ext4", flags, "discard")
 	if err != nil && err == sysEInval {
err = sysMount(info.DevName(), path, "ext4", flags, "") @@ -780,20 +833,53 @@ func (devices *DeviceSet) MountDevice(hash, path string, readOnly bool) error { return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err) } - count := devices.activeMounts[path] - devices.activeMounts[path] = count + 1 + info.mountCount = 1 + info.mountPath = path + info.floating = false return devices.setInitialized(hash) } -func (devices *DeviceSet) UnmountDevice(hash, path string, deactivate bool) error { - utils.Debugf("[devmapper] UnmountDevice(hash=%s path=%s)", hash, path) +func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error { + utils.Debugf("[devmapper] UnmountDevice(hash=%s, mode=%d)", hash, mode) defer utils.Debugf("[devmapper] UnmountDevice END") devices.Lock() defer devices.Unlock() - utils.Debugf("[devmapper] Unmount(%s)", path) - if err := sysUnmount(path, 0); err != nil { + info := devices.Devices[hash] + if info == nil { + return fmt.Errorf("UnmountDevice: no such device %s\n", hash) + } + + if mode == UnmountFloat { + if info.floating { + return fmt.Errorf("UnmountDevice: can't float floating reference %s\n", hash) + } + + // Leave this reference floating + info.floating = true + return nil + } + + if mode == UnmountSink { + if !info.floating { + // Someone already sunk this + return nil + } + // Otherwise, treat this as a regular unmount + } + + if info.mountCount == 0 { + return fmt.Errorf("UnmountDevice: device not-mounted id %s\n", hash) + } + + info.mountCount-- + if info.mountCount > 0 { + return nil + } + + utils.Debugf("[devmapper] Unmount(%s)", info.mountPath) + if err := sysUnmount(info.mountPath, 0); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } @@ -804,15 +890,9 @@ func (devices *DeviceSet) UnmountDevice(hash, path string, deactivate bool) erro return err } - if count := devices.activeMounts[path]; count > 1 { - devices.activeMounts[path] = count - 1 - } else { - delete(devices.activeMounts, path) - } + devices.deactivateDevice(hash) - if deactivate { - devices.deactivateDevice(hash) - } + info.mountPath = "" return nil } @@ -955,9 +1035,8 @@ func NewDeviceSet(root string, doInit bool) (*DeviceSet, error) { SetDevDir("/dev") devices := &DeviceSet{ - root: root, - MetaData: MetaData{Devices: make(map[string]*DevInfo)}, - activeMounts: make(map[string]int), + root: root, + MetaData: MetaData{Devices: make(map[string]*DevInfo)}, } if err := devices.initDevmapper(doInit); err != nil { diff --git a/graphdriver/devmapper/devmapper.go b/graphdriver/devmapper/devmapper.go index 7f83a09df9..7317118dcf 100644 --- a/graphdriver/devmapper/devmapper.go +++ b/graphdriver/devmapper/devmapper.go @@ -324,7 +324,7 @@ func createPool(poolName string, dataFile, metadataFile *osFile) error { return fmt.Errorf("Can't get data size") } - params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768" + params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing" if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { return fmt.Errorf("Can't add target") } diff --git a/graphdriver/devmapper/driver.go b/graphdriver/devmapper/driver.go index 664899cfbf..4d414f9a75 100644 --- a/graphdriver/devmapper/driver.go +++ b/graphdriver/devmapper/driver.go @@ -7,8 +7,8 @@ import ( "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/utils" "io/ioutil" + "os" "path" - "sync" ) func init() { @@ -22,9 +22,7 @@ func init() { type Driver struct { *DeviceSet - home string - sync.Mutex // Protects 
concurrent modification to active - active map[string]int + home string } var Init = func(home string) (graphdriver.Driver, error) { @@ -35,7 +33,6 @@ var Init = func(home string) (graphdriver.Driver, error) { d := &Driver{ DeviceSet: deviceSet, home: home, - active: make(map[string]int), } return d, nil } @@ -83,55 +80,45 @@ func (d *Driver) Create(id, parent string) error { return err } + // We float this reference so that the next Get call can + // steal it, so we don't have to unmount + if err := d.DeviceSet.UnmountDevice(id, UnmountFloat); err != nil { + return err + } + return nil } func (d *Driver) Remove(id string) error { - // Protect the d.active from concurrent access - d.Lock() - defer d.Unlock() - - if d.active[id] != 0 { - utils.Errorf("Warning: removing active id %s\n", id) + // Sink the float from create in case no Get() call was made + if err := d.DeviceSet.UnmountDevice(id, UnmountSink); err != nil { + return err + } + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id); err != nil { + return err } mp := path.Join(d.home, "mnt", id) - if err := d.unmount(id, mp); err != nil { + if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { return err } - return d.DeviceSet.RemoveDevice(id) + + return nil } func (d *Driver) Get(id string) (string, error) { - // Protect the d.active from concurrent access - d.Lock() - defer d.Unlock() - - count := d.active[id] - mp := path.Join(d.home, "mnt", id) - if count == 0 { - if err := d.mount(id, mp); err != nil { - return "", err - } + if err := d.mount(id, mp); err != nil { + return "", err } - d.active[id] = count + 1 - return path.Join(mp, "rootfs"), nil } func (d *Driver) Put(id string) { - // Protect the d.active from concurrent access - d.Lock() - defer d.Unlock() - - if count := d.active[id]; count > 1 { - d.active[id] = count - 1 - } else { - mp := path.Join(d.home, "mnt", id) - d.unmount(id, mp) - delete(d.active, id) + if err := d.DeviceSet.UnmountDevice(id, UnmountRegular); err != nil { + utils.Errorf("Warning: error unmounting device %s: %s\n", id, err) } } @@ -140,25 +127,8 @@ func (d *Driver) mount(id, mountPoint string) error { if err := osMkdirAll(mountPoint, 0755); err != nil && !osIsExist(err) { return err } - // If mountpoint is already mounted, do nothing - if mounted, err := Mounted(mountPoint); err != nil { - return fmt.Errorf("Error checking mountpoint: %s", err) - } else if mounted { - return nil - } // Mount the device - return d.DeviceSet.MountDevice(id, mountPoint, false) -} - -func (d *Driver) unmount(id, mountPoint string) error { - // If mountpoint is not mounted, do nothing - if mounted, err := Mounted(mountPoint); err != nil { - return fmt.Errorf("Error checking mountpoint: %s", err) - } else if !mounted { - return nil - } - // Unmount the device - return d.DeviceSet.UnmountDevice(id, mountPoint, true) + return d.DeviceSet.MountDevice(id, mountPoint) } func (d *Driver) Exists(id string) bool { diff --git a/graphdriver/devmapper/driver_test.go b/graphdriver/devmapper/driver_test.go index 785845ce6e..68699f208e 100644 --- a/graphdriver/devmapper/driver_test.go +++ b/graphdriver/devmapper/driver_test.go @@ -136,7 +136,12 @@ type Set map[string]bool func (r Set) Assert(t *testing.T, names ...string) { for _, key := range names { - if _, exists := r[key]; !exists { + required := true + if strings.HasPrefix(key, "?") { + key = key[1:] + required = false + } + if _, exists := r[key]; !exists && required { t.Fatalf("Key not set: %s", key) } 
delete(r, key) @@ -294,7 +299,7 @@ func TestInit(t *testing.T) { } }() }() - // Put all tests in a funciton to make sure the garbage collection will + // Put all tests in a function to make sure the garbage collection will // occur. // Call GC to cleanup runtime.Finalizers @@ -486,6 +491,7 @@ func TestDriverCreate(t *testing.T) { "ioctl.blkgetsize", "ioctl.loopsetfd", "ioctl.loopsetstatus", + "?ioctl.loopctlgetfree", ) if err := d.Create("1", ""); err != nil { @@ -495,7 +501,6 @@ func TestDriverCreate(t *testing.T) { "DmTaskCreate", "DmTaskGetInfo", "sysMount", - "Mounted", "DmTaskRun", "DmTaskSetTarget", "DmTaskSetSector", @@ -604,6 +609,7 @@ func TestDriverRemove(t *testing.T) { "ioctl.blkgetsize", "ioctl.loopsetfd", "ioctl.loopsetstatus", + "?ioctl.loopctlgetfree", ) if err := d.Create("1", ""); err != nil { @@ -614,7 +620,6 @@ func TestDriverRemove(t *testing.T) { "DmTaskCreate", "DmTaskGetInfo", "sysMount", - "Mounted", "DmTaskRun", "DmTaskSetTarget", "DmTaskSetSector", @@ -645,7 +650,6 @@ func TestDriverRemove(t *testing.T) { "DmTaskSetTarget", "DmTaskSetAddNode", "DmUdevWait", - "Mounted", "sysUnmount", ) }() diff --git a/graphdriver/driver.go b/graphdriver/driver.go index c0ed00b0ad..89fd03a624 100644 --- a/graphdriver/driver.go +++ b/graphdriver/driver.go @@ -28,7 +28,7 @@ type Driver interface { type Differ interface { Diff(id string) (archive.Archive, error) Changes(id string) ([]archive.Change, error) - ApplyDiff(id string, diff archive.Archive) error + ApplyDiff(id string, diff archive.ArchiveReader) error DiffSize(id string) (bytes int64, err error) } diff --git a/hack/MAINTAINERS.md b/hack/MAINTAINERS.md index 8944fbee1a..be3117c864 100644 --- a/hack/MAINTAINERS.md +++ b/hack/MAINTAINERS.md @@ -1,22 +1,24 @@ -# The Docker maintainer manual +# The Docker Maintainer manual ## Introduction -Dear maintainer. Thank you for investing the time and energy to help make Docker as -useful as possible. Maintaining a project is difficult, sometimes unrewarding work. -Sure, you will get to contribute cool features to the project. But most of your time -will be spent reviewing, cleaning up, documenting, answering questions, justifying -design decisions - while everyone has all the fun! But remember - the quality of the -maintainers work is what distinguishes the good projects from the great. -So please be proud of your work, even the unglamourous parts, and encourage a culture -of appreciation and respect for *every* aspect of improving the project - not just the -hot new features. +Dear maintainer. Thank you for investing the time and energy to help +make Docker as useful as possible. Maintaining a project is difficult, +sometimes unrewarding work. Sure, you will get to contribute cool +features to the project. But most of your time will be spent reviewing, +cleaning up, documenting, answering questions, justifying design +decisions - while everyone has all the fun! But remember - the quality +of the maintainers work is what distinguishes the good projects from the +great. So please be proud of your work, even the unglamourous parts, +and encourage a culture of appreciation and respect for *every* aspect +of improving the project - not just the hot new features. -This document is a manual for maintainers old and new. It explains what is expected of -maintainers, how they should work, and what tools are available to them. - -This is a living document - if you see something out of date or missing, speak up! +This document is a manual for maintainers old and new. 
It explains what +is expected of maintainers, how they should work, and what tools are +available to them. +This is a living document - if you see something out of date or missing, +speak up! ## What are a maintainer's responsibility? @@ -24,19 +26,26 @@ It is every maintainer's responsibility to: * 1) Expose a clear roadmap for improving their component. * 2) Deliver prompt feedback and decisions on pull requests. -* 3) Be available to anyone with questions, bug reports, criticism etc. on their component. This includes irc, github requests and the mailing list. -* 4) Make sure their component respects the philosophy, design and roadmap of the project. - +* 3) Be available to anyone with questions, bug reports, criticism etc. + on their component. This includes IRC, GitHub requests and the mailing + list. +* 4) Make sure their component respects the philosophy, design and + roadmap of the project. ## How are decisions made? Short answer: with pull requests to the docker repository. -Docker is an open-source project with an open design philosophy. This means that the repository is the source of truth for EVERY aspect of the project, -including its philosophy, design, roadmap and APIs. *If it's part of the project, it's in the repo. It's in the repo, it's part of the project.* +Docker is an open-source project with an open design philosophy. This +means that the repository is the source of truth for EVERY aspect of the +project, including its philosophy, design, roadmap and APIs. *If it's +part of the project, it's in the repo. It's in the repo, it's part of +the project.* -As a result, all decisions can be expressed as changes to the repository. An implementation change is a change to the source code. An API change is a change to -the API specification. A philosophy change is a change to the philosophy manifesto. And so on. +As a result, all decisions can be expressed as changes to the +repository. An implementation change is a change to the source code. An +API change is a change to the API specification. A philosophy change is +a change to the philosophy manifesto. And so on. All decisions affecting docker, big and small, follow the same 3 steps: @@ -49,25 +58,36 @@ All decisions affecting docker, big and small, follow the same 3 steps: ## Who decides what? -So all decisions are pull requests, and the relevant maintainer makes the decision by accepting or refusing the pull request. -But how do we identify the relevant maintainer for a given pull request? +So all decisions are pull requests, and the relevant maintainer makes +the decision by accepting or refusing the pull request. But how do we +identify the relevant maintainer for a given pull request? -Docker follows the timeless, highly efficient and totally unfair system known as [Benevolent dictator for life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), -with yours truly, Solomon Hykes, in the role of BDFL. -This means that all decisions are made by default by me. Since making every decision myself would be highly un-scalable, in practice decisions are spread across multiple maintainers. +Docker follows the timeless, highly efficient and totally unfair system +known as [Benevolent dictator for +life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with +yours truly, Solomon Hykes, in the role of BDFL. This means that all +decisions are made by default by Solomon. Since making every decision +myself would be highly un-scalable, in practice decisions are spread +across multiple maintainers. 
The relevant maintainer for a pull request is assigned in 3 steps: -* Step 1: Determine the subdirectory affected by the pull request. This might be src/registry, docs/source/api, or any other part of the repo. +* Step 1: Determine the subdirectory affected by the pull request. This + might be `src/registry`, `docs/source/api`, or any other part of the repo. -* Step 2: Find the MAINTAINERS file which affects this directory. If the directory itself does not have a MAINTAINERS file, work your way up the repo hierarchy until you find one. +* Step 2: Find the `MAINTAINERS` file which affects this directory. If the + directory itself does not have a `MAINTAINERS` file, work your way up + the repo hierarchy until you find one. -* Step 3: The first maintainer listed is the primary maintainer. The pull request is assigned to him. He may assign it to other listed maintainers, at his discretion. +* Step 3: The first maintainer listed is the primary maintainer. The + pull request is assigned to him. He may assign it to other listed + maintainers, at his discretion. ### I'm a maintainer, should I make pull requests too? -Yes. Nobody should ever push to master directly. All changes should be made through a pull request. +Yes. Nobody should ever push to master directly. All changes should be +made through a pull request. ### Who assigns maintainers? diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md index b50306430b..5dcb120689 100644 --- a/hack/PACKAGERS.md +++ b/hack/PACKAGERS.md @@ -1,65 +1,94 @@ -Dear packager. +# Dear Packager, -If you are looking to make docker available on your favorite software distribution, -this document is for you. It summarizes the requirements for building and running -docker. +If you are looking to make Docker available on your favorite software +distribution, this document is for you. It summarizes the requirements for +building and running the Docker client and the Docker daemon. -## Getting started +## Getting Started -We really want to help you package Docker successfully. Before anything, a good first step -is to introduce yourself on the [docker-dev mailing list](https://groups.google.com/forum/?fromgroups#!forum/docker-dev) -, explain what you''re trying to achieve, and tell us how we can help. Don''t worry, we don''t bite! -There might even be someone already working on packaging for the same distro! +We want to help you package Docker successfully. Before doing any packaging, a +good first step is to introduce yourself on the [docker-dev mailing +list](https://groups.google.com/d/forum/docker-dev), explain what you're trying +to achieve, and tell us how we can help. Don't worry, we don't bite! There might +even be someone already working on packaging for the same distro! -You can also join the IRC channel - #docker and #docker-dev on Freenode are both active and friendly. +You can also join the IRC channel - #docker and #docker-dev on Freenode are both +active and friendly. -## Package name +We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our +"Packagers Relations", since he's always working to make sure our packagers have +a good, healthy upstream to work with (both in our communication and in our +build scripts). If you're having any kind of trouble, feel free to ping him +directly. He also likes to keep track of what distributions we have packagers +for, so feel free to reach out to him even just to say "Hi!" -If possible, your package should be called "docker". If that name is already taken, a second -choice is "lxc-docker". 
+## Package Name -## Official build vs distro build +If possible, your package should be called "docker". If that name is already +taken, a second choice is "lxc-docker", but with the caveat that "LXC" is now an +optional dependency (as noted below). Another possible choice is "docker.io". -The Docker project maintains its own build and release toolchain. It is pretty neat and entirely -based on Docker (surprise!). This toolchain is the canonical way to build Docker, and the only -method supported by the development team. We encourage you to give it a try, and if the circumstances +## Official Build vs Distro Build + +The Docker project maintains its own build and release toolchain. It is pretty +neat and entirely based on Docker (surprise!). This toolchain is the canonical +way to build Docker. We encourage you to give it a try, and if the circumstances allow you to use it, we recommend that you do. -You might not be able to use the official build toolchain - usually because your distribution has a -toolchain and packaging policy of its own. We get it! Your house, your rules. The rest of this document -should give you the information you need to package Docker your way, without denaturing it in -the process. +You might not be able to use the official build toolchain - usually because your +distribution has a toolchain and packaging policy of its own. We get it! Your +house, your rules. The rest of this document should give you the information you +need to package Docker your way, without denaturing it in the process. -## System build dependencies +## Build Dependencies -To build docker, you will need the following system dependencies +To build Docker, you will need the following: -* An amd64 machine * A recent version of git and mercurial * Go version 1.2 or later +* A clean checkout of the source added to a valid [Go + workspace](http://golang.org/doc/code.html#Workspaces) under the path + *src/github.com/dotcloud/docker* (unless you plan to use `AUTO_GOPATH`, + explained in more detail below). + +To build the Docker daemon, you will additionally need: + +* An amd64/x86_64 machine running Linux * SQLite version 3.7.9 or later -* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version 2.02.89 or later -* btrfs-progs version 3.8 or later (including commit e5cb128 from 2013-01-07) for the necessary btrfs headers -* A clean checkout of the source must be added to a valid Go [workspace](http://golang.org/doc/code.html#Workspaces) -under the path *src/github.com/dotcloud/docker*. +* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version + 2.02.89 or later +* btrfs-progs version 3.8 or later (including commit e5cb128 from 2013-01-07) + for the necessary btrfs headers -## Go dependencies +Be sure to also check out Docker's Dockerfile for the most up-to-date list of +these build-time dependencies. -All Go dependencies are vendored under ./vendor. They are used by the official build, -so the source of truth for the current version is whatever is in ./vendor. +### Go Dependencies -To use the vendored dependencies, simply make sure the path to ./vendor is included in $GOPATH. +All Go dependencies are vendored under "./vendor". They are used by the official +build, so the source of truth for the current version of each dependency is +whatever is in "./vendor". -If you would rather package these dependencies yourself, take a look at ./hack/vendor.sh for an -easy-to-parse list of the exact version for each. 
+To use the vendored dependencies, simply make sure the path to "./vendor" is +included in `GOPATH` (or use `AUTO_GOPATH`, as explained below). -NOTE: if you're not able to package the exact version (to the exact commit) of a given dependency, -please get in touch so we can remediate! Who knows what discrepancies can be caused by even the -slightest deviation. We promise to do our best to make everybody happy. +If you would rather (or must, due to distro policy) package these dependencies +yourself, take a look at "./hack/vendor.sh" for an easy-to-parse list of the +exact version for each. + +NOTE: if you're not able to package the exact version (to the exact commit) of a +given dependency, please get in touch so we can remediate! Who knows what +discrepancies can be caused by even the slightest deviation. We promise to do +our best to make everybody happy. ## Stripping Binaries -Please, please, please do not strip any compiled binaries. This is really important. +Please, please, please do not strip any compiled binaries. This is really +important. + +In our own testing, stripping the resulting binaries sometimes results in a +binary that appears to work, but more often causes random panics, segfaults, and +other issues. Even if the binary appears to work, please don't strip. See the following quotes from Dave Cheney, which explain this position better from the upstream Golang perspective. @@ -94,79 +123,172 @@ from the upstream Golang perspective. ## Building Docker -To build the docker binary, run the following command with the source checkout as the -working directory: +Please use our build script ("./hack/make.sh") for all your compilation of +Docker. If there's something you need that it isn't doing, or something it could +be doing to make your life as a packager easier, please get in touch with Tianon +and help us rectify the situation. Chances are good that other packagers have +probably run into the same problems and a fix might already be in the works, but +none of us will know for sure unless you harass Tianon about it. :) + +All the commands listed within this section should be run with the Docker source +checkout as the current working directory. + +### `AUTO_GOPATH` + +If you'd rather not be bothered with the hassle of setting up `GOPATH` +appropriately, and prefer to just get a "build that works", you should +add something similar to this to whatever script or process you're using to +build Docker: + +```bash +export AUTO_GOPATH=1 +``` + +This will cause the build scripts to set up a reasonable `GOPATH` that +automatically and properly includes both dotcloud/docker from the local +directory, and the local "./vendor" directory as necessary. + +### `DOCKER_BUILDTAGS` + +If you're building a binary that may need to be used on platforms that include +AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows: + +```bash +export DOCKER_BUILDTAGS='apparmor' +``` + +### Static Daemon + +If it is feasible within the constraints of your distribution, you should +seriously consider packaging Docker as a single static binary. A good comparison +is Busybox, which is often packaged statically as a feature to enable mass +portability. Because of the unique way Docker operates, being similarly static +is a "feature". 
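+As a concrete sketch (illustrative only - both variables are documented in the +sections above, and the exact values depend on your distro's policy), a +packaging script might export these settings before running the build commands +shown below: + +```bash +# Hypothetical packaging-script fragment; adjust to your distro's policy. +export AUTO_GOPATH=1 +export DOCKER_BUILDTAGS='apparmor' +```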
+ +To build a static Docker daemon binary, run the following command (first +ensuring that all the necessary libraries are available in static form for +linking - see the "Build Dependencies" section above, and the relevant lines +within Docker's own Dockerfile that set up our official build environment): ```bash ./hack/make.sh binary ``` -This will create a static binary under *./bundles/$VERSION/binary/docker-$VERSION*, where -*$VERSION* is the contents of the file *./VERSION*. +This will create a static binary under +"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of +the file "./VERSION". This binary is usually installed somewhere like +"/usr/bin/docker". -You are encouraged to use ./hack/make.sh without modification. If you must absolutely write -your own script (are you really, really sure you need to? make.sh is really not that complicated), -then please take care the respect the following: +### Dynamic Daemon / Client-only Binary -* In *./hack/make.sh*: $LDFLAGS, $BUILDFLAGS, $VERSION and $GITCOMMIT -* In *./hack/make/binary*: the exact build command to run +If you are only interested in a Docker client binary, set `DOCKER_CLIENTONLY` to +a non-empty value (which will prevent the extra step of compiling dockerinit), +using something similar to the following: -You may be tempted to tweak these settings. In particular, being a rigorous maintainer, you may want -to disable static linking. Please don't! Docker *needs* to be statically linked to function properly. -You would do the users of your distro a disservice and "void the docker warranty" by changing the flags. +```bash +export DOCKER_CLIENTONLY=1 +``` -A good comparison is Busybox: all distros package it as a statically linked binary, because it just -makes sense. Docker is the same way. - -If you *must* have a non-static Docker binary, please use: +If you need to (due to distro policy, distro library availability, or for other +reasons) create a dynamically compiled daemon binary, or if you are only +interested in creating a client binary for Docker, use something similar to the +following: ```bash ./hack/make.sh dynbinary ``` -This will create *./bundles/$VERSION/dynbinary/docker-$VERSION* and *./bundles/$VERSION/binary/dockerinit-$VERSION*. -The first of these would usually be installed at */usr/bin/docker*, while the second must be installed -at */usr/libexec/docker/dockerinit*. +This will create "./bundles/$VERSION/dynbinary/docker-$VERSION", which for +client-only builds is the important file to grab and install as appropriate. -## Testing Docker +For daemon builds, you will also need to grab and install +"./bundles/$VERSION/dynbinary/dockerinit-$VERSION", which is created from the +minimal set of Docker's codebase that _must_ be compiled statically (and is thus +a pure static binary). The acceptable locations Docker will search for this file +are as follows (in order): -Before releasing your binary, make sure to run the tests! 
Run the following command with the source -checkout as the working directory: +* as "dockerinit" in the same directory as the daemon binary (ie, if docker is + installed at "/usr/bin/docker", then "/usr/bin/dockerinit" will be the first + place this file is searched for) +* "/usr/libexec/docker/dockerinit" or "/usr/local/libexec/docker/dockerinit" + ([FHS 3.0 Draft](http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec)) +* "/usr/lib/docker/dockerinit" or "/usr/local/lib/docker/dockerinit" ([FHS + 2.3](http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA)) + +If (and please, only if) one of the paths above is insufficient due to distro +policy or similar issues, you may use the `DOCKER_INITPATH` environment variable +at compile-time as follows to set a different path for Docker to search: ```bash -./hack/make.sh test +export DOCKER_INITPATH=/usr/lib/docker.io/dockerinit ``` -The test suite includes both live integration tests and unit tests, so you will need all runtime -dependencies to be installed (see below). +If you find yourself needing this, please don't hesitate to reach out to Tianon +to see if it would be reasonable or helpful to add more paths to Docker's list, +especially if there's a relevant standard worth referencing (such as the FHS). -The test suite will also download a small test container, so you will need internet connectivity. +Also, it goes without saying, but for the purposes of the daemon please consider +these two binaries ("docker" and "dockerinit") as if they were a single unit. +Mixing and matching can cause undesired consequences, and will fail to run +properly. -## Runtime dependencies +## System Dependencies -To run properly, docker needs the following software to be installed at runtime: +### Runtime Dependencies + +To function properly, the Docker daemon needs the following software to be +installed and available at runtime: -* iproute2 version 3.5 or later (build after 2012-05-21), and specifically the "ip" utility * iptables version 1.4 or later -* The LXC utility scripts (http://lxc.sourceforge.net) version 0.8 or later +* XZ Utils version 4.9 or later + +Additionally, the Docker client needs the following software to be installed and +available at runtime: + * Git version 1.7 or later -* XZ Utils 4.9 or later -## Kernel dependencies +### Kernel Requirements -Docker in daemon mode has specific kernel requirements. For details, see -http://docs.docker.io/en/latest/installation/kernel/ +The Docker daemon has very specific kernel requirements. Most pre-packaged +kernels already include the necessary options enabled. If you are building your +own kernel, you will either need to discover the options necessary via trial and +error, or check out the [Gentoo +ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild), +in which a list is maintained (and if there are any issues or discrepancies in +that list, please contact Tianon so they can be rectified). -Note that Docker also has a client mode, which can run on virtually any linux kernel (it even builds -on OSX!). +Note that in client mode, there are no specific kernel requirements, and that +the client will even run on alternative platforms such as Mac OS X / Darwin. -## Init script +### Optional Dependencies -Docker expects to run as a daemon at machine startup. Your package will need to include a script -for your distro's process supervisor of choice. 
+Some of Docker's features are activated by using optional command-line flags or +by having support for them in the kernel or userspace. A few examples include: -Docker should be run as root, with the following arguments: +* LXC execution driver (requires version 0.8 or later of the LXC utility scripts) +* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at + least the "auplink" utility from aufs-tools) +* experimental BTRFS graph driver (requires BTRFS support enabled in the kernel) + +## Daemon Init Script + +Docker expects to run as a daemon at machine startup. Your package will need to +include a script for your distro's process supervisor of choice. Be sure to +check out the "contrib/init" folder in case a suitable init script already +exists (and if one does not, contact Tianon about whether it might be +appropriate for your distro's init script to live there too!). + +In general, Docker should be run as root, similar to the following: ```bash docker -d ``` + +Generally, a `DOCKER_OPTS` variable of some kind is available for adding more +flags (such as changing the graph driver to use BTRFS, switching the location of +"/var/lib/docker", etc). + +## Communicate + +As a final note, please do feel free to reach out to Tianon at any time for +pretty much anything. He really does love hearing from our packagers and wants +to make sure we're not being a "hostile upstream". As should be a given, we +appreciate the work our packagers do to make sure we have broad distribution! diff --git a/hack/RELEASE-CHECKLIST.md b/hack/RELEASE-CHECKLIST.md index a7ae45f2ff..84a0ff70e1 100644 --- a/hack/RELEASE-CHECKLIST.md +++ b/hack/RELEASE-CHECKLIST.md @@ -173,9 +173,13 @@ git push origin $VERSION It's very important that we don't make the tag until after the official release is uploaded to get.docker.io! -### 10. Go to github to merge the `bump_$VERSION` into release +### 10. Go to github to merge the `bump_$VERSION` branch into release -Merging the pull request to the release branch will automatically +Don't delete the leftover branch just yet, as we will need it for the next step. + +### 11. Go to github to merge the `bump_$VERSION` branch into docs + +Merging the pull request to the docs branch will automatically update the documentation on the "latest" revision of the docs. You should see the updated docs 5-10 minutes after the merge. The docs will appear on http://docs.docker.io/. For more information about @@ -184,7 +188,7 @@ documentation releases, see `docs/README.md`. Don't forget to push that pretty blue button to delete the leftover branch afterwards! -### 11. Create a new pull request to merge release back into master +### 12. Create a new pull request to merge release back into master ```bash git checkout master @@ -202,7 +206,7 @@ echo "https://github.com/dotcloud/docker/compare/master...merge_release_$VERSION Again, get two maintainers to validate, then merge, then push that pretty blue button to delete your branch. -### 12. Rejoice and Evangelize! +### 13. Rejoice and Evangelize! Congratulations! You're done. 
diff --git a/hack/infrastructure/docker-ci.rst b/hack/infrastructure/docker-ci.rst deleted file mode 100644 index 0be530d302..0000000000 --- a/hack/infrastructure/docker-ci.rst +++ /dev/null @@ -1,56 +0,0 @@ -docker-ci -========= - -docker-ci is our buildbot continuous integration server, -building and testing docker, hosted on EC2 and reachable at -http://docker-ci.dotcloud.com - - -Deployment -========== - -# Load AWS credentials -export AWS_ACCESS_KEY_ID='' -export AWS_SECRET_ACCESS_KEY='' -export AWS_KEYPAIR_NAME='' -export AWS_SSH_PRIVKEY='' - -# Load buildbot credentials and config -export BUILDBOT_PWD='' -export IRC_PWD='' -export IRC_CHANNEL='docker-dev' -export SMTP_USER='' -export SMTP_PWD='' -export EMAIL_RCP='' - -# Load registry test credentials -export REGISTRY_USER='' -export REGISTRY_PWD='' - -cd docker/testing -vagrant up --provider=aws - - -github pull request -=================== - -The entire docker pull request test workflow is event driven by github. Its -usage is fully automatic and the results are logged in docker-ci.dotcloud.com - -Each time there is a pull request on docker's github project, github connects -to docker-ci using github's rest API documented in http://developer.github.com/v3/repos/hooks -The issued command to program github's notification PR event was: -curl -u GITHUB_USER:GITHUB_PASSWORD -d '{"name":"web","active":true,"events":["pull_request"],"config":{"url":"http://docker-ci.dotcloud.com:8011/change_hook/github?project=docker"}}' https://api.github.com/repos/dotcloud/docker/hooks - -buildbot (0.8.7p1) was patched using ./testing/buildbot/github.py, so it -can understand the PR data github sends to it. Originally PR #1603 (ee64e099e0) -implemented this capability. Also we added a new scheduler to exclusively filter -PRs. and the 'pullrequest' builder to rebase the PR on top of master and test it. - - -nighthly release -================ - -The nightly release process is done by buildbot, running a DinD container that downloads -the docker repository and builds the release container. The resulting docker -binary is then tested, and if everything is fine, the release is done. 
diff --git a/hack/infrastructure/docker-ci/Dockerfile b/hack/infrastructure/docker-ci/Dockerfile index d894330ffa..fd795f4d45 100644 --- a/hack/infrastructure/docker-ci/Dockerfile +++ b/hack/infrastructure/docker-ci/Dockerfile @@ -1,47 +1,29 @@ -# VERSION: 0.25 -# DOCKER-VERSION 0.6.6 -# AUTHOR: Daniel Mizyrycki -# DESCRIPTION: Deploy docker-ci on Digital Ocean -# COMMENTS: -# CONFIG_JSON is an environment variable json string loaded as: -# -# export CONFIG_JSON=' -# { "DROPLET_NAME": "docker-ci", -# "DO_CLIENT_ID": "Digital_Ocean_client_id", -# "DO_API_KEY": "Digital_Ocean_api_key", -# "DOCKER_KEY_ID": "Digital_Ocean_ssh_key_id", -# "DOCKER_CI_KEY_PATH": "docker-ci_private_key_path", -# "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)", -# "DOCKER_CI_KEY": "$(cat docker-ci_ssh_private_key.key)", -# "BUILDBOT_PWD": "Buildbot_server_password", -# "IRC_PWD": "Buildbot_IRC_password", -# "SMTP_USER": "SMTP_server_user", -# "SMTP_PWD": "SMTP_server_password", -# "PKG_ACCESS_KEY": "Docker_release_S3_bucket_access_key", -# "PKG_SECRET_KEY": "Docker_release_S3_bucket_secret_key", -# "PKG_GPG_PASSPHRASE": "Docker_release_gpg_passphrase", -# "INDEX_AUTH": "Index_encripted_user_password", -# "REGISTRY_USER": "Registry_test_user", -# "REGISTRY_PWD": "Registry_test_password", -# "REGISTRY_BUCKET": "Registry_S3_bucket_name", -# "REGISTRY_ACCESS_KEY": "Registry_S3_bucket_access_key", -# "REGISTRY_SECRET_KEY": "Registry_S3_bucket_secret_key", -# "IRC_CHANNEL": "Buildbot_IRC_channel", -# "EMAIL_RCP": "Buildbot_mailing_receipient" }' -# -# -# TO_BUILD: docker build -t docker-ci . -# TO_DEPLOY: docker run -e CONFIG_JSON="${CONFIG_JSON}" docker-ci +# DOCKER-VERSION: 0.7.6 +# AUTHOR: Daniel Mizyrycki +# DESCRIPTION: docker-ci continuous integration service +# TO_BUILD: docker build -rm -t docker-ci/docker-ci . +# TO_RUN: docker run -rm -i -t -p 8000:80 -p 2222:22 -v /run:/var/socket \ +# -v /data/docker-ci:/data/docker-ci docker-ci/docker-ci from ubuntu:12.04 +maintainer Daniel Mizyrycki -run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' \ - > /etc/apt/sources.list -run apt-get update; apt-get install -y git python2.7 python-dev libevent-dev \ - python-pip ssh rsync less vim -run pip install requests fabric +ENV DEBIAN_FRONTEND noninteractive +RUN echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > \ + /etc/apt/sources.list; apt-get update +RUN apt-get install -y --no-install-recommends python2.7 python-dev \ + libevent-dev git supervisor ssh rsync less vim sudo gcc wget nginx +RUN cd /tmp; wget http://python-distribute.org/distribute_setup.py +RUN cd /tmp; python distribute_setup.py; easy_install pip; rm distribute_setup.py -# Add deployment code and set default container command -add . /docker-ci -cmd "/docker-ci/deployment.py" +RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 +RUN echo 'deb http://get.docker.io/ubuntu docker main' > \ + /etc/apt/sources.list.d/docker.list; apt-get update +RUN apt-get install -y lxc-docker-0.8.0 +RUN pip install SQLAlchemy==0.7.10 buildbot buildbot-slave pyopenssl boto +RUN ln -s /var/socket/docker.sock /run/docker.sock +ADD . 
/docker-ci +RUN /docker-ci/setup.sh + +ENTRYPOINT ["supervisord", "-n"] diff --git a/hack/infrastructure/docker-ci/README.rst b/hack/infrastructure/docker-ci/README.rst index 33a14359bf..3e429ffdd5 100644 --- a/hack/infrastructure/docker-ci/README.rst +++ b/hack/infrastructure/docker-ci/README.rst @@ -1,26 +1,65 @@ -======= -testing -======= +========= +docker-ci +========= -This directory contains docker-ci testing related files. +This directory contains the docker-ci continuous integration system. +As expected, it is fully dockerized and deployed using +docker-container-runner. +docker-ci is based on Buildbot, a continuous integration system designed +to automate the build/test cycle. By automatically rebuilding and testing +the tree each time something has changed, build problems are pinpointed +quickly, before other developers are inconvenienced by the failure. +We are running buildbot at Rackspace to verify docker and docker-registry +pass tests, and to check code coverage details. + +The docker-ci instance is at https://docker-ci.docker.io/waterfall + +Inside the docker-ci container we have the following directory structure: + +/docker-ci source code of docker-ci +/data/backup/docker-ci/ daily backup (replicated over S3) +/data/docker-ci/coverage/{docker,docker-registry}/ mapped to host volumes +/data/buildbot/{master,slave}/ main docker-ci buildbot config and database +/var/socket/{docker.sock} host volume access to docker socket -Buildbot -======== +Production deployment +===================== -Buildbot is a continuous integration system designed to automate the -build/test cycle. By automatically rebuilding and testing the tree each time -something has changed, build problems are pinpointed quickly, before other -developers are inconvenienced by the failure. +:: -We are running buildbot in Amazon's EC2 to verify docker passes all -tests when commits get pushed to the master branch and building -nightly releases using Docker in Docker awesome implementation made -by Jerome Petazzoni. + # Clone docker-ci repository + git clone https://github.com/dotcloud/docker + cd docker/hack/infrastructure/docker-ci -https://github.com/jpetazzo/dind + export DOCKER_PROD=[PRODUCTION_SERVER_IP] -Docker's buildbot instance is at http://docker-ci.dotcloud.com/waterfall + # Create data host volume. (only once) + docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \ + mkdir -p /data/docker-ci/coverage/docker + docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \ + mkdir -p /data/docker-ci/coverage/docker-registry + docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \ + chown -R 1000.1000 /data/docker-ci -For deployment instructions, please take a look at -hack/infrastructure/docker-ci/Dockerfile + # dcr deployment. Define credentials and special environment dcr variables + # ( retrieved at /hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml ) + export WEB_USER=[DOCKER-CI-WEBSITE-USERNAME] + export WEB_IRC_PWD=[DOCKER-CI-WEBSITE-PASSWORD] + export BUILDBOT_PWD=[BUILDSLAVE_PASSWORD] + export AWS_ACCESS_KEY=[DOCKER_RELEASE_S3_ACCESS] + export AWS_SECRET_KEY=[DOCKER_RELEASE_S3_SECRET] + export GPG_PASSPHRASE=[DOCKER_RELEASE_PASSPHRASE] + export BACKUP_AWS_ID=[S3_BUCKET_CREDENTIAL_ACCESS] + export BACKUP_AWS_SECRET=[S3_BUCKET_CREDENTIAL_SECRET] + export SMTP_USER=[MAILGUN_SMTP_USERNAME] + export SMTP_PWD=[MAILGUN_SMTP_PASSWORD] + export EMAIL_RCP=[EMAIL_FOR_BUILD_ERRORS] + + # Build docker-ci and testbuilder docker images + docker -H $DOCKER_PROD build -rm -t docker-ci/docker-ci . 
+ (cd testbuilder; docker -H $DOCKER_PROD build -rm -t docker-ci/testbuilder .) + + # Run docker-ci container ( assuming no previous container running ) + (cd dcr/prod; dcr docker-ci.yml start) + (cd dcr/prod; dcr docker-ci.yml register docker-ci.docker.io) diff --git a/hack/infrastructure/docker-ci/VERSION b/hack/infrastructure/docker-ci/VERSION index 0bfccb0804..b49b25336d 100644 --- a/hack/infrastructure/docker-ci/VERSION +++ b/hack/infrastructure/docker-ci/VERSION @@ -1 +1 @@ -0.4.5 +0.5.6 diff --git a/hack/infrastructure/docker-ci/buildbot/README.rst b/hack/infrastructure/docker-ci/buildbot/README.rst deleted file mode 100644 index 6cbcb8d93a..0000000000 --- a/hack/infrastructure/docker-ci/buildbot/README.rst +++ /dev/null @@ -1 +0,0 @@ -Buildbot configuration and setup files diff --git a/hack/infrastructure/docker-ci/buildbot/buildbot.conf b/hack/infrastructure/docker-ci/buildbot/buildbot.conf deleted file mode 100644 index e07b2e3c8c..0000000000 --- a/hack/infrastructure/docker-ci/buildbot/buildbot.conf +++ /dev/null @@ -1,18 +0,0 @@ -[program:buildmaster] -command=twistd --nodaemon --no_save -y buildbot.tac -directory=/data/buildbot/master -chown= root:root -redirect_stderr=true -stdout_logfile=/var/log/supervisor/buildbot-master.log -stderr_logfile=/var/log/supervisor/buildbot-master.log - -[program:buildworker] -command=twistd --nodaemon --no_save -y buildbot.tac -directory=/data/buildbot/slave -chown= root:root -redirect_stderr=true -stdout_logfile=/var/log/supervisor/buildbot-slave.log -stderr_logfile=/var/log/supervisor/buildbot-slave.log - -[group:buildbot] -programs=buildmaster,buildworker diff --git a/hack/infrastructure/docker-ci/buildbot/github.py b/hack/infrastructure/docker-ci/buildbot/github.py index ff6b6c62dd..5316e13282 100644 --- a/hack/infrastructure/docker-ci/buildbot/github.py +++ b/hack/infrastructure/docker-ci/buildbot/github.py @@ -17,7 +17,7 @@ """ github_buildbot.py is based on git_buildbot.py -github_buildbot.py will determine the repository information from the JSON +github_buildbot.py will determine the repository information from the JSON HTTP POST it receives from github.com and build the appropriate repository. If your github repository is private, you must add a ssh key to the github repository for the user who initiated the build on the buildslave. 
@@ -88,7 +88,8 @@ def getChanges(request, options = None): payload = json.loads(request.args['payload'][0]) import urllib,datetime fname = str(datetime.datetime.now()).replace(' ','_').replace(':','-')[:19] - open('github_{0}.json'.format(fname),'w').write(json.dumps(json.loads(urllib.unquote(request.args['payload'][0])), sort_keys = True, indent = 2)) + # Github event debug + # open('github_{0}.json'.format(fname),'w').write(json.dumps(json.loads(urllib.unquote(request.args['payload'][0])), sort_keys = True, indent = 2)) if 'pull_request' in payload: user = payload['pull_request']['user']['login'] @@ -142,13 +143,13 @@ def process_change(payload, user, repo, repo_url, project): 'category' : 'github_pullrequest', 'who' : '{0} - PR#{1}'.format(user,payload['number']), 'files' : [], - 'comments' : payload['pull_request']['title'], + 'comments' : payload['pull_request']['title'], 'revision' : newrev, 'when' : convertTime(payload['pull_request']['updated_at']), 'branch' : branch, 'revlink' : '{0}/commit/{1}'.format(repo_url,newrev), 'repository' : repo_url, - 'project' : project }] + 'project' : project }] return changes for commit in payload['commits']: files = [] diff --git a/hack/infrastructure/docker-ci/buildbot/master.cfg b/hack/infrastructure/docker-ci/buildbot/master.cfg index 9ca5fc035a..75605da8ab 100644 --- a/hack/infrastructure/docker-ci/buildbot/master.cfg +++ b/hack/infrastructure/docker-ci/buildbot/master.cfg @@ -1,4 +1,4 @@ -import os +import os, re from buildbot.buildslave import BuildSlave from buildbot.schedulers.forcesched import ForceScheduler from buildbot.schedulers.basic import SingleBranchScheduler @@ -6,127 +6,156 @@ from buildbot.schedulers.timed import Nightly from buildbot.changes import filter from buildbot.config import BuilderConfig from buildbot.process.factory import BuildFactory -from buildbot.process.properties import Interpolate +from buildbot.process.properties import Property from buildbot.steps.shell import ShellCommand from buildbot.status import html, words from buildbot.status.web import authz, auth from buildbot.status.mail import MailNotifier -PORT_WEB = 80 # Buildbot webserver port -PORT_GITHUB = 8011 # Buildbot github hook port -PORT_MASTER = 9989 # Port where buildbot master listen buildworkers -TEST_USER = 'buildbot' # Credential to authenticate build triggers -TEST_PWD = 'docker' # Credential to authenticate build triggers -GITHUB_DOCKER = 'github.com/dotcloud/docker' -BUILDBOT_PATH = '/data/buildbot' -DOCKER_PATH = '/go/src/github.com/dotcloud/docker' -DOCKER_CI_PATH = '/docker-ci' + +def ENV(x): + '''Promote an environment variable for global use returning its value''' + retval = os.environ.get(x, '') + globals()[x] = retval + return retval + + +class TestCommand(ShellCommand): + '''Extend ShellCommand with optional summary logs''' + def __init__(self, *args, **kwargs): + super(TestCommand, self).__init__(*args, **kwargs) + + def createSummary(self, log): + exit_status = re.sub(r'.+\n\+ exit (\d+).+', + r'\1', log.getText()[-100:], flags=re.DOTALL) + if exit_status != '0': + return + # Infer coverage path from log + if '+ COVERAGE_PATH' in log.getText(): + path = re.sub(r'.+\+ COVERAGE_PATH=((.+?)-\d+).+', + r'\2/\1', log.getText(), flags=re.DOTALL) + url = '{}coverage/{}/index.html'.format(c['buildbotURL'], path) + self.addURL('coverage', url) + elif 'COVERAGE_FILE' in log.getText(): + path = re.sub(r'.+\+ COVERAGE_FILE=((.+?)-\d+).+', + r'\2/\1', log.getText(), flags=re.DOTALL) + url = '{}coverage/{}/index.html'.format(c['buildbotURL'], 
path) + self.addURL('coverage', url) + + +PORT_WEB = 8000 # Buildbot webserver port +PORT_GITHUB = 8011 # Buildbot github hook port +PORT_MASTER = 9989 # Port where buildbot master listen buildworkers + +BUILDBOT_URL = '//localhost:{}/'.format(PORT_WEB) +DOCKER_REPO = 'https://github.com/docker-test/docker' +DOCKER_TEST_ARGV = 'HEAD {}'.format(DOCKER_REPO) +REGISTRY_REPO = 'https://github.com/docker-test/docker-registry' +REGISTRY_TEST_ARGV = 'HEAD {}'.format(REGISTRY_REPO) +if ENV('DEPLOYMENT') == 'staging': + BUILDBOT_URL = "//docker-ci-stage.docker.io/" +if ENV('DEPLOYMENT') == 'production': + BUILDBOT_URL = '//docker-ci.docker.io/' + DOCKER_REPO = 'https://github.com/dotcloud/docker' + DOCKER_TEST_ARGV = '' + REGISTRY_REPO = 'https://github.com/dotcloud/docker-registry' + REGISTRY_TEST_ARGV = '' # Credentials set by setup.sh from deployment.py -BUILDBOT_PWD = '' -IRC_PWD = '' -IRC_CHANNEL = '' -SMTP_USER = '' -SMTP_PWD = '' -EMAIL_RCP = '' +ENV('WEB_USER') +ENV('WEB_IRC_PWD') +ENV('BUILDBOT_PWD') +ENV('SMTP_USER') +ENV('SMTP_PWD') +ENV('EMAIL_RCP') +ENV('IRC_CHANNEL') c = BuildmasterConfig = {} -c['title'] = "Docker" +c['title'] = "docker-ci" c['titleURL'] = "waterfall" -c['buildbotURL'] = "http://docker-ci.dotcloud.com/" +c['buildbotURL'] = BUILDBOT_URL c['db'] = {'db_url':"sqlite:///state.sqlite"} c['slaves'] = [BuildSlave('buildworker', BUILDBOT_PWD)] c['slavePortnum'] = PORT_MASTER # Schedulers -c['schedulers'] = [ForceScheduler(name='trigger', builderNames=['docker', - 'index','registry','docker-coverage','registry-coverage','nightlyrelease'])] -c['schedulers'] += [SingleBranchScheduler(name="all", treeStableTimer=None, +c['schedulers'] = [ForceScheduler(name='trigger', builderNames=[ + 'docker', 'docker-registry', 'nightlyrelease', 'backup'])] +c['schedulers'] += [SingleBranchScheduler(name="docker", treeStableTimer=None, change_filter=filter.ChangeFilter(branch='master', - repository='https://github.com/dotcloud/docker'), builderNames=['docker'])] -c['schedulers'] += [SingleBranchScheduler(name='pullrequest', - change_filter=filter.ChangeFilter(category='github_pullrequest'), treeStableTimer=None, - builderNames=['pullrequest'])] -c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['nightlyrelease', - 'docker-coverage','registry-coverage'], hour=7, minute=00)] -c['schedulers'] += [Nightly(name='every4hrs', branch=None, builderNames=['registry','index'], - hour=range(0,24,4), minute=15)] + repository=DOCKER_REPO), builderNames=['docker'])] +c['schedulers'] += [SingleBranchScheduler(name="registry", treeStableTimer=None, + change_filter=filter.ChangeFilter(branch='master', + repository=REGISTRY_REPO), builderNames=['docker-registry'])] +c['schedulers'] += [SingleBranchScheduler(name='docker-pr', treeStableTimer=None, + change_filter=filter.ChangeFilter(category='github_pullrequest', + project='docker'), builderNames=['docker-pr'])] +c['schedulers'] += [SingleBranchScheduler(name='docker-registry-pr', treeStableTimer=None, + change_filter=filter.ChangeFilter(category='github_pullrequest', + project='docker-registry'), builderNames=['docker-registry-pr'])] +c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=[ + 'nightlyrelease', 'backup'], hour=7, minute=00)] + # Builders -# Docker commit test -test_cmd = ('docker run -privileged mzdaniel/test_docker hack/dind' - ' test_docker.sh %(src::revision)s') + +# Backup factory = BuildFactory() -factory.addStep(ShellCommand(description='Docker', logEnviron=False, - usePTY=True, command=["sh", "-c", 
Interpolate(test_cmd)])) -c['builders'] = [BuilderConfig(name='docker',slavenames=['buildworker'], +factory.addStep(TestCommand(description='backup', logEnviron=False, + usePTY=True, command='/docker-ci/tool/backup.py')) +c['builders'] = [BuilderConfig(name='backup',slavenames=['buildworker'], + factory=factory)] + +# Docker test +factory = BuildFactory() +factory.addStep(TestCommand(description='docker', logEnviron=False, + usePTY=True, command='/docker-ci/dockertest/docker {}'.format(DOCKER_TEST_ARGV))) +c['builders'] += [BuilderConfig(name='docker',slavenames=['buildworker'], factory=factory)] # Docker pull request test -test_cmd = ('docker run -privileged mzdaniel/test_docker hack/dind' - ' test_docker.sh %(src::revision)s %(src::repository)s %(src::branch)s') factory = BuildFactory() -factory.addStep(ShellCommand(description='pull_request', logEnviron=False, - usePTY=True, command=["sh", "-c", Interpolate(test_cmd)])) -c['builders'] += [BuilderConfig(name='pullrequest',slavenames=['buildworker'], +factory.addStep(TestCommand(description='docker-pr', logEnviron=False, + usePTY=True, command=['/docker-ci/dockertest/docker', + Property('revision'), Property('repository'), Property('branch')])) +c['builders'] += [BuilderConfig(name='docker-pr',slavenames=['buildworker'], factory=factory)] -# Docker coverage test +# docker-registry test factory = BuildFactory() -factory.addStep(ShellCommand(description='docker-coverage', logEnviron=False, - usePTY=True, command='{0}/docker-coverage/coverage-docker.sh'.format( - DOCKER_CI_PATH))) -c['builders'] += [BuilderConfig(name='docker-coverage',slavenames=['buildworker'], +factory.addStep(TestCommand(description='docker-registry', logEnviron=False, + usePTY=True, command='/docker-ci/dockertest/docker-registry {}'.format(REGISTRY_TEST_ARGV))) +c['builders'] += [BuilderConfig(name='docker-registry',slavenames=['buildworker'], factory=factory)] -# Docker registry coverage test +# Docker registry pull request test factory = BuildFactory() -factory.addStep(ShellCommand(description='registry-coverage', logEnviron=False, - usePTY=True, command='docker run registry_coverage'.format( - DOCKER_CI_PATH))) -c['builders'] += [BuilderConfig(name='registry-coverage',slavenames=['buildworker'], - factory=factory)] - -# Registry functional test -factory = BuildFactory() -factory.addStep(ShellCommand(description='registry', logEnviron=False, - command='. {0}/master/credentials.cfg; ' - '{1}/functionaltests/test_registry.sh'.format(BUILDBOT_PATH, DOCKER_CI_PATH), - usePTY=True)) -c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'], - factory=factory)] - -# Index functional test -factory = BuildFactory() -factory.addStep(ShellCommand(description='index', logEnviron=False, - command='. 
{0}/master/credentials.cfg; ' - '{1}/functionaltests/test_index.py'.format(BUILDBOT_PATH, DOCKER_CI_PATH), - usePTY=True)) -c['builders'] += [BuilderConfig(name='index',slavenames=['buildworker'], +factory.addStep(TestCommand(description='docker-registry-pr', logEnviron=False, + usePTY=True, command=['/docker-ci/dockertest/docker-registry', + Property('revision'), Property('repository'), Property('branch')])) +c['builders'] += [BuilderConfig(name='docker-registry-pr',slavenames=['buildworker'], factory=factory)] # Docker nightly release -nightlyrelease_cmd = ('docker version; docker run -i -t -privileged -e AWS_S3_BUCKET=' - 'test.docker.io dockerbuilder hack/dind dockerbuild.sh') factory = BuildFactory() factory.addStep(ShellCommand(description='NightlyRelease',logEnviron=False, - usePTY=True, command=nightlyrelease_cmd)) + usePTY=True, command=['/docker-ci/dockertest/nightlyrelease'])) c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'], factory=factory)] # Status -authz_cfg = authz.Authz(auth=auth.BasicAuth([(TEST_USER, TEST_PWD)]), +authz_cfg = authz.Authz(auth=auth.BasicAuth([(WEB_USER, WEB_IRC_PWD)]), forceBuild='auth') c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)] c['status'].append(html.WebStatus(http_port=PORT_GITHUB, allowForce=True, change_hook_dialects={ 'github': True })) -c['status'].append(MailNotifier(fromaddr='buildbot@docker.io', +c['status'].append(MailNotifier(fromaddr='docker-test@docker.io', sendToInterestedUsers=False, extraRecipients=[EMAIL_RCP], mode='failing', relayhost='smtp.mailgun.org', smtpPort=587, useTls=True, smtpUser=SMTP_USER, smtpPassword=SMTP_PWD)) c['status'].append(words.IRC("irc.freenode.net", "dockerqabot", - channels=[IRC_CHANNEL], password=IRC_PWD, allowForce=True, + channels=[IRC_CHANNEL], password=WEB_IRC_PWD, allowForce=True, notify_events={'exception':1, 'successToFailure':1, 'failureToSuccess':1})) diff --git a/hack/infrastructure/docker-ci/buildbot/requirements.txt b/hack/infrastructure/docker-ci/buildbot/requirements.txt deleted file mode 100644 index d2dcf1d125..0000000000 --- a/hack/infrastructure/docker-ci/buildbot/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -sqlalchemy<=0.7.9 -sqlalchemy-migrate>=0.7.2 -buildbot==0.8.7p1 -buildbot_slave==0.8.7p1 -nose==1.2.1 -requests==1.1.0 -flask==0.10.1 -simplejson==2.3.2 -selenium==2.35.0 diff --git a/hack/infrastructure/docker-ci/buildbot/setup.sh b/hack/infrastructure/docker-ci/buildbot/setup.sh deleted file mode 100755 index c5d9cb988e..0000000000 --- a/hack/infrastructure/docker-ci/buildbot/setup.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash - -# Setup of buildbot configuration. 
Package installation is being done by -# Vagrantfile -# Dependencies: buildbot, buildbot-slave, supervisor - -USER=$1 -CFG_PATH=$2 -DOCKER_PATH=$3 -BUILDBOT_PWD=$4 -IRC_PWD=$5 -IRC_CHANNEL=$6 -SMTP_USER=$7 -SMTP_PWD=$8 -EMAIL_RCP=$9 -REGISTRY_USER=${10} -REGISTRY_PWD=${11} -REGISTRY_BUCKET=${12} -REGISTRY_ACCESS_KEY=${13} -REGISTRY_SECRET_KEY=${14} -BUILDBOT_PATH="/data/buildbot" -SLAVE_NAME="buildworker" -SLAVE_SOCKET="localhost:9989" - -export PATH="/bin:sbin:/usr/bin:/usr/sbin:/usr/local/bin" - -function run { su $USER -c "$1"; } - -# Exit if buildbot has already been installed -[ -d "$BUILDBOT_PATH" ] && exit 0 - -# Setup buildbot -run "mkdir -p $BUILDBOT_PATH" -cd $BUILDBOT_PATH -run "buildbot create-master master" -run "cp $CFG_PATH/master.cfg master" -run "sed -i -E 's#(BUILDBOT_PWD = ).+#\1\"$BUILDBOT_PWD\"#' master/master.cfg" -run "sed -i -E 's#(IRC_PWD = ).+#\1\"$IRC_PWD\"#' master/master.cfg" -run "sed -i -E 's#(IRC_CHANNEL = ).+#\1\"$IRC_CHANNEL\"#' master/master.cfg" -run "sed -i -E 's#(SMTP_USER = ).+#\1\"$SMTP_USER\"#' master/master.cfg" -run "sed -i -E 's#(SMTP_PWD = ).+#\1\"$SMTP_PWD\"#' master/master.cfg" -run "sed -i -E 's#(EMAIL_RCP = ).+#\1\"$EMAIL_RCP\"#' master/master.cfg" -run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD" -run "echo 'export DOCKER_CREDS=\"$REGISTRY_USER:$REGISTRY_PWD\"' > $BUILDBOT_PATH/master/credentials.cfg" -run "echo 'export S3_BUCKET=\"$REGISTRY_BUCKET\"' >> $BUILDBOT_PATH/master/credentials.cfg" -run "echo 'export S3_ACCESS_KEY=\"$REGISTRY_ACCESS_KEY\"' >> $BUILDBOT_PATH/master/credentials.cfg" -run "echo 'export S3_SECRET_KEY=\"$REGISTRY_SECRET_KEY\"' >> $BUILDBOT_PATH/master/credentials.cfg" - -# Patch github webstatus to capture pull requests -cp $CFG_PATH/github.py /usr/local/lib/python2.7/dist-packages/buildbot/status/web/hooks - -# Allow buildbot subprocesses (docker tests) to properly run in containers, -# in particular with docker -u -run "sed -i 's/^umask = None/umask = 000/' slave/buildbot.tac" - -# Setup supervisor -cp $CFG_PATH/buildbot.conf /etc/supervisor/conf.d/buildbot.conf -sed -i -E "s/^chmod=0700.+/chmod=0770\nchown=root:$USER/" /etc/supervisor/supervisord.conf -kill -HUP $(pgrep -f "/usr/bin/python /usr/bin/supervisord") diff --git a/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml b/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml new file mode 100644 index 0000000000..523535446a --- /dev/null +++ b/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml @@ -0,0 +1,22 @@ +docker-ci: + image: "docker-ci/docker-ci" + release_name: "docker-ci-0.5.6" + ports: ["80","2222:22","8011:8011"] + register: "80" + volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"] + command: [] + env: + - "DEPLOYMENT=production" + - "IRC_CHANNEL=docker-testing" + - "BACKUP_BUCKET=backup-ci" + - "$WEB_USER" + - "$WEB_IRC_PWD" + - "$BUILDBOT_PWD" + - "$AWS_ACCESS_KEY" + - "$AWS_SECRET_KEY" + - "$GPG_PASSPHRASE" + - "$BACKUP_AWS_ID" + - "$BACKUP_AWS_SECRET" + - "$SMTP_USER" + - "$SMTP_PWD" + - "$EMAIL_RCP" diff --git a/hack/infrastructure/docker-ci/dcr/prod/settings.yml b/hack/infrastructure/docker-ci/dcr/prod/settings.yml new file mode 100644 index 0000000000..9831afa6dd --- /dev/null +++ b/hack/infrastructure/docker-ci/dcr/prod/settings.yml @@ -0,0 +1,5 @@ +default: + hipaches: ['192.168.100.67:6379'] + daemons: ['192.168.100.67:4243'] + use_ssh: False + diff --git a/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml b/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml new file mode 100644 index 
0000000000..8eba84825c --- /dev/null +++ b/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml @@ -0,0 +1,22 @@ +docker-ci: + image: "docker-ci/docker-ci" + release_name: "docker-ci-stage" + ports: ["80","2222:22","8011:8011"] + register: "80" + volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"] + command: [] + env: + - "DEPLOYMENT=staging" + - "IRC_CHANNEL=docker-testing-staging" + - "BACKUP_BUCKET=ci-backup-stage" + - "$BACKUP_AWS_ID" + - "$BACKUP_AWS_SECRET" + - "$WEB_USER" + - "$WEB_IRC_PWD" + - "$BUILDBOT_PWD" + - "$AWS_ACCESS_KEY" + - "$AWS_SECRET_KEY" + - "$GPG_PASSPHRASE" + - "$SMTP_USER" + - "$SMTP_PWD" + - "$EMAIL_RCP" diff --git a/hack/infrastructure/docker-ci/dcr/stage/settings.yml b/hack/infrastructure/docker-ci/dcr/stage/settings.yml new file mode 100644 index 0000000000..a7d37acff3 --- /dev/null +++ b/hack/infrastructure/docker-ci/dcr/stage/settings.yml @@ -0,0 +1,5 @@ +default: + hipaches: ['192.168.100.65:6379'] + daemons: ['192.168.100.65:4243'] + use_ssh: False + diff --git a/hack/infrastructure/docker-ci/deployment.py b/hack/infrastructure/docker-ci/deployment.py deleted file mode 100755 index fd0fdb0fe8..0000000000 --- a/hack/infrastructure/docker-ci/deployment.py +++ /dev/null @@ -1,171 +0,0 @@ -#!/usr/bin/env python - -import os, sys, re, json, requests, base64 -from subprocess import call -from fabric import api -from fabric.api import cd, run, put, sudo -from os import environ as env -from datetime import datetime -from time import sleep - -# Remove SSH private key as it needs more processing -CONFIG = json.loads(re.sub(r'("DOCKER_CI_KEY".+?"(.+?)",)','', - env['CONFIG_JSON'], flags=re.DOTALL)) - -# Populate environment variables -for key in CONFIG: - env[key] = CONFIG[key] - -# Load SSH private key -env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1', - env['CONFIG_JSON'],flags=re.DOTALL) - -DROPLET_NAME = env.get('DROPLET_NAME','docker-ci') -TIMEOUT = 120 # Seconds before timeout droplet creation -IMAGE_ID = 1004145 # Docker on Ubuntu 13.04 -REGION_ID = 4 # New York 2 -SIZE_ID = 62 # memory 2GB -DO_IMAGE_USER = 'root' # Image user on Digital Ocean -API_URL = 'https://api.digitalocean.com/' -DOCKER_PATH = '/go/src/github.com/dotcloud/docker' -DOCKER_CI_PATH = '/docker-ci' -CFG_PATH = '{}/buildbot'.format(DOCKER_CI_PATH) - - -class DigitalOcean(): - - def __init__(self, key, client): - '''Set default API parameters''' - self.key = key - self.client = client - self.api_url = API_URL - - def api(self, cmd_path, api_arg={}): - '''Make api call''' - api_arg.update({'api_key':self.key, 'client_id':self.client}) - resp = requests.get(self.api_url + cmd_path, params=api_arg).text - resp = json.loads(resp) - if resp['status'] != 'OK': - raise Exception(resp['error_message']) - return resp - - def droplet_data(self, name): - '''Get droplet data''' - data = self.api('droplets') - data = [droplet for droplet in data['droplets'] - if droplet['name'] == name] - return data[0] if data else {} - - -def json_fmt(data): - '''Format json output''' - return json.dumps(data, sort_keys = True, indent = 2) - - -do = DigitalOcean(env['DO_API_KEY'], env['DO_CLIENT_ID']) - -# Get DROPLET_NAME data -data = do.droplet_data(DROPLET_NAME) - -# Stop processing if DROPLET_NAME exists on Digital Ocean -if data: - print ('Droplet: {} already deployed. Not further processing.' 
- .format(DROPLET_NAME)) - exit(1) - -# Create droplet -do.api('droplets/new', {'name':DROPLET_NAME, 'region_id':REGION_ID, - 'image_id':IMAGE_ID, 'size_id':SIZE_ID, - 'ssh_key_ids':[env['DOCKER_KEY_ID']]}) - -# Wait for droplet to be created. -start_time = datetime.now() -while (data.get('status','') != 'active' and ( - datetime.now()-start_time).seconds < TIMEOUT): - data = do.droplet_data(DROPLET_NAME) - print data['status'] - sleep(3) - -# Wait for the machine to boot -sleep(15) - -# Get droplet IP -ip = str(data['ip_address']) -print 'droplet: {} ip: {}'.format(DROPLET_NAME, ip) - -# Create docker-ci ssh private key so docker-ci docker container can communicate -# with its EC2 instance -os.makedirs('/root/.ssh') -open('/root/.ssh/id_rsa','w').write(env['DOCKER_CI_KEY']) -os.chmod('/root/.ssh/id_rsa',0600) -open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n') - -api.env.host_string = ip -api.env.user = DO_IMAGE_USER -api.env.key_filename = '/root/.ssh/id_rsa' - -# Correct timezone -sudo('echo "America/Los_Angeles" >/etc/timezone') -sudo('dpkg-reconfigure --frontend noninteractive tzdata') - -# Load public docker-ci key -sudo("echo '{}' >> /root/.ssh/authorized_keys".format(env['DOCKER_CI_PUB'])) - -# Create docker nightly release credentials file -credentials = { - 'AWS_ACCESS_KEY': env['PKG_ACCESS_KEY'], - 'AWS_SECRET_KEY': env['PKG_SECRET_KEY'], - 'GPG_PASSPHRASE': env['PKG_GPG_PASSPHRASE']} -open(DOCKER_CI_PATH + '/nightlyrelease/release_credentials.json', 'w').write( - base64.b64encode(json.dumps(credentials))) - -# Transfer docker -sudo('mkdir -p ' + DOCKER_CI_PATH) -sudo('chown {}.{} {}'.format(DO_IMAGE_USER, DO_IMAGE_USER, DOCKER_CI_PATH)) -call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, DO_IMAGE_USER, ip, - os.path.dirname(DOCKER_CI_PATH)), shell=True) - -# Install Docker and Buildbot dependencies -sudo('mkdir /mnt/docker; ln -s /mnt/docker /var/lib/docker') -sudo('apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9') -sudo('echo deb https://get.docker.io/ubuntu docker main >' - ' /etc/apt/sources.list.d/docker.list') -sudo('echo -e "deb http://archive.ubuntu.com/ubuntu raring main universe\n' - 'deb http://us.archive.ubuntu.com/ubuntu/ raring-security main universe\n"' - ' > /etc/apt/sources.list; apt-get update') -sudo('DEBIAN_FRONTEND=noninteractive apt-get install -q -y wget python-dev' - ' python-pip supervisor git mercurial linux-image-extra-$(uname -r)' - ' aufs-tools make libfontconfig libevent-dev libsqlite3-dev libssl-dev') -sudo('wget -O - https://go.googlecode.com/files/go1.2.linux-amd64.tar.gz | ' - 'tar -v -C /usr/local -xz; ln -s /usr/local/go/bin/go /usr/bin/go') -sudo('GOPATH=/go go get -d github.com/dotcloud/docker') -sudo('pip install -r {}/requirements.txt'.format(CFG_PATH)) - -# Install docker and testing dependencies -sudo('apt-get install -y -q lxc-docker') -sudo('curl -s https://phantomjs.googlecode.com/files/' - 'phantomjs-1.9.1-linux-x86_64.tar.bz2 | tar jx -C /usr/bin' - ' --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs') - -# Build docker-ci containers -sudo('cd {}; docker build -t docker .'.format(DOCKER_PATH)) -sudo('cd {}; docker build -t docker-ci .'.format(DOCKER_CI_PATH)) -sudo('cd {}/nightlyrelease; docker build -t dockerbuilder .'.format( - DOCKER_CI_PATH)) -sudo('cd {}/registry-coverage; docker build -t registry_coverage .'.format( - DOCKER_CI_PATH)) - -# Download docker-ci testing container -sudo('docker pull mzdaniel/test_docker') - -# Setup buildbot 
-sudo('mkdir /data') -sudo('{0}/setup.sh root {0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}' - ' {11} {12}'.format(CFG_PATH, DOCKER_PATH, env['BUILDBOT_PWD'], - env['IRC_PWD'], env['IRC_CHANNEL'], env['SMTP_USER'], - env['SMTP_PWD'], env['EMAIL_RCP'], env['REGISTRY_USER'], - env['REGISTRY_PWD'], env['REGISTRY_BUCKET'], env['REGISTRY_ACCESS_KEY'], - env['REGISTRY_SECRET_KEY'])) - -# Preventively reboot docker-ci daily -sudo('ln -s /sbin/reboot /etc/cron.daily') diff --git a/hack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh b/hack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh deleted file mode 100755 index c29ede5b81..0000000000 --- a/hack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash - -set -x -# Generate a random string of $1 characters -function random { - cat /dev/urandom | tr -cd 'a-f0-9' | head -c $1 -} - -# Compute test paths -BASE_PATH=`pwd`/test_docker_$(random 12) -DOCKER_PATH=$BASE_PATH/go/src/github.com/dotcloud/docker -export GOPATH=$BASE_PATH/go:$DOCKER_PATH/vendor - -# Fetch latest master -mkdir -p $DOCKER_PATH -cd $DOCKER_PATH -git init . -git fetch -q http://github.com/dotcloud/docker master -git reset --hard FETCH_HEAD - -# Fetch go coverage -cd $BASE_PATH/go -GOPATH=$BASE_PATH/go go get github.com/axw/gocov/gocov -sudo -E GOPATH=$GOPATH ./bin/gocov test -deps -exclude-goroot -v\ - -exclude github.com/gorilla/context,github.com/gorilla/mux,github.com/kr/pty,\ -code.google.com/p/go.net/websocket\ - github.com/dotcloud/docker | ./bin/gocov report; exit_status=$? - -# Cleanup testing directory -rm -rf $BASE_PATH - -exit $exit_status diff --git a/hack/infrastructure/docker-ci/docker-test/Dockerfile b/hack/infrastructure/docker-ci/docker-test/Dockerfile deleted file mode 100644 index 0f3a63f5f1..0000000000 --- a/hack/infrastructure/docker-ci/docker-test/Dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -# VERSION: 0.4 -# DOCKER-VERSION 0.6.6 -# AUTHOR: Daniel Mizyrycki -# DESCRIPTION: Testing docker PRs and commits on top of master using -# REFERENCES: This code reuses the excellent implementation of -# Docker in Docker made by Jerome Petazzoni. -# https://github.com/jpetazzo/dind -# COMMENTS: -# This Dockerfile adapts /Dockerfile to enable docker PRs and commits testing -# Optional arguments: -# [commit] (default: 'HEAD') -# [repo] (default: 'http://github.com/dotcloud/docker') -# [branch] (default: 'master') -# TO_BUILD: docker build -t test_docker . -# TO_RUN: docker run -privileged test_docker hack/dind test_docker.sh [commit] [repo] [branch] - -from docker -maintainer Daniel Mizyrycki - -# Setup go in PATH. 
Extracted from /Dockerfile -env PATH /usr/local/go/bin:$PATH - -# Add test_docker.sh -add test_docker.sh /usr/bin/test_docker.sh -run chmod +x /usr/bin/test_docker.sh diff --git a/hack/infrastructure/docker-ci/docker-test/test_docker.sh b/hack/infrastructure/docker-ci/docker-test/test_docker.sh deleted file mode 100755 index 14816706ed..0000000000 --- a/hack/infrastructure/docker-ci/docker-test/test_docker.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash - -set -x -COMMIT=${1-HEAD} -REPO=${2-http://github.com/dotcloud/docker} -BRANCH=${3-master} - -# Compute test paths -DOCKER_PATH=/go/src/github.com/dotcloud/docker - -# Timestamp -echo -date; echo - -# Fetch latest master -cd / -rm -rf /go -git clone -q -b master http://github.com/dotcloud/docker $DOCKER_PATH -cd $DOCKER_PATH - -# Merge commit -git fetch -q "$REPO" "$BRANCH" -git merge --no-edit $COMMIT || exit 255 - -# Test commit -./hack/make.sh test; exit_status=$? - -# Display load if test fails -if [ $exit_status -ne 0 ] ; then - uptime; echo; free -fi - -exit $exit_status diff --git a/hack/infrastructure/docker-ci/dockertest/docker b/hack/infrastructure/docker-ci/dockertest/docker new file mode 120000 index 0000000000..e3f094ee63 --- /dev/null +++ b/hack/infrastructure/docker-ci/dockertest/docker @@ -0,0 +1 @@ +project \ No newline at end of file diff --git a/hack/infrastructure/docker-ci/dockertest/docker-registry b/hack/infrastructure/docker-ci/dockertest/docker-registry new file mode 120000 index 0000000000..e3f094ee63 --- /dev/null +++ b/hack/infrastructure/docker-ci/dockertest/docker-registry @@ -0,0 +1 @@ +project \ No newline at end of file diff --git a/hack/infrastructure/docker-ci/dockertest/nightlyrelease b/hack/infrastructure/docker-ci/dockertest/nightlyrelease new file mode 100755 index 0000000000..475b088065 --- /dev/null +++ b/hack/infrastructure/docker-ci/dockertest/nightlyrelease @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +if [ "$DEPLOYMENT" == "production" ]; then + AWS_S3_BUCKET='test.docker.io' +else + AWS_S3_BUCKET='get-staging.docker.io' +fi + +docker run -rm -privileged -v /run:/var/socket \ + -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY \ + -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE \ + -e DOCKER_RELEASE=1 -e DEPLOYMENT=$DEPLOYMENT docker-ci/testbuilder docker + diff --git a/hack/infrastructure/docker-ci/dockertest/project b/hack/infrastructure/docker-ci/dockertest/project new file mode 100755 index 0000000000..160f2d5d59 --- /dev/null +++ b/hack/infrastructure/docker-ci/dockertest/project @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -x + +PROJECT_NAME=$(basename $0) + +docker run -rm -u sysadmin -e DEPLOYMENT=$DEPLOYMENT -v /run:/var/socket \ + -v /home/docker-ci/coverage/$PROJECT_NAME:/data docker-ci/testbuilder $PROJECT_NAME $1 $2 $3 + diff --git a/hack/infrastructure/docker-ci/nginx/nginx.conf b/hack/infrastructure/docker-ci/nginx/nginx.conf new file mode 100644 index 0000000000..6649741134 --- /dev/null +++ b/hack/infrastructure/docker-ci/nginx/nginx.conf @@ -0,0 +1,12 @@ +server { + listen 80; + root /data/docker-ci; + + location / { + proxy_pass http://localhost:8000/; + } + + location /coverage { + root /data/docker-ci; + } +} diff --git a/hack/infrastructure/docker-ci/nightlyrelease/Dockerfile b/hack/infrastructure/docker-ci/nightlyrelease/Dockerfile deleted file mode 100644 index 2100a9e8e9..0000000000 --- a/hack/infrastructure/docker-ci/nightlyrelease/Dockerfile +++ /dev/null @@ -1,30 +0,0 @@ -# VERSION: 1.6 -# DOCKER-VERSION 0.6.6 -# AUTHOR: Daniel 
Mizyrycki -# DESCRIPTION: Build docker nightly release using Docker in Docker. -# REFERENCES: This code reuses the excellent implementation of docker in docker -# made by Jerome Petazzoni. https://github.com/jpetazzo/dind -# COMMENTS: -# release_credentials.json is a base64 json encoded file containing: -# { "AWS_ACCESS_KEY": "Test_docker_AWS_S3_bucket_id", -# "AWS_SECRET_KEY": "Test_docker_AWS_S3_bucket_key", -# "GPG_PASSPHRASE": "Test_docker_GPG_passphrase_signature" } -# TO_BUILD: docker build -t dockerbuilder . -# TO_RELEASE: docker run -i -t -privileged -e AWS_S3_BUCKET="test.docker.io" dockerbuilder hack/dind dockerbuild.sh - -from docker -maintainer Daniel Mizyrycki - -# Add docker dependencies and downloading packages -run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list -run apt-get update; apt-get install -y -q wget python2.7 - -# Add production docker binary -run wget -q -O /usr/bin/docker http://get.docker.io/builds/Linux/x86_64/docker-latest; chmod +x /usr/bin/docker - -# Add proto docker builder -add ./dockerbuild.sh /usr/bin/dockerbuild.sh -run chmod +x /usr/bin/dockerbuild.sh - -# Add release credentials -add ./release_credentials.json /root/release_credentials.json diff --git a/hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh b/hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh deleted file mode 100644 index d5e58da7e1..0000000000 --- a/hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash - -# Variables AWS_ACCESS_KEY, AWS_SECRET_KEY and PG_PASSPHRASE are decoded -# from /root/release_credentials.json -# Variable AWS_S3_BUCKET is passed to the environment from docker run -e - -# Turn debug off to load credentials from the environment -set +x -eval $(cat /root/release_credentials.json | python -c ' -import sys,json,base64; -d=json.loads(base64.b64decode(sys.stdin.read())); -exec("""for k in d: print "export {0}=\\"{1}\\"".format(k,d[k])""")') - -# Fetch docker master branch -set -x -cd / -rm -rf /go -git clone -q -b master http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker -cd /go/src/github.com/dotcloud/docker - -# Launch docker daemon using dind inside the container -/usr/bin/docker version -/usr/bin/docker -d & -sleep 5 - -# Build Docker release container -docker build -t docker . - -# Test docker and if everything works well, release -echo docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=XXXXX -e AWS_SECRET_KEY=XXXXX -e GPG_PASSPHRASE=XXXXX docker hack/release.sh -set +x -docker run -privileged -i -t -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh -exit_status=$? - -# Display load if test fails -set -x -if [ $exit_status -ne 0 ] ; then - uptime; echo; free - exit 1 -fi diff --git a/hack/infrastructure/docker-ci/registry-coverage/Dockerfile b/hack/infrastructure/docker-ci/registry-coverage/Dockerfile deleted file mode 100644 index e544645b67..0000000000 --- a/hack/infrastructure/docker-ci/registry-coverage/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -# VERSION: 0.1 -# DOCKER-VERSION 0.6.4 -# AUTHOR: Daniel Mizyrycki -# DESCRIPTION: Docker registry coverage -# COMMENTS: Add registry coverage into the docker-ci image -# TO_BUILD: docker build -t registry_coverage . 
-# TO_RUN: docker run registry_coverage - -from docker-ci -maintainer Daniel Mizyrycki - -# Add registry_coverager.sh and dependencies -run pip install coverage flask pyyaml requests simplejson python-glanceclient \ - blinker redis boto gevent rsa mock -add registry_coverage.sh /usr/bin/registry_coverage.sh -run chmod +x /usr/bin/registry_coverage.sh - -cmd "/usr/bin/registry_coverage.sh" diff --git a/hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh b/hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh deleted file mode 100755 index c67b17eba0..0000000000 --- a/hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -set -x - -# Setup the environment -REGISTRY_PATH=/data/docker-registry -export SETTINGS_FLAVOR=test -export DOCKER_REGISTRY_CONFIG=config_test.yml -export PYTHONPATH=$REGISTRY_PATH/test - -# Fetch latest docker-registry master -rm -rf $REGISTRY_PATH -git clone https://github.com/dotcloud/docker-registry -b master $REGISTRY_PATH -cd $REGISTRY_PATH - -# Generate coverage -coverage run -m unittest discover test || exit 1 -coverage report --include='./*' --omit='./test/*' diff --git a/hack/infrastructure/docker-ci/setup.sh b/hack/infrastructure/docker-ci/setup.sh new file mode 100755 index 0000000000..65a00f6dd0 --- /dev/null +++ b/hack/infrastructure/docker-ci/setup.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +# Set timezone +echo "GMT" >/etc/timezone +dpkg-reconfigure --frontend noninteractive tzdata + +# Set ssh superuser +mkdir -p /data/buildbot /var/run/sshd /run +useradd -m -d /home/sysadmin -s /bin/bash -G sudo,docker -p '*' sysadmin +sed -Ei 's/(\%sudo.*) ALL/\1 NOPASSWD:ALL/' /etc/sudoers +cd /home/sysadmin +mkdir .ssh +chmod 700 .ssh +cat > .ssh/authorized_keys << 'EOF' +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7ALVhwQ68q1SjrKaAduOuOEAcWmb8kDZf5qA7T1fM8AP07EDC7nSKRJ8PXUBGTOQfxm89coJDuSJsTAZ+1PvglXhA0Mq6+knc6ZrZY+SuZlDIDAk4TOdVPoDZnmR1YW2McxHkhcGIOKeC8MMig5NeEjtgQwXzauUSPqeh8HMlLZRMooFYyyluIpn7NaCLzyWjwAQz2s3KyI7VE7hl+ncCrW86v+dciEdwqtzNoUMFb3iDpPxaiCl3rv+SB7co/5eUDTs1FZvUcYMXKQuf8R+2ZKzXOpwr0Zs8sKQXvXavCeWykwGgXLBjVkvrDcHuDD6UXCW63UKgmRECpLZaMBVIIRWLEEgTS5OSQTcxpMVe5zUW6sDvXHTcdPwWrcn1dE9F/0vLC0HJ4ADKelLX5zyTpmXGbuZuntIf1JO67D/K/P++uV1rmVIH+zgtOf23w5rX2zKb4BSTqP0sv61pmWV7MEVoEz6yXswcTjS92tb775v7XLU9vKAkt042ORFdE4/++hejhL/Lj52IRgjt1CJZHZsR9JywJZrz3kYuf8eU2J2FYh0Cpz5gmf0f+12Rt4HztnZxGPP4KuMa66e4+hpx1jynjMZ7D5QUnNYEmuvJByopn8HSluuY/kS5MMyZCZtJLEPGX4+yECX0Di/S0vCRl2NyqfCBqS+yXXT5SA1nFw== docker-test@docker.io +EOF +chmod 600 .ssh/authorized_keys +chown -R sysadmin .ssh + +# Fix docker group id for use of host dockerd by sysadmin +sed -Ei 's/(docker:x:)[^:]+/\1999/' /etc/group + +# Create buildbot configuration +cd /data/buildbot; buildbot create-master master +cp -a /data/buildbot/master/master.cfg.sample \ + /data/buildbot/master/master.cfg +cd /data/buildbot; \ + buildslave create-slave slave localhost:9989 buildworker pass +cp /docker-ci/buildbot/master.cfg /data/buildbot/master + +# Patch github webstatus to capture pull requests +cp /docker-ci/buildbot/github.py /usr/local/lib/python2.7/dist-packages/buildbot/status/web/hooks +chown -R sysadmin.sysadmin /data + +# Create nginx configuration +rm /etc/nginx/sites-enabled/default +cp /docker-ci/nginx/nginx.conf /etc/nginx/conf.d/buildbot.conf +/bin/echo -e '\ndaemon off;\n' >> /etc/nginx/nginx.conf + +# Set supervisord buildbot, nginx and sshd processes +/bin/echo -e "\ +[program:buildmaster]\n\ +command=twistd --nodaemon 
--no_save -y buildbot.tac\n\ +directory=/data/buildbot/master\n\ +user=sysadmin\n\n\ +[program:buildworker]\n\ +command=twistd --nodaemon --no_save -y buildbot.tac\n\ +directory=/data/buildbot/slave\n\ +user=sysadmin\n" > \ + /etc/supervisor/conf.d/buildbot.conf +/bin/echo -e "[program:nginx]\ncommand=/usr/sbin/nginx\n" > \ + /etc/supervisor/conf.d/nginx.conf +/bin/echo -e "[program:sshd]\ncommand=/usr/sbin/sshd -D\n" > \ + /etc/supervisor/conf.d/sshd.conf diff --git a/hack/infrastructure/docker-ci/testbuilder/Dockerfile b/hack/infrastructure/docker-ci/testbuilder/Dockerfile new file mode 100644 index 0000000000..a008da6843 --- /dev/null +++ b/hack/infrastructure/docker-ci/testbuilder/Dockerfile @@ -0,0 +1,12 @@ +# TO_BUILD: docker build -rm -no-cache -t docker-ci/testbuilder . +# TO_RUN: docker run -rm -u sysadmin \ +# -v /run:/var/socket docker-ci/testbuilder docker-registry +# + +FROM docker-ci/docker-ci +ENV HOME /home/sysadmin + +RUN mkdir /testbuilder +ADD . /testbuilder + +ENTRYPOINT ["/testbuilder/testbuilder.sh"] diff --git a/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh b/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh new file mode 100755 index 0000000000..72087462ad --- /dev/null +++ b/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +set -x +set -e +PROJECT_PATH=$1 + +# Build the docker project +cd /data/$PROJECT_PATH +sg docker -c "docker build -q -rm -t registry ." +cd test; sg docker -c "docker build -q -rm -t docker-registry-test ." + +# Run the tests +sg docker -c "docker run -rm -v /home/docker-ci/coverage/docker-registry:/data docker-registry-test" diff --git a/hack/infrastructure/docker-ci/testbuilder/docker.sh b/hack/infrastructure/docker-ci/testbuilder/docker.sh new file mode 100755 index 0000000000..b365dd7eaf --- /dev/null +++ b/hack/infrastructure/docker-ci/testbuilder/docker.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -x +set -e +PROJECT_PATH=$1 + +# Build the docker project +cd /data/$PROJECT_PATH +sg docker -c "docker build -q -rm -t docker ." 
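Every docker invocation in these testbuilder scripts is wrapped in sg(1): the container runs as the unprivileged sysadmin user, talks to the host daemon through the host's /run bind-mounted at /var/socket, and setup.sh pins the container's docker group to the host's gid 999 so the socket stays reachable. The pattern in isolation (image name illustrative):

```bash
# sg runs one command with the supplementary "docker" group, which owns
# the daemon socket; without it these calls would be permission-denied.
sg docker -c "docker version"
sg docker -c "docker build -q -rm -t example-image ."
```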
+ +if [ "$DOCKER_RELEASE" == "1" ]; then + # Do nightly release + echo sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY= -e AWS_SECRET_KEY= -e GPG_PASSPHRASE= docker hack/release.sh" + set +x + sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh" +else + # Run the tests + sg docker -c "docker run -rm -privileged -v /home/docker-ci/coverage/docker:/data docker ./hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh" +fi diff --git a/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh b/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh new file mode 100755 index 0000000000..70701343c2 --- /dev/null +++ b/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# Download, build and run a docker project tests +# Environment variables: DEPLOYMENT + +cat $0 +set -e +set -x + +PROJECT=$1 +COMMIT=${2-HEAD} +REPO=${3-https://github.com/dotcloud/$PROJECT} +BRANCH=${4-master} +REPO_PROJ="https://github.com/docker-test/$PROJECT" +if [ "$DEPLOYMENT" == "production" ]; then + REPO_PROJ="https://github.com/dotcloud/$PROJECT" +fi +set +x + +# Generate a random string of $1 characters +function random { + cat /dev/urandom | tr -cd 'a-f0-9' | head -c $1 +} + +PROJECT_PATH="$PROJECT-tmp-$(random 12)" + +# Set docker-test git user +set -x +git config --global user.email "docker-test@docker.io" +git config --global user.name "docker-test" + +# Fetch project +git clone -q $REPO_PROJ -b master /data/$PROJECT_PATH +cd /data/$PROJECT_PATH +echo "Git commit: $(git rev-parse HEAD)" +git fetch -q $REPO $BRANCH +git merge --no-edit $COMMIT + +# Build the project dockertest +/testbuilder/$PROJECT.sh $PROJECT_PATH +rm -rf /data/$PROJECT_PATH diff --git a/hack/infrastructure/docker-ci/tool/backup.py b/hack/infrastructure/docker-ci/tool/backup.py new file mode 100755 index 0000000000..2db633e526 --- /dev/null +++ b/hack/infrastructure/docker-ci/tool/backup.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python + +import os,sys,json +from datetime import datetime +from filecmp import cmp +from subprocess import check_call +from boto.s3.key import Key +from boto.s3.connection import S3Connection + +def ENV(x): + '''Promote an environment variable for global use returning its value''' + retval = os.environ.get(x, '') + globals()[x] = retval + return retval + +ROOT_PATH = '/data/backup/docker-ci' +TODAY = str(datetime.today())[:10] +BACKUP_FILE = '{}/docker-ci_{}.tgz'.format(ROOT_PATH, TODAY) +BACKUP_LINK = '{}/docker-ci.tgz'.format(ROOT_PATH) +ENV('BACKUP_BUCKET') +ENV('BACKUP_AWS_ID') +ENV('BACKUP_AWS_SECRET') + +'''Create full master buildbot backup, avoiding duplicates''' +# Ensure backup path exist +if not os.path.exists(ROOT_PATH): + os.makedirs(ROOT_PATH) +# Make actual backups +check_call('/bin/tar czf {} -C /data --exclude=backup --exclude=buildbot/slave' + ' . 
1>/dev/null 2>&1'.format(BACKUP_FILE),shell=True) +# remove previous dump if it is the same as the latest +if (os.path.exists(BACKUP_LINK) and cmp(BACKUP_FILE, BACKUP_LINK) and + os.path._resolve_link(BACKUP_LINK) != BACKUP_FILE): + os.unlink(os.path._resolve_link(BACKUP_LINK)) +# Recreate backup link pointing to latest backup +try: + os.unlink(BACKUP_LINK) +except: + pass +os.symlink(BACKUP_FILE, BACKUP_LINK) + +# Make backup on S3 +bucket = S3Connection(BACKUP_AWS_ID,BACKUP_AWS_SECRET).get_bucket(BACKUP_BUCKET) +k = Key(bucket) +k.key = BACKUP_FILE +k.set_contents_from_filename(BACKUP_FILE) +bucket.copy_key(os.path.basename(BACKUP_LINK),BACKUP_BUCKET,BACKUP_FILE[1:]) diff --git a/hack/make.sh b/hack/make.sh index ef13c1a283..63edca4d4c 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -16,7 +16,7 @@ set -e # in the Dockerfile at the root of the source. In other words: # DO NOT CALL THIS SCRIPT DIRECTLY. # - The right way to call this script is to invoke "make" from -# your checkout of the Docker repository. +# your checkout of the Docker repository. # the Makefile will do a "docker build -t docker ." and then # "docker run hack/make.sh" in the resulting container image. # @@ -53,9 +53,9 @@ DEFAULT_BUNDLES=( ) VERSION=$(cat ./VERSION) -if [ -d .git ] && command -v git &> /dev/null; then +if command -v git &> /dev/null && git rev-parse &> /dev/null; then GITCOMMIT=$(git rev-parse --short HEAD) - if [ -n "$(git status --porcelain)" ]; then + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then GITCOMMIT="$GITCOMMIT-dirty" fi elif [ "$DOCKER_GITCOMMIT" ]; then @@ -68,10 +68,37 @@ else exit 1 fi +if [ "$AUTO_GOPATH" ]; then + rm -rf .gopath + mkdir -p .gopath/src/github.com/dotcloud + ln -sf ../../../.. .gopath/src/github.com/dotcloud/docker + export GOPATH="$(pwd)/.gopath:$(pwd)/vendor" +fi + +if [ ! "$GOPATH" ]; then + echo >&2 'error: missing GOPATH; please see http://golang.org/doc/code.html#GOPATH' + echo >&2 ' alternatively, set AUTO_GOPATH=1' + exit 1 +fi + # Use these flags when compiling the tests and final binary -LDFLAGS='-X main.GITCOMMIT "'$GITCOMMIT'" -X main.VERSION "'$VERSION'" -w' -LDFLAGS_STATIC='-X github.com/dotcloud/docker/utils.IAMSTATIC true -linkmode external -extldflags "-lpthread -static -Wl,--unresolved-symbols=ignore-in-object-files"' -BUILDFLAGS='-tags netgo -a' +LDFLAGS=' + -w + -X github.com/dotcloud/docker/dockerversion.GITCOMMIT "'$GITCOMMIT'" + -X github.com/dotcloud/docker/dockerversion.VERSION "'$VERSION'" +' +LDFLAGS_STATIC='-linkmode external' +EXTLDFLAGS_STATIC='-static' +BUILDFLAGS=( -a -tags "netgo $DOCKER_BUILDTAGS" ) + +# A few more flags that are specific just to building a completely-static binary (see hack/make/binary) +# PLEASE do not use these anywhere else. 
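The LDFLAGS rework above also moves the stamped variables from the utils package into the new dockerversion package. As a sketch of what these flags mean once they reach the linker, using the pre-Go-1.5 `-X name value` form this script relies on (commit and version values are placeholders):

```bash
# -w drops DWARF debug info; each -X pair overwrites a package string
# variable at link time, so the binary knows its own version.
go build -ldflags '
  -w
  -X github.com/dotcloud/docker/dockerversion.GITCOMMIT "deadbeef"
  -X github.com/dotcloud/docker/dockerversion.VERSION "0.8.1"
' ./docker
```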
+EXTLDFLAGS_STATIC_DOCKER="$EXTLDFLAGS_STATIC -lpthread -Wl,--unresolved-symbols=ignore-in-object-files" +LDFLAGS_STATIC_DOCKER=" + $LDFLAGS_STATIC + -X github.com/dotcloud/docker/dockerversion.IAMSTATIC true + -extldflags \"$EXTLDFLAGS_STATIC_DOCKER\" +" HAVE_GO_TEST_COVER= if \ @@ -88,21 +115,32 @@ fi # go_test_dir() { dir=$1 + coverpkg=$2 testcover=() if [ "$HAVE_GO_TEST_COVER" ]; then # if our current go install has -cover, we want to use it :) mkdir -p "$DEST/coverprofiles" coverprofile="docker${dir#.}" coverprofile="$DEST/coverprofiles/${coverprofile//\//-}" - testcover=( -cover -coverprofile "$coverprofile" ) + testcover=( -cover -coverprofile "$coverprofile" $coverpkg ) fi ( set -x cd "$dir" - go test ${testcover[@]} -ldflags "$LDFLAGS" $BUILDFLAGS $TESTFLAGS + go test ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS ) } +# This helper function walks the current directory looking for directories +# holding certain files ($1 parameter), and prints their paths on standard +# output, one per line. +find_dirs() { + find -not \( \ + \( -wholename './vendor' -o -wholename './integration' -o -wholename './contrib' -o -wholename './pkg/mflag/example' \) \ + -prune \ + \) -name "$1" -print0 | xargs -0n1 dirname | sort -u +} + bundle() { bundlescript=$1 bundle=$(basename $bundlescript) diff --git a/hack/make/binary b/hack/make/binary index 93e99fee8f..7272b1ede0 100644 --- a/hack/make/binary +++ b/hack/make/binary @@ -2,5 +2,12 @@ DEST=$1 -go build -o $DEST/docker-$VERSION -ldflags "$LDFLAGS $LDFLAGS_STATIC" $BUILDFLAGS ./docker +go build \ + -o $DEST/docker-$VERSION \ + "${BUILDFLAGS[@]}" \ + -ldflags " + $LDFLAGS + $LDFLAGS_STATIC_DOCKER + " \ + ./docker echo "Created binary: $DEST/docker-$VERSION" diff --git a/hack/make/cross b/hack/make/cross index a67ab6c28a..e8f90e29b7 100644 --- a/hack/make/cross +++ b/hack/make/cross @@ -17,7 +17,7 @@ for platform in $DOCKER_CROSSPLATFORMS; do mkdir -p "$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION export GOOS=${platform%/*} export GOARCH=${platform##*/} - export LDFLAGS_STATIC="" # we just need a simple client for these platforms (TODO this might change someday) + export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms (TODO this might change someday) source "$(dirname "$BASH_SOURCE")/binary" "$DEST/$platform" ) done diff --git a/hack/make/dynbinary b/hack/make/dynbinary index c02094c0c5..d4f583fb62 100644 --- a/hack/make/dynbinary +++ b/hack/make/dynbinary @@ -2,16 +2,40 @@ DEST=$1 -# dockerinit still needs to be a static binary, even if docker is dynamic -CGO_ENABLED=0 go build -o $DEST/dockerinit-$VERSION -ldflags "$LDFLAGS -d" $BUILDFLAGS ./dockerinit -echo "Created binary: $DEST/dockerinit-$VERSION" -ln -sf dockerinit-$VERSION $DEST/dockerinit - -# sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair compiled for one another -export DOCKER_INITSHA1="$(sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)" +if [ -z "$DOCKER_CLIENTONLY" ]; then + # dockerinit still needs to be a static binary, even if docker is dynamic + go build \ + -o $DEST/dockerinit-$VERSION \ + "${BUILDFLAGS[@]}" \ + -ldflags " + $LDFLAGS + $LDFLAGS_STATIC + -extldflags \"$EXTLDFLAGS_STATIC\" + " \ + ./dockerinit + echo "Created binary: $DEST/dockerinit-$VERSION" + ln -sf dockerinit-$VERSION $DEST/dockerinit + + sha1sum= + if command -v sha1sum &> /dev/null; then + sha1sum=sha1sum + elif command -v shasum &> /dev/null; then + # Mac OS X - why couldn't they just use the 
same command name and be happy? + sha1sum=shasum + else + echo >&2 'error: cannot find sha1sum command or equivalent' + exit 1 + fi + + # sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair compiled for one another + export DOCKER_INITSHA1="$($sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)" +else + # DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :) + export DOCKER_INITSHA1="" +fi # exported so that "dyntest" can easily access it later without recalculating it ( - export LDFLAGS_STATIC="-X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\" -X github.com/dotcloud/docker/utils.INITPATH \"$DOCKER_INITPATH\"" + export LDFLAGS_STATIC_DOCKER="-X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" -X github.com/dotcloud/docker/dockerversion.INITPATH \"$DOCKER_INITPATH\"" source "$(dirname "$BASH_SOURCE")/binary" ) diff --git a/hack/make/dyntest b/hack/make/dyntest index eb5c2b73ed..744db3e999 100644 --- a/hack/make/dyntest +++ b/hack/make/dyntest @@ -12,6 +12,8 @@ fi ( export TEST_DOCKERINIT_PATH="$INIT" - export LDFLAGS_STATIC="-X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\"" + export LDFLAGS_STATIC_DOCKER=" + -X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" + " source "$(dirname "$BASH_SOURCE")/test" ) diff --git a/hack/make/dyntest-integration b/hack/make/dyntest-integration index 0887c45be0..ef7e6a5a41 100644 --- a/hack/make/dyntest-integration +++ b/hack/make/dyntest-integration @@ -12,6 +12,8 @@ fi ( export TEST_DOCKERINIT_PATH="$INIT" - export LDFLAGS_STATIC="-X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\"" + export LDFLAGS_STATIC_DOCKER=" + -X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" + " source "$(dirname "$BASH_SOURCE")/test-integration" ) diff --git a/hack/make/test b/hack/make/test index 760c5a5fc6..39ba5cd3a5 100644 --- a/hack/make/test +++ b/hack/make/test @@ -4,9 +4,9 @@ DEST=$1 set -e -TEXTRESET=$'\033[0m' # reset the foreground colour RED=$'\033[31m' GREEN=$'\033[32m' +TEXTRESET=$'\033[0m' # reset the foreground colour # Run Docker's test suite, including sub-packages, and store their output as a bundle # If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. @@ -19,10 +19,10 @@ bundle_test() { date TESTS_FAILED=() - for test_dir in $(find_test_dirs); do + for test_dir in $(find_dirs '*_test.go'); do echo - if ! LDFLAGS="$LDFLAGS $LDFLAGS_STATIC" go_test_dir "$test_dir"; then + if ! LDFLAGS="$LDFLAGS $LDFLAGS_STATIC_DOCKER" go_test_dir "$test_dir"; then TESTS_FAILED+=("$test_dir") echo echo "${RED}Tests failed: $test_dir${TEXTRESET}" @@ -48,15 +48,4 @@ bundle_test() { } 2>&1 | tee $DEST/test.log } - -# This helper function walks the current directory looking for directories -# holding Go test files, and prints their paths on standard output, one per -# line. 
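The find_test_dirs helper deleted below is subsumed by the parameterized find_dirs added to hack/make.sh above; the walk is the same, but the file pattern is now an argument so one helper serves both the test runner and coverage. Usage sketch:

```bash
# find_dirs prunes ./vendor, ./integration, ./contrib and
# ./pkg/mflag/example, then prints each directory containing a match.
find_dirs '*_test.go'   # directories holding Go tests (the old behaviour)
find_dirs '*.go'        # every package directory, used to assemble -coverpkg
```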
-find_test_dirs() { - find -not \( \ - \( -wholename './vendor' -o -wholename './integration' \) \ - -prune \ - \) -name '*_test.go' -print0 | xargs -0n1 dirname | sort -u -} - bundle_test diff --git a/hack/make/test-integration b/hack/make/test-integration index f1ab0b99c3..0af4c23c48 100644 --- a/hack/make/test-integration +++ b/hack/make/test-integration @@ -5,7 +5,12 @@ DEST=$1 set -e bundle_test_integration() { - LDFLAGS="$LDFLAGS $LDFLAGS_STATIC" go_test_dir ./integration + LDFLAGS="$LDFLAGS $LDFLAGS_STATIC_DOCKER" go_test_dir ./integration \ + "-coverpkg $(find_dirs '*.go' | sed 's,^\.,github.com/dotcloud/docker,g' | paste -d, -s)" } -bundle_test_integration 2>&1 | tee $DEST/test.log +# this "grep" hides some really irritating warnings that "go test -coverpkg" +# spews when it is given packages that aren't used +bundle_test_integration 2>&1 \ + | grep --line-buffered -v '^warning: no packages being tested depend on ' \ + | tee $DEST/test.log diff --git a/hack/make/ubuntu b/hack/make/ubuntu index 1d309d2b5c..ebc12f27ec 100644 --- a/hack/make/ubuntu +++ b/hack/make/ubuntu @@ -29,42 +29,36 @@ bundle_ubuntu() { cp contrib/udev/80-docker.rules $DIR/etc/udev/rules.d/ # Include our init scripts - mkdir -p $DIR/etc - cp -R contrib/init/upstart $DIR/etc/init - cp -R contrib/init/sysvinit $DIR/etc/init.d - mkdir -p $DIR/lib/systemd - cp -R contrib/init/systemd $DIR/lib/systemd/system - + mkdir -p $DIR/etc/init + cp contrib/init/upstart/docker.conf $DIR/etc/init/ + mkdir -p $DIR/etc/init.d + cp contrib/init/sysvinit-debian/docker $DIR/etc/init.d/ mkdir -p $DIR/etc/default - cat > $DIR/etc/default/docker <<'EOF' -# Docker Upstart and SysVinit configuration file - -# Customize location of Docker binary (especially for development testing). -#DOCKER="/usr/local/bin/docker" - -# Use DOCKER_OPTS to modify the daemon startup options. -#DOCKER_OPTS="-dns 8.8.8.8" - -# If you need Docker to use an HTTP proxy, it can also be specified here. -#export http_proxy=http://127.0.0.1:3128/ -EOF + cp contrib/init/sysvinit-debian/docker.default $DIR/etc/default/docker + mkdir -p $DIR/lib/systemd/system + cp contrib/init/systemd/docker.service $DIR/lib/systemd/system/ # Copy the binary # This will fail if the binary bundle hasn't been built mkdir -p $DIR/usr/bin - # Copy the binary - # This will fail if the binary bundle hasn't been built cp $DEST/../binary/docker-$VERSION $DIR/usr/bin/docker # Generate postinst/prerm/postrm scripts - cat > /tmp/postinst <<'EOF' + cat > $DEST/postinst <<'EOF' #!/bin/sh set -e set -u -getent group docker > /dev/null || groupadd --system docker || true +if [ "$1" = 'configure' ] && [ -z "$2" ]; then + if ! getent group docker > /dev/null; then + groupadd --system docker + fi +fi -update-rc.d docker defaults > /dev/null || true +if ! 
{ [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then + # we only need to do this if upstart isn't in charge + update-rc.d docker defaults > /dev/null || true +fi if [ -n "$2" ]; then _dh_action=restart else @@ -74,7 +68,7 @@ service docker $_dh_action 2>/dev/null || true #DEBHELPER# EOF - cat > /tmp/prerm <<'EOF' + cat > $DEST/prerm <<'EOF' #!/bin/sh set -e set -u @@ -83,7 +77,7 @@ service docker stop 2>/dev/null || true #DEBHELPER# EOF - cat > /tmp/postrm <<'EOF' + cat > $DEST/postrm <<'EOF' #!/bin/sh set -e set -u @@ -101,48 +95,61 @@ fi #DEBHELPER# EOF # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way - chmod +x /tmp/postinst /tmp/prerm + chmod +x $DEST/postinst $DEST/prerm $DEST/postrm ( + # switch directories so we create *.deb in the right folder cd $DEST + + # create lxc-docker-VERSION package fpm -s dir -C $DIR \ - --name lxc-docker-$VERSION --version $PKGVERSION \ - --after-install /tmp/postinst \ - --before-remove /tmp/prerm \ - --after-remove /tmp/postrm \ - --architecture "$PACKAGE_ARCHITECTURE" \ - --prefix / \ - --depends lxc \ - --depends aufs-tools \ - --depends iptables \ - --deb-recommends ca-certificates \ - --deb-recommends xz-utils \ - --description "$PACKAGE_DESCRIPTION" \ - --maintainer "$PACKAGE_MAINTAINER" \ - --conflicts lxc-docker-virtual-package \ - --provides lxc-docker \ - --provides lxc-docker-virtual-package \ - --replaces lxc-docker \ - --replaces lxc-docker-virtual-package \ - --url "$PACKAGE_URL" \ - --license "$PACKAGE_LICENSE" \ - --config-files /etc/udev/rules.d/80-docker.rules \ - --config-files /etc/init/docker.conf \ - --config-files /etc/init.d/docker \ - --config-files /etc/default/docker \ - --deb-compression gz \ - -t deb . + --name lxc-docker-$VERSION --version $PKGVERSION \ + --after-install $DEST/postinst \ + --before-remove $DEST/prerm \ + --after-remove $DEST/postrm \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --prefix / \ + --depends iptables \ + --deb-recommends aufs-tools \ + --deb-recommends ca-certificates \ + --deb-recommends git \ + --deb-recommends xz-utils \ + --deb-suggests cgroup-lite \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --conflicts docker \ + --conflicts docker.io \ + --conflicts lxc-docker-virtual-package \ + --provides lxc-docker \ + --provides lxc-docker-virtual-package \ + --replaces lxc-docker \ + --replaces lxc-docker-virtual-package \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --config-files /etc/udev/rules.d/80-docker.rules \ + --config-files /etc/init/docker.conf \ + --config-files /etc/init.d/docker \ + --config-files /etc/default/docker \ + --deb-compression gz \ + -t deb . 
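Beyond the re-indentation, the fpm call above now conflicts with the distro docker and docker.io packages and demotes aufs-tools from a hard dependency to a recommendation. Stripped to its skeleton, the packaging step is (version and paths are placeholders):

```bash
# Package the staged root filesystem in $DIR as a .deb; the maintainer
# scripts now live under $DEST so the bundle can clean up after itself.
fpm -s dir -C "$DIR" \
    --name lxc-docker-0.8.1 --version 0.8.1 \
    --after-install "$DEST/postinst" \
    --before-remove "$DEST/prerm" \
    --after-remove "$DEST/postrm" \
    --prefix / \
    --depends iptables \
    -t deb .
```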
+ # TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available + + # create empty lxc-docker wrapper package fpm -s empty \ - --name lxc-docker --version $PKGVERSION \ - --architecture "$PACKAGE_ARCHITECTURE" \ - --depends lxc-docker-$VERSION \ - --description "$PACKAGE_DESCRIPTION" \ - --maintainer "$PACKAGE_MAINTAINER" \ - --url "$PACKAGE_URL" \ - --license "$PACKAGE_LICENSE" \ - --deb-compression gz \ - -t deb + --name lxc-docker --version $PKGVERSION \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --depends lxc-docker-$VERSION \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --deb-compression gz \ + -t deb ) + + # clean up after ourselves so we have a clean output directory + rm $DEST/postinst $DEST/prerm $DEST/postrm + rm -r $DIR } bundle_ubuntu diff --git a/hack/vendor.sh b/hack/vendor.sh index d3e7ea9f43..184cb750a5 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -50,3 +50,11 @@ clone git github.com/syndtr/gocapability 3454319be2 clone hg code.google.com/p/go.net 84a4013f96e0 clone hg code.google.com/p/gosqlite 74691fb6f837 + +# get Go tip's archive/tar, for xattr support +# TODO after Go 1.3 drops, bump our minimum supported version and drop this vendored dep +clone hg code.google.com/p/go a15f344a9efa +mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar +rm -rf src/code.google.com/p/go +mkdir -p src/code.google.com/p/go/src/pkg/archive +mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar diff --git a/image.go b/image.go index dbd2173597..fa5b65787c 100644 --- a/image.go +++ b/image.go @@ -7,6 +7,7 @@ import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -18,17 +19,17 @@ import ( ) type Image struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - Container string `json:"container,omitempty"` - ContainerConfig Config `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - Config *Config `json:"config,omitempty"` - Architecture string `json:"architecture,omitempty"` - OS string `json:"os,omitempty"` + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig runconfig.Config `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *runconfig.Config `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + OS string `json:"os,omitempty"` graph *Graph Size int64 } @@ -66,7 +67,7 @@ func LoadImage(root string) (*Image, error) { return img, nil } -func StoreImage(img *Image, jsonData []byte, layerData archive.Archive, root, layer string) error { +func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, root, layer string) error { // Store the layer var ( size int64 @@ -173,7 +174,11 @@ func (img *Image) TarLayer() (arch archive.Archive, err error) { if err != nil { return nil, err } - return EofReader(archive, func() { driver.Put(img.ID) }), nil + return utils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + 
driver.Put(img.ID) + return err + }), nil } parentFs, err := driver.Get(img.Parent) @@ -189,7 +194,11 @@ func (img *Image) TarLayer() (arch archive.Archive, err error) { if err != nil { return nil, err } - return EofReader(archive, func() { driver.Put(img.ID) }), nil + return utils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(img.ID) + return err + }), nil } func ValidateID(id string) error { diff --git a/integration/api_test.go b/integration/api_test.go index 82de56a8ba..cb92d89858 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -1,16 +1,19 @@ package docker import ( - "archive/tar" "bufio" "bytes" "encoding/json" "fmt" "github.com/dotcloud/docker" "github.com/dotcloud/docker/api" + "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" + "io/ioutil" "net" "net/http" "net/http/httptest" @@ -45,7 +48,7 @@ func TestGetVersion(t *testing.T) { t.Fatal(err) } out.Close() - expected := docker.VERSION + expected := dockerversion.VERSION if result := v.Get("Version"); result != expected { t.Errorf("Expected version %s, %s found", expected, result) } @@ -308,7 +311,7 @@ func TestGetContainersJSON(t *testing.T) { } beginLen := len(outs.Data) - containerID := createTestContainer(eng, &docker.Config{ + containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"echo", "test"}, }, t) @@ -345,7 +348,7 @@ func TestGetContainersExport(t *testing.T) { // Create a container and remove a file containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test"}, }, @@ -387,13 +390,84 @@ func TestGetContainersExport(t *testing.T) { } } +func TestSaveImageAndThenLoad(t *testing.T) { + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + + // save image + r := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil) + if err != nil { + t.Fatal(err) + } + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusOK { + t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) + } + tarball := r.Body + + // delete the image + r = httptest.NewRecorder() + req, err = http.NewRequest("DELETE", "/images/"+unitTestImageID, nil) + if err != nil { + t.Fatal(err) + } + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusOK { + t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) + } + + // make sure there is no image + r = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil) + if err != nil { + t.Fatal(err) + } + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusNotFound { + t.Fatalf("%d NotFound expected, received %d\n", http.StatusNotFound, r.Code) + } + + // load the image + r = httptest.NewRecorder() + req, err = http.NewRequest("POST", "/images/load", tarball) + if err != nil { + t.Fatal(err) + } + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusOK { + t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) + } + + // finally make sure the image is there + r = httptest.NewRecorder() + req, err 
= http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil) + if err != nil { + t.Fatal(err) + } + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusOK { + t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) + } +} + func TestGetContainersChanges(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() // Create a container and remove a file containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/rm", "/etc/passwd"}, }, @@ -432,7 +506,7 @@ func TestGetContainersTop(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/sh", "-c", "cat"}, OpenStdin: true, @@ -509,7 +583,7 @@ func TestGetContainersByName(t *testing.T) { // Create a container and remove a file containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"echo", "test"}, }, @@ -541,7 +615,7 @@ func TestPostCommit(t *testing.T) { // Create a container and remove a file containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test"}, }, @@ -577,7 +651,7 @@ func TestPostContainersCreate(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - configJSON, err := json.Marshal(&docker.Config{ + configJSON, err := json.Marshal(&runconfig.Config{ Image: unitTestImageID, Memory: 33554432, Cmd: []string{"touch", "/test"}, @@ -619,7 +693,7 @@ func TestPostContainersKill(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, @@ -658,7 +732,7 @@ func TestPostContainersRestart(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/top"}, OpenStdin: true, @@ -704,7 +778,7 @@ func TestPostContainersStart(t *testing.T) { containerID := createTestContainer( eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, @@ -712,7 +786,7 @@ func TestPostContainersStart(t *testing.T) { t, ) - hostConfigJSON, err := json.Marshal(&docker.HostConfig{}) + hostConfigJSON, err := json.Marshal(&runconfig.HostConfig{}) req, err := http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON)) if err != nil { @@ -757,7 +831,7 @@ func TestRunErrorBindMountRootSource(t *testing.T) { containerID := createTestContainer( eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, @@ -765,7 +839,7 @@ func TestRunErrorBindMountRootSource(t *testing.T) { t, ) - hostConfigJSON, err := json.Marshal(&docker.HostConfig{ + hostConfigJSON, err := json.Marshal(&runconfig.HostConfig{ Binds: []string{"/:/tmp"}, }) @@ -791,7 +865,7 @@ func TestPostContainersStop(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/top"}, OpenStdin: true, @@ -831,7 +905,7 @@ func TestPostContainersWait(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + 
&runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/sleep", "1"}, OpenStdin: true, @@ -869,7 +943,7 @@ func TestPostContainersAttach(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, @@ -947,7 +1021,7 @@ func TestPostContainersAttachStderr(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/sh", "-c", "/bin/cat >&2"}, OpenStdin: true, @@ -1028,7 +1102,7 @@ func TestDeleteContainers(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test"}, }, @@ -1102,6 +1176,8 @@ func TestGetEnabledCors(t *testing.T) { func TestDeleteImages(t *testing.T) { eng := NewTestEngine(t) + //we expect errors, so we disable stderr + eng.Stderr = ioutil.Discard defer mkRuntimeFromEngine(eng, t).Nuke() initialImages := getImages(eng, t, true, "") @@ -1163,7 +1239,7 @@ func TestPostContainersCopy(t *testing.T) { // Create a container and remove a file containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test.txt"}, }, @@ -1215,6 +1291,34 @@ func TestPostContainersCopy(t *testing.T) { } } +func TestPostContainersCopyWhenContainerNotFound(t *testing.T) { + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + + r := httptest.NewRecorder() + + var copyData engine.Env + copyData.Set("Resource", "/test.txt") + copyData.Set("HostPath", ".") + + jsonData := bytes.NewBuffer(nil) + if err := copyData.Encode(jsonData); err != nil { + t.Fatal(err) + } + + req, err := http.NewRequest("POST", "/containers/id_not_found/copy", jsonData) + if err != nil { + t.Fatal(err) + } + req.Header.Add("Content-Type", "application/json") + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusNotFound { + t.Fatalf("404 expected for id_not_found Container, received %v", r.Code) + } +} + // Mocked types for tests type NopConn struct { io.ReadCloser diff --git a/integration/buildfile_test.go b/integration/buildfile_test.go index 6a7da70558..efab9707ec 100644 --- a/integration/buildfile_test.go +++ b/integration/buildfile_test.go @@ -14,16 +14,6 @@ import ( "testing" ) -// mkTestContext generates a build context from the contents of the provided dockerfile. -// This context is suitable for use as an argument to BuildFile.Build() -func mkTestContext(dockerfile string, files [][2]string, t *testing.T) archive.Archive { - context, err := docker.MkBuildContext(dockerfile, files) - if err != nil { - t.Fatal(err) - } - return context -} - // A testContextTemplate describes a build context and how to test it type testContextTemplate struct { // Contents of the Dockerfile @@ -34,6 +24,18 @@ type testContextTemplate struct { remoteFiles [][2]string } +func (context testContextTemplate) Archive(dockerfile string, t *testing.T) archive.Archive { + input := []string{"Dockerfile", dockerfile} + for _, pair := range context.files { + input = append(input, pair[0], pair[1]) + } + a, err := archive.Generate(input...) + if err != nil { + t.Fatal(err) + } + return a +} + // A table of all the contexts to build and test. // A new docker runtime will be created and torn down for each context. 
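The context table that follows gains a USER/GROUP matrix in which every RUN line asserts the exact uid:gid/user:group combination reported by id(1). As a hedged spot-check, the same assertion can be reproduced against any image by hand (busybox is illustrative):

```bash
# Mirrors the matrix's default-user assertion outside the build:
docker run -rm busybox \
    sh -c '[ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = "0:0/root:root" ] && echo ok'
```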
var testContexts = []testContextTemplate{ @@ -148,6 +150,65 @@ RUN [ "$(/hello.sh)" = "hello world" ] nil, }, + // Users and groups + { + ` +FROM {IMAGE} + +# Make sure our defaults work +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] + +# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) +USER root +RUN [ "$(id -G):$(id -Gn)" = '0:root' ] + +# Setup dockerio user and group +RUN echo 'dockerio:x:1000:1000::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1000:' >> /etc/group + +# Make sure we can switch to our user and all the information is exactly as we expect it to be +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] + +# Switch back to root and double check that worked exactly as we might expect it to +USER root +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0:root' ] + +# Add a "supplementary" group for our dockerio user +RUN echo 'supplementary:x:1001:dockerio' >> /etc/group + +# ... and then go verify that we get it like we expect +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000 1001:dockerio supplementary' ] +USER 1000 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000 1001:dockerio supplementary' ] + +# super test the new "user:group" syntax +USER dockerio:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] +USER 1000:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] +USER dockerio:1000 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] +USER 1000:1000 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] +USER dockerio:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] +USER dockerio:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] +USER 1000:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] +USER 1000:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] + +# make sure unknown uid/gid still works properly +USER 1042:1043 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ] +`, + nil, + nil, + }, + // Environment variable { ` @@ -322,7 +383,7 @@ func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, u dockerfile := constructDockerfile(context.dockerfile, ip, port) buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) - id, err := buildfile.Build(mkTestContext(dockerfile, context.files, t)) + id, err := buildfile.Build(context.Archive(dockerfile, t)) if err != nil { return nil, err } @@ -726,7 +787,7 @@ 
func TestForbiddenContextPath(t *testing.T) { dockerfile := constructDockerfile(context.dockerfile, ip, port) buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) - _, err = buildfile.Build(mkTestContext(dockerfile, context.files, t)) + _, err = buildfile.Build(context.Archive(dockerfile, t)) if err == nil { t.Log("Error should not be nil") @@ -772,7 +833,7 @@ func TestBuildADDFileNotFound(t *testing.T) { dockerfile := constructDockerfile(context.dockerfile, ip, port) buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) - _, err = buildfile.Build(mkTestContext(dockerfile, context.files, t)) + _, err = buildfile.Build(context.Archive(dockerfile, t)) if err == nil { t.Log("Error should not be nil") @@ -863,3 +924,45 @@ func TestBuildOnBuildTrigger(t *testing.T) { } // FIXME: test that the 'foobar' file was created in the final build. } + +func TestBuildOnBuildForbiddenChainedTrigger(t *testing.T) { + _, err := buildImage(testContextTemplate{` + from {IMAGE} + onbuild onbuild run echo test + `, + nil, nil, + }, + t, nil, true, + ) + if err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestBuildOnBuildForbiddenFromTrigger(t *testing.T) { + _, err := buildImage(testContextTemplate{` + from {IMAGE} + onbuild from {IMAGE} + `, + nil, nil, + }, + t, nil, true, + ) + if err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestBuildOnBuildForbiddenMaintainerTrigger(t *testing.T) { + _, err := buildImage(testContextTemplate{` + from {IMAGE} + onbuild maintainer test + `, + nil, nil, + }, + t, nil, true, + ) + if err == nil { + t.Fatal("Error should not be nil") + } +} diff --git a/integration/commands_test.go b/integration/commands_test.go index a0fc4b9523..9f7a41384c 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -4,6 +4,7 @@ import ( "bufio" "fmt" "github.com/dotcloud/docker" + "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/pkg/term" "github.com/dotcloud/docker/utils" @@ -119,7 +120,7 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error func TestRunHostname(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -164,7 +165,7 @@ func TestRunHostname(t *testing.T) { func TestRunWorkdir(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -209,7 +210,7 @@ func TestRunWorkdir(t *testing.T) { func TestRunWorkdirExists(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -254,7 +255,7 @@ func TestRunExit(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, 
testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c1 := make(chan struct{}) @@ -307,7 +308,7 @@ func TestRunDisconnect(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c1 := make(chan struct{}) @@ -353,7 +354,7 @@ func TestRunDisconnectTty(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c1 := make(chan struct{}) @@ -405,7 +406,7 @@ func TestRunAttachStdin(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) ch := make(chan struct{}) @@ -469,7 +470,7 @@ func TestRunDetach(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) ch := make(chan struct{}) @@ -516,7 +517,7 @@ func TestAttachDetach(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) ch := make(chan struct{}) @@ -549,7 +550,7 @@ func TestAttachDetach(t *testing.T) { stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) ch = make(chan struct{}) go func() { @@ -597,7 +598,7 @@ func TestAttachDetachTruncatedID(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) // Discard the CmdRun output @@ -615,7 +616,7 @@ func TestAttachDetachTruncatedID(t *testing.T) { stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) ch := make(chan struct{}) go func() { @@ -662,7 +663,7 @@ func TestAttachDisconnect(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) go func() { @@ -731,7 +732,7 @@ func TestAttachDisconnect(t *testing.T) { func TestRunAutoRemove(t 
*testing.T) { t.Skip("Fixme. Skipping test for now, race condition") stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -767,7 +768,7 @@ func TestRunAutoRemove(t *testing.T) { func TestCmdLogs(t *testing.T) { t.Skip("Test not impemented") - cli := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) if err := cli.CmdRun(unitTestImageID, "sh", "-c", "ls -l"); err != nil { @@ -785,7 +786,7 @@ func TestCmdLogs(t *testing.T) { // Expected behaviour: error out when attempting to bind mount non-existing source paths func TestRunErrorBindNonExistingSource(t *testing.T) { - cli := docker.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -805,7 +806,7 @@ func TestRunErrorBindNonExistingSource(t *testing.T) { func TestImagesViz(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) image := buildTestImages(t, globalEngine) @@ -813,7 +814,7 @@ func TestImagesViz(t *testing.T) { c := make(chan struct{}) go func() { defer close(c) - if err := cli.CmdImages("-viz"); err != nil { + if err := cli.CmdImages("--viz"); err != nil { t.Fatal(err) } stdoutPipe.Close() @@ -846,7 +847,7 @@ func TestImagesViz(t *testing.T) { for _, regexp := range compiledRegexps { if !regexp.MatchString(cmdOutput) { - t.Fatalf("images -viz content '%s' did not match regexp '%s'", cmdOutput, regexp) + t.Fatalf("images --viz content '%s' did not match regexp '%s'", cmdOutput, regexp) } } }) @@ -855,7 +856,7 @@ func TestImagesViz(t *testing.T) { func TestImagesTree(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) image := buildTestImages(t, globalEngine) @@ -863,7 +864,7 @@ func TestImagesTree(t *testing.T) { c := make(chan struct{}) go func() { defer close(c) - if err := cli.CmdImages("-tree"); err != nil { + if err := cli.CmdImages("--tree"); err != nil { t.Fatal(err) } stdoutPipe.Close() @@ -895,7 +896,7 @@ func TestImagesTree(t *testing.T) { for _, regexp := range compiledRegexps { if !regexp.MatchString(cmdOutput) { - t.Fatalf("images -tree content '%s' did not match regexp '%s'", cmdOutput, regexp) + t.Fatalf("images --tree content '%s' did not match regexp '%s'", cmdOutput, regexp) } } }) @@ -927,8 +928,8 @@ run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ] } // #2098 - Docker cidFiles only contain short version of the containerId -//sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test" -// TestRunCidFile tests that run -cidfile returns the longid +//sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" +// TestRunCidFile tests that run --cidfile returns the longid func TestRunCidFile(t *testing.T) { stdout, stdoutPipe := io.Pipe() @@ 
-938,13 +939,13 @@ func TestRunCidFile(t *testing.T) { } tmpCidFile := path.Join(tmpDir, "cid") - cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { defer close(c) - if err := cli.CmdRun("-cidfile", tmpCidFile, unitTestImageID, "ls"); err != nil { + if err := cli.CmdRun("--cidfile", tmpCidFile, unitTestImageID, "ls"); err != nil { t.Fatal(err) } }() @@ -966,7 +967,7 @@ func TestRunCidFile(t *testing.T) { id := string(buffer) if len(id) != len("2bf44ea18873287bd9ace8a4cb536a7cbe134bed67e805fdf2f58a57f69b320c") { - t.Fatalf("-cidfile should be a long id, not '%s'", id) + t.Fatalf("--cidfile should be a long id, not '%s'", id) } //test that its a valid cid? (though the container is gone..) //remove the file and dir. @@ -988,7 +989,7 @@ func TestContainerOrphaning(t *testing.T) { defer os.RemoveAll(tmpDir) // setup a CLI and server - cli := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) srv := mkServerFromEngine(globalEngine, t) @@ -1030,7 +1031,10 @@ func TestContainerOrphaning(t *testing.T) { buildSomething(template2, imageName) // remove the second image by name - resp, err := srv.DeleteImage(imageName, true) + resp := engine.NewTable("", 0) + if err := srv.DeleteImage(imageName, resp, true, false); err == nil { + t.Fatal("Expected error, got none") + } // see if we deleted the first image (and orphaned the container) for _, i := range resp.Data { @@ -1042,11 +1046,12 @@ func TestContainerOrphaning(t *testing.T) { } func TestCmdKill(t *testing.T) { - stdin, stdinPipe := io.Pipe() - stdout, stdoutPipe := io.Pipe() - - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - cli2 := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) + var ( + stdin, stdinPipe = io.Pipe() + stdout, stdoutPipe = io.Pipe() + cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli2 = api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) + ) defer cleanup(globalEngine, t) ch := make(chan struct{}) @@ -1085,6 +1090,7 @@ func TestCmdKill(t *testing.T) { } }) + stdout.Close() time.Sleep(500 * time.Millisecond) if !container.State.IsRunning() { t.Fatal("The container should be still running") diff --git a/integration/container_test.go b/integration/container_test.go index 97f4cd282f..4efb95a2a1 100644 --- a/integration/container_test.go +++ b/integration/container_test.go @@ -3,7 +3,7 @@ package docker import ( "bufio" "fmt" - "github.com/dotcloud/docker" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -20,7 +20,7 @@ func TestIDFormat(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container1, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/sh", "-c", "echo hello world"}, }, @@ -234,7 +234,7 @@ func TestCommitAutoRun(t *testing.T) { t.Errorf("Container shouldn't be running") } - img, err := runtime.Commit(container1, "", "", "unit test commited image", "", &docker.Config{Cmd: []string{"cat", "/world"}}) + img, err := runtime.Commit(container1, "", "", "unit test commited 
image", "", &runconfig.Config{Cmd: []string{"cat", "/world"}}) if err != nil { t.Error(err) } @@ -415,7 +415,7 @@ func TestOutput(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foobar"}, }, @@ -438,7 +438,7 @@ func TestContainerNetwork(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ping", "-c", "1", "127.0.0.1"}, }, @@ -460,7 +460,7 @@ func TestKillDifferentUser(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, OpenStdin: true, @@ -520,7 +520,7 @@ func TestCreateVolume(t *testing.T) { runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, hc, _, err := docker.ParseRun([]string{"-v", "/var/lib/data", unitTestImageID, "echo", "hello", "world"}, nil) + config, hc, _, err := runconfig.Parse([]string{"-v", "/var/lib/data", unitTestImageID, "echo", "hello", "world"}, nil) if err != nil { t.Fatal(err) } @@ -552,7 +552,7 @@ func TestCreateVolume(t *testing.T) { func TestKill(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sleep", "2"}, }, @@ -596,7 +596,7 @@ func TestExitCode(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - trueContainer, _, err := runtime.Create(&docker.Config{ + trueContainer, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/true"}, }, "") @@ -611,7 +611,7 @@ func TestExitCode(t *testing.T) { t.Fatalf("Unexpected exit code %d (expected 0)", code) } - falseContainer, _, err := runtime.Create(&docker.Config{ + falseContainer, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/false"}, }, "") @@ -630,7 +630,7 @@ func TestExitCode(t *testing.T) { func TestRestart(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foobar"}, }, @@ -661,7 +661,7 @@ func TestRestart(t *testing.T) { func TestRestartStdin(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, @@ -739,7 +739,7 @@ func TestUser(t *testing.T) { defer nuke(runtime) // Default user must be root - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, }, @@ -758,7 +758,7 @@ func TestUser(t *testing.T) { } // Set a username - container, _, err = runtime.Create(&docker.Config{ + container, _, err = runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -779,7 +779,7 @@ func TestUser(t *testing.T) { } // Set a UID - container, _, err = runtime.Create(&docker.Config{ + container, _, err = runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -800,7 +800,7 @@ func 
TestUser(t *testing.T) { } // Set a different user by uid - container, _, err = runtime.Create(&docker.Config{ + container, _, err = runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -823,7 +823,7 @@ func TestUser(t *testing.T) { } // Set a different user by username - container, _, err = runtime.Create(&docker.Config{ + container, _, err = runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -844,7 +844,7 @@ func TestUser(t *testing.T) { } // Test an wrong username - container, _, err = runtime.Create(&docker.Config{ + container, _, err = runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -866,7 +866,7 @@ func TestMultipleContainers(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container1, _, err := runtime.Create(&docker.Config{ + container1, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sleep", "2"}, }, @@ -877,7 +877,7 @@ func TestMultipleContainers(t *testing.T) { } defer runtime.Destroy(container1) - container2, _, err := runtime.Create(&docker.Config{ + container2, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sleep", "2"}, }, @@ -921,7 +921,7 @@ func TestMultipleContainers(t *testing.T) { func TestStdin(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, @@ -966,7 +966,7 @@ func TestStdin(t *testing.T) { func TestTty(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, @@ -1013,7 +1013,7 @@ func TestEnv(t *testing.T) { os.Setenv("TRICKY", "tri\ncky\n") runtime := mkRuntime(t) defer nuke(runtime) - config, _, _, err := docker.ParseRun([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil) + config, _, _, err := runconfig.Parse([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil) if err != nil { t.Fatal(err) } @@ -1044,7 +1044,6 @@ func TestEnv(t *testing.T) { goodEnv := []string{ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "HOME=/", - "container=lxc", "HOSTNAME=" + utils.TruncateID(container.ID), "FALSE=true", "TRUE=false", @@ -1067,7 +1066,7 @@ func TestEntrypoint(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Entrypoint: []string{"/bin/echo"}, Cmd: []string{"-n", "foobar"}, @@ -1091,7 +1090,7 @@ func TestEntrypointNoCmd(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Entrypoint: []string{"/bin/echo", "foobar"}, }, @@ -1114,7 +1113,7 @@ func BenchmarkRunSequencial(b *testing.B) { runtime := mkRuntime(b) defer nuke(runtime) for i := 0; i < b.N; i++ { - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foo"}, }, @@ -1147,7 +1146,7 @@ func BenchmarkRunParallel(b *testing.B) { complete := make(chan error) tasks = append(tasks, complete) go func(i int, complete chan error) 
{ - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foo"}, }, @@ -1301,7 +1300,7 @@ func TestFromVolumesInReadonlyMode(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1321,7 +1320,7 @@ func TestFromVolumesInReadonlyMode(t *testing.T) { } container2, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, VolumesFrom: container.ID + ":ro", @@ -1362,7 +1361,7 @@ func TestVolumesFromReadonlyMount(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1382,7 +1381,7 @@ func TestVolumesFromReadonlyMount(t *testing.T) { } container2, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, VolumesFrom: container.ID, @@ -1418,7 +1417,7 @@ func TestRestartWithVolumes(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foobar"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1462,7 +1461,7 @@ func TestVolumesFromWithVolumes(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1491,7 +1490,7 @@ func TestVolumesFromWithVolumes(t *testing.T) { } container2, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat", "/test/foo"}, VolumesFrom: container.ID, @@ -1529,7 +1528,7 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) { runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, hc, _, err := docker.ParseRun([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil) + config, hc, _, err := runconfig.Parse([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil) if err != nil { t.Fatal(err) } @@ -1581,8 +1580,8 @@ func TestPrivilegedCanMknod(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer runtime.Nuke() - if output, _ := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" { - t.Fatal("Could not mknod into privileged container") + if output, err := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" { + t.Fatalf("Could not mknod into privileged container %s %v", output, err) } } @@ -1617,7 +1616,7 @@ func TestMultipleVolumesFrom(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > 
/test/foo"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1646,7 +1645,7 @@ func TestMultipleVolumesFrom(t *testing.T) { } container2, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /other/foo"}, Volumes: map[string]struct{}{"/other": {}}, @@ -1668,7 +1667,7 @@ func TestMultipleVolumesFrom(t *testing.T) { } container3, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, VolumesFrom: strings.Join([]string{container.ID, container2.ID}, ","), @@ -1696,7 +1695,7 @@ func TestRestartGhost(t *testing.T) { defer nuke(runtime) container, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Volumes: map[string]struct{}{"/test": {}}, diff --git a/integration/graph_test.go b/integration/graph_test.go index eec4c5c7dc..ff1c0d9361 100644 --- a/integration/graph_test.go +++ b/integration/graph_test.go @@ -4,6 +4,7 @@ import ( "errors" "github.com/dotcloud/docker" "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/utils" "io" @@ -105,8 +106,8 @@ func TestGraphCreate(t *testing.T) { if image.Comment != "Testing" { t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", image.Comment) } - if image.DockerVersion != docker.VERSION { - t.Fatalf("Wrong docker_version: should be '%s', not '%s'", docker.VERSION, image.DockerVersion) + if image.DockerVersion != dockerversion.VERSION { + t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.VERSION, image.DockerVersion) } images, err := graph.Map() if err != nil { diff --git a/integration/runtime_test.go b/integration/runtime_test.go index da95967a30..1e912c1bb4 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -5,6 +5,8 @@ import ( "fmt" "github.com/dotcloud/docker" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/sysinit" "github.com/dotcloud/docker/utils" "io" @@ -83,7 +85,7 @@ func init() { os.Setenv("TEST", "1") // Hack to run sys init during unit testing - if selfPath := utils.SelfPath(); selfPath == "/sbin/init" || selfPath == "/.dockerinit" { + if selfPath := utils.SelfPath(); strings.Contains(selfPath, ".dockerinit") { sysinit.SysInit() return } @@ -121,19 +123,8 @@ func init() { } func setupBaseImage() { - eng, err := engine.New(unitTestStoreBase) - if err != nil { - log.Fatalf("Can't initialize engine at %s: %s", unitTestStoreBase, err) - } - job := eng.Job("initserver") - job.Setenv("Root", unitTestStoreBase) - job.SetenvBool("Autorestart", false) - job.Setenv("BridgeIface", unitTestNetworkBridge) - if err := job.Run(); err != nil { - log.Fatalf("Unable to create a runtime for tests: %s", err) - } - - job = eng.Job("inspect", unitTestImageName, "image") + eng := newTestEngine(log.New(os.Stderr, "", 0), false, unitTestStoreBase) + job := eng.Job("inspect", unitTestImageName, "image") img, _ := job.Stdout.AddEnv() // If the unit test is not found, try to download it. 
if err := job.Run(); err != nil || img.Get("id") != unitTestImageID {
@@ -169,9 +160,14 @@ func spawnGlobalDaemon() {
 			log.Fatalf("Unable to spawn the test daemon: %s", err)
 		}
 	}()
+	// Give some time to ListenAndServe to actually start
 	// FIXME: use inmem transports instead of tcp
 	time.Sleep(time.Second)
+
+	if err := eng.Job("acceptconnections").Run(); err != nil {
+		log.Fatalf("Unable to accept connections for test api: %s", err)
+	}
 }

 // FIXME: test that ImagePull(json=true) send correct json output
@@ -199,7 +195,7 @@ func TestRuntimeCreate(t *testing.T) {
 		t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
 	}

-	container, _, err := runtime.Create(&docker.Config{
+	container, _, err := runtime.Create(&runconfig.Config{
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"ls", "-al"},
 	},
@@ -242,23 +238,23 @@

 	// Test that conflict error displays correct details
 	testContainer, _, _ := runtime.Create(
-		&docker.Config{
+		&runconfig.Config{
 			Image: GetTestImage(runtime).ID,
 			Cmd:   []string{"ls", "-al"},
 		},
 		"conflictname",
 	)
-	if _, _, err := runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) {
+	if _, _, err := runtime.Create(&runconfig.Config{Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) {
 		t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %s", err.Error())
 	}

 	// Make sure create with bad parameters returns an error
-	if _, _, err = runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID}, ""); err == nil {
+	if _, _, err = runtime.Create(&runconfig.Config{Image: GetTestImage(runtime).ID}, ""); err == nil {
 		t.Fatal("Builder.Create should throw an error when Cmd is missing")
 	}

 	if _, _, err := runtime.Create(
-		&docker.Config{
+		&runconfig.Config{
 			Image: GetTestImage(runtime).ID,
 			Cmd:   []string{},
 		},
@@ -267,7 +263,7 @@
 		t.Fatal("Builder.Create should throw an error when Cmd is empty")
 	}

-	config := &docker.Config{
+	config := &runconfig.Config{
 		Image:     GetTestImage(runtime).ID,
 		Cmd:       []string{"/bin/ls"},
 		PortSpecs: []string{"80"},
@@ -280,7 +276,7 @@
 	}

 	// test expose 80:8000
-	container, warnings, err := runtime.Create(&docker.Config{
+	container, warnings, err := runtime.Create(&runconfig.Config{
 		Image:     GetTestImage(runtime).ID,
 		Cmd:       []string{"ls", "-al"},
 		PortSpecs: []string{"80:8000"},
@@ -299,7 +295,7 @@ func TestDestroy(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)

-	container, _, err := runtime.Create(&docker.Config{
+	container, _, err := runtime.Create(&runconfig.Config{
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"ls", "-al"},
 	}, "")
@@ -368,7 +364,7 @@ func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *doc
 		eng     = NewTestEngine(t)
 		runtime = mkRuntimeFromEngine(eng, t)
 		port    = 5554
-		p       docker.Port
+		p       nat.Port
 	)
 	defer func() {
 		if err != nil {
@@ -387,8 +383,8 @@
 	} else {
 		t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
 	}
-	ep := make(map[docker.Port]struct{}, 1)
-	p = docker.Port(fmt.Sprintf("%s/%s", strPort, proto))
+	ep := make(map[nat.Port]struct{}, 1)
+	p = nat.Port(fmt.Sprintf("%s/%s", strPort, proto))
 	ep[p] = struct{}{}

 	jobCreate := eng.Job("create")
@@ -411,8 +407,8 @@ func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *doc } jobStart := eng.Job("start", id) - portBindings := make(map[docker.Port][]docker.PortBinding) - portBindings[p] = []docker.PortBinding{ + portBindings := make(map[nat.Port][]nat.PortBinding) + portBindings[p] = []nat.PortBinding{ {}, } if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil { @@ -568,18 +564,7 @@ func TestRestore(t *testing.T) { // Here are are simulating a docker restart - that is, reloading all containers // from scratch - root := eng.Root() - eng, err := engine.New(root) - if err != nil { - t.Fatal(err) - } - job := eng.Job("initserver") - job.Setenv("Root", eng.Root()) - job.SetenvBool("Autorestart", false) - if err := job.Run(); err != nil { - t.Fatal(err) - } - + eng = newTestEngine(t, false, eng.Root()) runtime2 := mkRuntimeFromEngine(eng, t) if len(runtime2.List()) != 2 { t.Errorf("Expected 2 container, %v found", len(runtime2.List())) @@ -604,114 +589,12 @@ func TestRestore(t *testing.T) { container2.State.SetStopped(0) } -func TestReloadContainerLinks(t *testing.T) { - // FIXME: here we don't use NewTestEngine because it calls initserver with Autorestart=false, - // and we want to set it to true. - root, err := newTestDirectory(unitTestStoreBase) - if err != nil { - t.Fatal(err) - } - eng, err := engine.New(root) - if err != nil { - t.Fatal(err) - } - job := eng.Job("initserver") - job.Setenv("Root", eng.Root()) - job.SetenvBool("Autorestart", true) - if err := job.Run(); err != nil { - t.Fatal(err) - } - - runtime1 := mkRuntimeFromEngine(eng, t) - defer nuke(runtime1) - // Create a container with one instance of docker - container1, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t) - defer runtime1.Destroy(container1) - - // Create a second container meant to be killed - container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t) - defer runtime1.Destroy(container2) - - // Start the container non blocking - if err := container2.Start(); err != nil { - t.Fatal(err) - } - // Add a link to container 2 - // FIXME @shykes: setting hostConfig.Links seems redundant with calling RegisterLink(). - // Why do we need it @crosbymichael? 
- // container1.hostConfig.Links = []string{"/" + container2.ID + ":first"} - if err := runtime1.RegisterLink(container1, container2, "first"); err != nil { - t.Fatal(err) - } - if err := container1.Start(); err != nil { - t.Fatal(err) - } - - if !container2.State.IsRunning() { - t.Fatalf("Container %v should appear as running but isn't", container2.ID) - } - - if !container1.State.IsRunning() { - t.Fatalf("Container %s should appear as running but isn't", container1.ID) - } - - if len(runtime1.List()) != 2 { - t.Errorf("Expected 2 container, %v found", len(runtime1.List())) - } - - // Here are are simulating a docker restart - that is, reloading all containers - // from scratch - eng, err = engine.New(root) - if err != nil { - t.Fatal(err) - } - job = eng.Job("initserver") - job.Setenv("Root", eng.Root()) - job.SetenvBool("Autorestart", false) - if err := job.Run(); err != nil { - t.Fatal(err) - } - - runtime2 := mkRuntimeFromEngine(eng, t) - if len(runtime2.List()) != 2 { - t.Errorf("Expected 2 container, %v found", len(runtime2.List())) - } - runningCount := 0 - for _, c := range runtime2.List() { - if c.State.IsRunning() { - runningCount++ - } - } - if runningCount != 2 { - t.Fatalf("Expected 2 container alive, %d found", runningCount) - } - - // FIXME: we no longer test if containers were registered in the right order, - // because there is no public - // Make sure container 2 ( the child of container 1 ) was registered and started first - // with the runtime - // - containers := runtime2.List() - if len(containers) == 0 { - t.Fatalf("Runtime has no containers") - } - first := containers[0] - if first.ID != container2.ID { - t.Fatalf("Container 2 %s should be registered first in the runtime", container2.ID) - } - - // Verify that the link is still registered in the runtime - if c := runtime2.Get(container1.Name); c == nil { - t.Fatal("Named container is no longer registered after restart") - } -} - func TestDefaultContainerName(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -735,7 +618,7 @@ func TestRandomContainerName(t *testing.T) { runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{GetTestImage(runtime).ID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -766,7 +649,7 @@ func TestContainerNameValidation(t *testing.T) { {"abc-123_AAA.1", true}, {"\000asdf", false}, } { - config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { if !test.Valid { continue @@ -807,7 +690,7 @@ func TestLinkChildContainer(t *testing.T) { runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -823,7 +706,7 @@ func TestLinkChildContainer(t *testing.T) { t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } - config, _, _, err = docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err = 
runconfig.Parse([]string{GetTestImage(runtime).ID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -849,7 +732,7 @@ func TestGetAllChildren(t *testing.T) { runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -865,7 +748,7 @@ func TestGetAllChildren(t *testing.T) { t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } - config, _, _, err = docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err = runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -902,7 +785,7 @@ func TestDestroyWithInitLayer(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, }, "") diff --git a/integration/server_test.go b/integration/server_test.go index 45d4930ad7..69a90527bf 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -3,6 +3,7 @@ package docker import ( "github.com/dotcloud/docker" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/runconfig" "strings" "testing" "time" @@ -35,7 +36,7 @@ func TestImageTagImageDelete(t *testing.T) { t.Errorf("Expected %d images, %d found", nExpected, nActual) } - if _, err := srv.DeleteImage("utest/docker:tag2", true); err != nil { + if err := srv.DeleteImage("utest/docker:tag2", engine.NewTable("", 0), true, false); err != nil { t.Fatal(err) } @@ -47,7 +48,7 @@ func TestImageTagImageDelete(t *testing.T) { t.Errorf("Expected %d images, %d found", nExpected, nActual) } - if _, err := srv.DeleteImage("utest:5000/docker:tag3", true); err != nil { + if err := srv.DeleteImage("utest:5000/docker:tag3", engine.NewTable("", 0), true, false); err != nil { t.Fatal(err) } @@ -56,7 +57,7 @@ func TestImageTagImageDelete(t *testing.T) { nExpected = len(initialImages.Data[0].GetList("RepoTags")) + 1 nActual = len(images.Data[0].GetList("RepoTags")) - if _, err := srv.DeleteImage("utest:tag1", true); err != nil { + if err := srv.DeleteImage("utest:tag1", engine.NewTable("", 0), true, false); err != nil { t.Fatal(err) } @@ -71,7 +72,7 @@ func TestCreateRm(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -118,7 +119,7 @@ func TestCreateNumberHostname(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - config, _, _, err := docker.ParseRun([]string{"-h", "web.0", unitTestImageID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{"-h", "web.0", unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -130,7 +131,7 @@ func TestCreateNumberUsername(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - config, _, _, err := docker.ParseRun([]string{"-u", "1002", unitTestImageID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{"-u", "1002", unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -142,7 +143,7 @@ func TestCreateRmVolumes(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - config, 
hostConfig, _, err := docker.ParseRun([]string{"-v", "/srv", unitTestImageID, "echo", "test"}, nil) + config, hostConfig, _, err := runconfig.Parse([]string{"-v", "/srv", unitTestImageID, "echo", "test"}, nil) if err != nil { t.Fatal(err) } @@ -198,11 +199,73 @@ func TestCreateRmVolumes(t *testing.T) { } } +func TestCreateRmRunning(t *testing.T) { + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + + config, hostConfig, _, err := runconfig.Parse([]string{"-name", "foo", unitTestImageID, "sleep 300"}, nil) + if err != nil { + t.Fatal(err) + } + + id := createTestContainer(eng, config, t) + + job := eng.Job("containers") + job.SetenvBool("all", true) + outs, err := job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + + if len(outs.Data) != 1 { + t.Errorf("Expected 1 container, %v found", len(outs.Data)) + } + + job = eng.Job("start", id) + if err := job.ImportEnv(hostConfig); err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + + // Test cannot remove running container + job = eng.Job("container_delete", id) + job.SetenvBool("forceRemove", false) + if err := job.Run(); err == nil { + t.Fatal("Expected container delete to fail") + } + + // Test can force removal of running container + job = eng.Job("container_delete", id) + job.SetenvBool("forceRemove", true) + if err := job.Run(); err != nil { + t.Fatal(err) + } + + job = eng.Job("containers") + job.SetenvBool("all", true) + outs, err = job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + + if len(outs.Data) != 0 { + t.Errorf("Expected 0 container, %v found", len(outs.Data)) + } +} + func TestCommit(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - config, _, _, err := docker.ParseRun([]string{unitTestImageID, "/bin/cat"}, nil) + config, _, _, err := runconfig.Parse([]string{unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } @@ -224,7 +287,7 @@ func TestRestartKillWait(t *testing.T) { runtime := mkRuntimeFromEngine(eng, t) defer runtime.Nuke() - config, hostConfig, _, err := docker.ParseRun([]string{"-i", unitTestImageID, "/bin/cat"}, nil) + config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } @@ -257,20 +320,7 @@ func TestRestartKillWait(t *testing.T) { t.Fatal(err) } - eng, err = engine.New(eng.Root()) - if err != nil { - t.Fatal(err) - } - - job = eng.Job("initserver") - job.Setenv("Root", eng.Root()) - job.SetenvBool("AutoRestart", false) - // TestGetEnabledCors and TestOptionsRoute require EnableCors=true - job.SetenvBool("EnableCors", true) - if err := job.Run(); err != nil { - t.Fatal(err) - } - + eng = newTestEngine(t, false, eng.Root()) srv = mkServerFromEngine(eng, t) job = srv.Eng.Job("containers") @@ -302,7 +352,7 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) { srv := mkServerFromEngine(eng, t) defer mkRuntimeFromEngine(eng, t).Nuke() - config, hostConfig, _, err := docker.ParseRun([]string{"-i", unitTestImageID, "/bin/cat"}, nil) + config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } @@ -401,7 +451,7 @@ func TestRmi(t *testing.T) { initialImages := getAllImages(eng, t) - config, hostConfig, _, err := docker.ParseRun([]string{unitTestImageID, "echo", "test"}, nil) + config, hostConfig, _, err := runconfig.Parse([]string{unitTestImageID, 
"echo", "test"}, nil) if err != nil { t.Fatal(err) } @@ -460,8 +510,7 @@ func TestRmi(t *testing.T) { t.Fatalf("Expected 2 new images, found %d.", images.Len()-initialImages.Len()) } - _, err = srv.DeleteImage(imageID, true) - if err != nil { + if err = srv.DeleteImage(imageID, engine.NewTable("", 0), true, false); err != nil { t.Fatal(err) } @@ -548,7 +597,7 @@ func TestListContainers(t *testing.T) { srv := mkServerFromEngine(eng, t) defer mkRuntimeFromEngine(eng, t).Nuke() - config := docker.Config{ + config := runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/sh", "-c", "cat"}, OpenStdin: true, @@ -671,7 +720,7 @@ func TestDeleteTagWithExistingContainers(t *testing.T) { } // Create a container from the image - config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -696,8 +745,8 @@ func TestDeleteTagWithExistingContainers(t *testing.T) { } // Try to remove the tag - imgs, err := srv.DeleteImage("utest:tag1", true) - if err != nil { + imgs := engine.NewTable("", 0) + if err := srv.DeleteImage("utest:tag1", imgs, true, false); err != nil { t.Fatal(err) } @@ -705,7 +754,7 @@ func TestDeleteTagWithExistingContainers(t *testing.T) { t.Fatalf("Should only have deleted one untag %d", len(imgs.Data)) } - if untag := imgs.Data[0].Get("Untagged"); untag != unitTestImageID { + if untag := imgs.Data[0].Get("Untagged"); untag != "utest:tag1" { t.Fatalf("Expected %s got %s", unitTestImageID, untag) } } diff --git a/integration/utils_test.go b/integration/utils_test.go index 450cb7527f..05d73df52a 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -1,9 +1,9 @@ package docker import ( - "archive/tar" "bytes" "fmt" + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "net/http" @@ -15,7 +15,9 @@ import ( "time" "github.com/dotcloud/docker" + "github.com/dotcloud/docker/builtins" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" ) @@ -26,29 +28,15 @@ import ( // Create a temporary runtime suitable for unit testing. // Call t.Fatal() at the first error. func mkRuntime(f utils.Fataler) *docker.Runtime { - root, err := newTestDirectory(unitTestStoreBase) - if err != nil { - f.Fatal(err) - } - config := &docker.DaemonConfig{ - Root: root, - AutoRestart: false, - Mtu: docker.GetDefaultNetworkMtu(), - } - - eng, err := engine.New(root) - if err != nil { - f.Fatal(err) - } - - r, err := docker.NewRuntimeFromDirectory(config, eng) - if err != nil { - f.Fatal(err) - } - return r + eng := newTestEngine(f, false, "") + return mkRuntimeFromEngine(eng, f) + // FIXME: + // [...] + // Mtu: docker.GetDefaultNetworkMtu(), + // [...] 
} -func createNamedTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler, name string) (shortId string) { +func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f utils.Fataler, name string) (shortId string) { job := eng.Job("create", name) if err := job.ImportEnv(config); err != nil { f.Fatal(err) @@ -60,7 +48,7 @@ func createNamedTestContainer(eng *engine.Engine, config *docker.Config, f utils return } -func createTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler) (shortId string) { +func createTestContainer(eng *engine.Engine, config *runconfig.Config, f utils.Fataler) (shortId string) { return createNamedTestContainer(eng, config, f, "") } @@ -184,20 +172,25 @@ func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Runtime { return runtime } -func NewTestEngine(t utils.Fataler) *engine.Engine { - root, err := newTestDirectory(unitTestStoreBase) - if err != nil { - t.Fatal(err) +func newTestEngine(t utils.Fataler, autorestart bool, root string) *engine.Engine { + if root == "" { + if dir, err := newTestDirectory(unitTestStoreBase); err != nil { + t.Fatal(err) + } else { + root = dir + } } eng, err := engine.New(root) if err != nil { t.Fatal(err) } // Load default plugins + builtins.Register(eng) // (This is manually copied and modified from main() until we have a more generic plugin system) job := eng.Job("initserver") job.Setenv("Root", root) - job.SetenvBool("AutoRestart", false) + job.SetenvBool("AutoRestart", autorestart) + job.Setenv("ExecDriver", "native") // TestGetEnabledCors and TestOptionsRoute require EnableCors=true job.SetenvBool("EnableCors", true) if err := job.Run(); err != nil { @@ -206,6 +199,10 @@ func NewTestEngine(t utils.Fataler) *engine.Engine { return eng } +func NewTestEngine(t utils.Fataler) *engine.Engine { + return newTestEngine(t, false, "") +} + func newTestDirectory(templateDir string) (dir string, err error) { return utils.TestDirectory(templateDir) } @@ -252,8 +249,8 @@ func readFile(src string, t *testing.T) (content string) { // dynamically replaced by the current test image. // The caller is responsible for destroying the container. // Call t.Fatal() at the first error. -func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Container, *docker.HostConfig, error) { - config, hc, _, err := docker.ParseRun(args, nil) +func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Container, *runconfig.HostConfig, error) { + config, hc, _, err := runconfig.Parse(args, nil) defer func() { if err != nil && t != nil { t.Fatal(err) @@ -318,7 +315,7 @@ func runContainer(eng *engine.Engine, r *docker.Runtime, args []string, t *testi } // FIXME: this is duplicated from graph_test.go in the docker package. 
-func fakeTar() (io.Reader, error) { +func fakeTar() (io.ReadCloser, error) { content := []byte("Hello world!\n") buf := new(bytes.Buffer) tw := tar.NewWriter(buf) @@ -332,7 +329,7 @@ func fakeTar() (io.Reader, error) { tw.Write([]byte(content)) } tw.Close() - return buf, nil + return ioutil.NopCloser(buf), nil } func getAllImages(eng *engine.Engine, t *testing.T) *engine.Table { diff --git a/links.go b/links/links.go similarity index 77% rename from links.go rename to links/links.go index aa1c08374b..7665a06a11 100644 --- a/links.go +++ b/links/links.go @@ -1,8 +1,9 @@ -package docker +package links import ( "fmt" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/nat" "path" "strings" ) @@ -12,31 +13,28 @@ type Link struct { ChildIP string Name string ChildEnvironment []string - Ports []Port + Ports []nat.Port IsEnabled bool eng *engine.Engine } -func NewLink(parent, child *Container, name string, eng *engine.Engine) (*Link, error) { - if parent.ID == child.ID { - return nil, fmt.Errorf("Cannot link to self: %s == %s", parent.ID, child.ID) - } - if !child.State.IsRunning() { - return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, name) - } +func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}, eng *engine.Engine) (*Link, error) { - ports := make([]Port, len(child.Config.ExposedPorts)) - var i int - for p := range child.Config.ExposedPorts { + var ( + i int + ports = make([]nat.Port, len(exposedPorts)) + ) + + for p := range exposedPorts { ports[i] = p i++ } l := &Link{ Name: name, - ChildIP: child.NetworkSettings.IPAddress, - ParentIP: parent.NetworkSettings.IPAddress, - ChildEnvironment: child.Config.Env, + ChildIP: childIP, + ParentIP: parentIP, + ChildEnvironment: env, Ports: ports, eng: eng, } @@ -74,7 +72,7 @@ func (l *Link) ToEnv() []string { if len(parts) != 2 { continue } - // Ignore a few variables that are added during docker build + // Ignore a few variables that are added during docker build (and not really relevant to linked containers) if parts[0] == "HOME" || parts[0] == "PATH" { continue } @@ -85,14 +83,14 @@ func (l *Link) ToEnv() []string { } // Default port rules -func (l *Link) getDefaultPort() *Port { - var p Port +func (l *Link) getDefaultPort() *nat.Port { + var p nat.Port i := len(l.Ports) if i == 0 { return nil } else if i > 1 { - sortPorts(l.Ports, func(ip, jp Port) bool { + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { // If the two ports have the same number, tcp takes priority // Sort in desc order return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") diff --git a/links_test.go b/links/links_test.go similarity index 60% rename from links_test.go rename to links/links_test.go index 8a266a9a3d..e66f9bfb78 100644 --- a/links_test.go +++ b/links/links_test.go @@ -1,36 +1,16 @@ -package docker +package links import ( + "github.com/dotcloud/docker/nat" "strings" "testing" ) -func newMockLinkContainer(id string, ip string) *Container { - return &Container{ - Config: &Config{}, - ID: id, - NetworkSettings: &NetworkSettings{ - IPAddress: ip, - }, - } -} - func TestLinkNew(t *testing.T) { - toID := GenerateID() - fromID := GenerateID() + ports := make(nat.PortSet) + ports[nat.Port("6379/tcp")] = struct{}{} - from := newMockLinkContainer(fromID, "172.0.17.2") - from.Config.Env = []string{} - from.State = State{Running: true} - ports := make(map[Port]struct{}) - - ports[Port("6379/tcp")] = struct{}{} - - from.Config.ExposedPorts = ports - - to 
:= newMockLinkContainer(toID, "172.0.17.3")
-
-	link, err := NewLink(to, from, "/db/docker", nil)
+	link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -51,28 +31,17 @@
 		t.Fail()
 	}
 	for _, p := range link.Ports {
-		if p != Port("6379/tcp") {
+		if p != nat.Port("6379/tcp") {
 			t.Fail()
 		}
 	}
 }

 func TestLinkEnv(t *testing.T) {
-	toID := GenerateID()
-	fromID := GenerateID()
+	ports := make(nat.PortSet)
+	ports[nat.Port("6379/tcp")] = struct{}{}

-	from := newMockLinkContainer(fromID, "172.0.17.2")
-	from.Config.Env = []string{"PASSWORD=gordon"}
-	from.State = State{Running: true}
-	ports := make(map[Port]struct{})
-
-	ports[Port("6379/tcp")] = struct{}{}
-
-	from.Config.ExposedPorts = ports
-
-	to := newMockLinkContainer(toID, "172.0.17.3")
-
-	link, err := NewLink(to, from, "/db/docker", nil)
+	link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/nat/nat.go b/nat/nat.go
new file mode 100644
index 0000000000..f3af362f8b
--- /dev/null
+++ b/nat/nat.go
@@ -0,0 +1,133 @@
+package nat
+
+// nat is a convenience package for docker's manipulation of strings describing
+// network ports.
+
+import (
+	"fmt"
+	"github.com/dotcloud/docker/utils"
+	"strconv"
+	"strings"
+)
+
+const (
+	PortSpecTemplate       = "ip:hostPort:containerPort"
+	PortSpecTemplateFormat = "ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort"
+)
+
+type PortBinding struct {
+	HostIp   string
+	HostPort string
+}
+
+type PortMap map[Port][]PortBinding
+
+type PortSet map[Port]struct{}
+
+// 80/tcp
+type Port string
+
+func NewPort(proto, port string) Port {
+	return Port(fmt.Sprintf("%s/%s", port, proto))
+}
+
+func ParsePort(rawPort string) (int, error) {
+	port, err := strconv.ParseUint(rawPort, 10, 16)
+	if err != nil {
+		return 0, err
+	}
+	return int(port), nil
+}
+
+func (p Port) Proto() string {
+	parts := strings.Split(string(p), "/")
+	if len(parts) == 1 {
+		return "tcp"
+	}
+	return parts[1]
+}
+
+func (p Port) Port() string {
+	return strings.Split(string(p), "/")[0]
+}
+
+func (p Port) Int() int {
+	i, err := ParsePort(p.Port())
+	if err != nil {
+		panic(err)
+	}
+	return i
+}
+
+// Splits a port in the format of port/proto, returning (proto, port)
+func SplitProtoPort(rawPort string) (string, string) {
+	parts := strings.Split(rawPort, "/")
+	l := len(parts)
+	if l == 0 {
+		return "", ""
+	}
+	if l == 1 {
+		return "tcp", rawPort
+	}
+	return parts[1], parts[0]
+}
+
+// We will receive port specs in the format of ip:public:private/proto and these need to be
+// parsed into the internal types
+func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) {
+	var (
+		exposedPorts = make(map[Port]struct{}, len(ports))
+		bindings     = make(map[Port][]PortBinding)
+	)
+
+	for _, rawPort := range ports {
+		proto := "tcp"
+
+		if i := strings.LastIndex(rawPort, "/"); i != -1 {
+			proto = rawPort[i+1:]
+			rawPort = rawPort[:i]
+		}
+		if !strings.Contains(rawPort, ":") {
+			rawPort = fmt.Sprintf("::%s", rawPort)
+		} else if len(strings.Split(rawPort, ":")) == 2 {
+			rawPort = fmt.Sprintf(":%s", rawPort)
+		}
+
+		parts, err := utils.PartParser(PortSpecTemplate, rawPort)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		var (
+			containerPort = parts["containerPort"]
+			rawIp         = parts["ip"]
+			hostPort      = parts["hostPort"]
+		)
+
+		if containerPort == "" {
+			return nil, nil, fmt.Errorf("No port specified: %s", rawPort)
+		}
+		if _, err := strconv.ParseUint(containerPort, 10, 16); err != nil {
+			return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort)
+		}
+		if _, err := strconv.ParseUint(hostPort, 10, 16); hostPort != "" && err != nil {
+			return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort)
+		}
+
+		port := NewPort(proto, containerPort)
+		if _, exists := exposedPorts[port]; !exists {
+			exposedPorts[port] = struct{}{}
+		}
+
+		binding := PortBinding{
+			HostIp:   rawIp,
+			HostPort: hostPort,
+		}
+		bslice, exists := bindings[port]
+		if !exists {
+			bslice = []PortBinding{}
+		}
+		bindings[port] = append(bslice, binding)
+	}
+	return exposedPorts, bindings, nil
+}
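Since the new `nat` package is self-contained, it is easy to exercise in isolation. A minimal sketch of how the functions above fit together (the port specs are invented examples; the import path is the one introduced by this diff):

```go
package main

import (
	"fmt"

	"github.com/dotcloud/docker/nat"
)

func main() {
	// Full form is ip:hostPort:containerPort; the shorter forms from
	// PortSpecTemplateFormat are padded out to it before parsing.
	exposed, bindings, err := nat.ParsePortSpecs([]string{
		"127.0.0.1:8080:80/tcp", // bind container port 80 to 127.0.0.1:8080
		"6379",                  // expose 6379/tcp with an empty host binding
	})
	if err != nil {
		panic(err)
	}
	for p := range exposed {
		fmt.Println(p.Port(), p.Proto(), p.Int()) // e.g. "80 tcp 80"
	}
	fmt.Println(bindings["80/tcp"]) // [{127.0.0.1 8080}]
}
```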
diff --git a/nat/sort.go b/nat/sort.go
new file mode 100644
index 0000000000..f36c12f7bb
--- /dev/null
+++ b/nat/sort.go
@@ -0,0 +1,28 @@
+package nat
+
+import "sort"
+
+type portSorter struct {
+	ports []Port
+	by    func(i, j Port) bool
+}
+
+func (s *portSorter) Len() int {
+	return len(s.ports)
+}
+
+func (s *portSorter) Swap(i, j int) {
+	s.ports[i], s.ports[j] = s.ports[j], s.ports[i]
+}
+
+func (s *portSorter) Less(i, j int) bool {
+	ip := s.ports[i]
+	jp := s.ports[j]
+
+	return s.by(ip, jp)
+}
+
+func Sort(ports []Port, predicate func(i, j Port) bool) {
+	s := &portSorter{ports, predicate}
+	sort.Sort(s)
+}
diff --git a/sorter_unit_test.go b/nat/sort_test.go
similarity index 86%
rename from sorter_unit_test.go
rename to nat/sort_test.go
index 0669feedb3..5d490e321b 100644
--- a/sorter_unit_test.go
+++ b/nat/sort_test.go
@@ -1,4 +1,4 @@
-package docker
+package nat

 import (
 	"fmt"
@@ -11,7 +11,7 @@ func TestSortUniquePorts(t *testing.T) {
 		Port("22/tcp"),
 	}

-	sortPorts(ports, func(ip, jp Port) bool {
+	Sort(ports, func(ip, jp Port) bool {
 		return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp")
 	})

@@ -30,7 +30,7 @@ func TestSortSamePortWithDifferentProto(t *testing.T) {
 		Port("6379/udp"),
 	}

-	sortPorts(ports, func(ip, jp Port) bool {
+	Sort(ports, func(ip, jp Port) bool {
 		return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp")
 	})

diff --git a/networkdriver/lxc/driver.go b/networkdriver/lxc/driver.go
index 3f9c0af011..6185c42752 100644
--- a/networkdriver/lxc/driver.go
+++ b/networkdriver/lxc/driver.go
@@ -57,12 +57,6 @@ var (
 	currentInterfaces = make(map[string]*networkInterface)
 )

-func init() {
-	if err := engine.Register("init_networkdriver", InitDriver); err != nil {
-		panic(err)
-	}
-}
-
 func InitDriver(job *engine.Job) engine.Status {
 	var (
 		network *net.IPNet
@@ -172,7 +166,6 @@ func setupIPTables(addr net.Addr, icc bool) error {
 		iptables.Raw(append([]string{"-D"}, acceptArgs...)...)

 		if !iptables.Exists(dropArgs...) {
-			utils.Debugf("Disable inter-container communication")
 			if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil {
 				return fmt.Errorf("Unable to prevent intercontainer communication: %s", err)
@@ -470,6 +463,20 @@ func LinkContainers(job *engine.Job) engine.Status {
 			job.Errorf("Error toggle iptables forward: %s", output)
 			return engine.StatusErr
 		}
+
+		if output, err := iptables.Raw(action, "FORWARD",
+			"-i", bridgeIface, "-o", bridgeIface,
+			"-p", proto,
+			"-s", childIP,
+			"--sport", port,
+			"-d", parentIP,
+			"-j", "ACCEPT"); !ignoreErrors && err != nil {
+			job.Error(err)
+			return engine.StatusErr
+		} else if len(output) != 0 {
+			job.Errorf("Error toggle iptables forward: %s", output)
+			return engine.StatusErr
+		}
 	}
 	return engine.StatusOK
 }
diff --git a/networkdriver/network_test.go b/networkdriver/network_test.go
index c15f8b1cf5..6224c2dffb 100644
--- a/networkdriver/network_test.go
+++ b/networkdriver/network_test.go
@@ -105,7 +105,7 @@ func TestNetworkOverlaps(t *testing.T) {
 	//netY starts before and ends at same IP of netX
 	AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t)
 	//netY starts before and ends outside of netX
-	AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t)
+	AssertOverlap("172.16.1.1/24", "172.16.0.1/22", t)
 	//netY starts and ends before netX
 	AssertNoOverlap("172.16.1.1/25", "172.16.0.1/24", t)
 	//netX starts and ends before netY
diff --git a/networkdriver/portmapper/mapper.go b/networkdriver/portmapper/mapper.go
index f052c48143..e29959a245 100644
--- a/networkdriver/portmapper/mapper.go
+++ b/networkdriver/portmapper/mapper.go
@@ -4,7 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"github.com/dotcloud/docker/pkg/iptables"
-	"github.com/dotcloud/docker/proxy"
+	"github.com/dotcloud/docker/pkg/proxy"
 	"net"
 	"sync"
 )
diff --git a/networkdriver/portmapper/mapper_test.go b/networkdriver/portmapper/mapper_test.go
index 05718063e3..4c09f3c651 100644
--- a/networkdriver/portmapper/mapper_test.go
+++ b/networkdriver/portmapper/mapper_test.go
@@ -2,7 +2,7 @@ package portmapper
 import (
 	"github.com/dotcloud/docker/pkg/iptables"
-	"github.com/dotcloud/docker/proxy"
+	"github.com/dotcloud/docker/pkg/proxy"
 	"net"
 	"testing"
 )
diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go
index 91ac3842ac..b40e1a31fa 100644
--- a/pkg/cgroups/cgroups.go
+++ b/pkg/cgroups/cgroups.go
@@ -5,10 +5,23 @@ import (
 	"fmt"
 	"github.com/dotcloud/docker/pkg/mount"
 	"io"
+	"io/ioutil"
 	"os"
+	"path/filepath"
+	"strconv"
 	"strings"
 )
+type Cgroup struct {
+	Name   string `json:"name,omitempty"`
+	Parent string `json:"parent,omitempty"` // name of parent cgroup or slice
+
+	DeviceAccess bool  `json:"device_access,omitempty"` // allow full access to host devices
+	Memory       int64 `json:"memory,omitempty"`        // Memory limit (in bytes)
+	MemorySwap   int64 `json:"memory_swap,omitempty"`   // Total memory usage (memory + swap); set `-1' to disable swap
+	CpuShares    int64 `json:"cpu_shares,omitempty"`    // CPU shares (relative weight vs. other containers)
+}
+
 // https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt
 func FindCgroupMountpoint(subsystem string) (string, error) {
 	mounts, err := mount.GetMounts()
@@ -25,7 +38,6 @@ func FindCgroupMountpoint(subsystem string) (string, error) {
 			}
 		}
 	}
-
 	return "", fmt.Errorf("cgroup mountpoint not found for %s", subsystem)
 }
@@ -40,18 +52,199 @@
 	return parseCgroupFile(subsystem, f)
 }

+func GetInitCgroupDir(subsystem string) (string, error) {
+	f, err := os.Open("/proc/1/cgroup")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	return parseCgroupFile(subsystem, f)
+}
+
+func (c *Cgroup) Path(root, subsystem string) (string, error) {
+	cgroup := c.Name
+	if c.Parent != "" {
+		cgroup = filepath.Join(c.Parent, cgroup)
+	}
+	initPath, err := GetInitCgroupDir(subsystem)
+	if err != nil {
+		return "", err
+	}
+	return filepath.Join(root, subsystem, initPath, cgroup), nil
+}
+
+func (c *Cgroup) Join(root, subsystem string, pid int) (string, error) {
+	path, err := c.Path(root, subsystem)
+	if err != nil {
+		return "", err
+	}
+	if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
+		return "", err
+	}
+	if err := writeFile(path, "tasks", strconv.Itoa(pid)); err != nil {
+		return "", err
+	}
+	return path, nil
+}
+
+func (c *Cgroup) Cleanup(root string) error {
+	get := func(subsystem string) string {
+		path, _ := c.Path(root, subsystem)
+		return path
+	}
+
+	for _, path := range []string{
+		get("memory"),
+		get("devices"),
+		get("cpu"),
+	} {
+		os.RemoveAll(path)
+	}
+	return nil
+}
+
 func parseCgroupFile(subsystem string, r io.Reader) (string, error) {
 	s := bufio.NewScanner(r)
-
 	for s.Scan() {
 		if err := s.Err(); err != nil {
 			return "", err
 		}
 		text := s.Text()
 		parts := strings.Split(text, ":")
-		if parts[1] == subsystem {
-			return parts[2], nil
+		for _, subs := range strings.Split(parts[1], ",") {
+			if subs == subsystem {
+				return parts[2], nil
+			}
 		}
 	}
 	return "", fmt.Errorf("cgroup '%s' not found in /proc/self/cgroup", subsystem)
 }
+
+func writeFile(dir, file, data string) error {
+	return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
+}
+
+func (c *Cgroup) Apply(pid int) error {
+	// We have two implementations of cgroups support, one is based on
+	// systemd and the dbus api, and one is based on raw cgroup fs operations
+	// following the pre-single-writer model docs at:
+	// http://www.freedesktop.org/wiki/Software/systemd/PaxControlGroups/
+	//
+	// we can pick any subsystem to find the root
+	cgroupRoot, err := FindCgroupMountpoint("cpu")
+	if err != nil {
+		return err
+	}
+	cgroupRoot = filepath.Dir(cgroupRoot)
+
+	if _, err := os.Stat(cgroupRoot); err != nil {
+		return fmt.Errorf("cgroups fs not found")
+	}
+	if err := c.setupDevices(cgroupRoot, pid); err != nil {
+		return err
+	}
+	if err := c.setupMemory(cgroupRoot, pid); err != nil {
+		return err
+	}
+	if err := c.setupCpu(cgroupRoot, pid); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *Cgroup) setupDevices(cgroupRoot string, pid int) (err error) {
+	if !c.DeviceAccess {
+		dir, err := c.Join(cgroupRoot, "devices", pid)
+		if err != nil {
+			return err
+		}
+
+		defer func() {
+			if err != nil {
+				os.RemoveAll(dir)
+			}
+		}()
+
+		if err := writeFile(dir, "devices.deny", "a"); err != nil {
+			return err
+		}
+
+		allow := []string{
+			// /dev/null, zero, full
+			"c 1:3 rwm",
+			"c 1:5 rwm",
+			"c 1:7 rwm",
+
+			// consoles
+			"c 5:1 rwm",
+			"c 5:0 rwm",
+			"c 4:0 rwm",
+			"c 4:1 rwm",

+			// /dev/urandom,/dev/random
+			"c 1:9 rwm",
+			"c 1:8 rwm",
+
+			// /dev/pts/ - pts namespaces are "coming soon"
+			"c 136:* rwm",
+			"c 5:2 rwm",
+
+			// tuntap
+			"c 10:200 rwm",
+		}
+
+		for _, val := range allow {
+			if err := writeFile(dir, "devices.allow", val); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (c *Cgroup) setupMemory(cgroupRoot string, pid int) (err error) {
+	if c.Memory != 0 || c.MemorySwap != 0 {
+		dir, err := c.Join(cgroupRoot, "memory", pid)
+		if err != nil {
+			return err
+		}
+		defer func() {
+			if err != nil {
+				os.RemoveAll(dir)
+			}
+		}()
+
+		if c.Memory != 0 {
+			if err := writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil {
+				return err
+			}
+			if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil {
+				return err
+			}
+		}
+		// By default, MemorySwap is set to twice the size of RAM.
+		// If you want to omit MemorySwap, set it to `-1'.
+		if c.MemorySwap != -1 {
+			if err := writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(c.Memory*2, 10)); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (c *Cgroup) setupCpu(cgroupRoot string, pid int) (err error) {
+	// We always want to join the cpu group, to allow fair cpu scheduling
+	// on a container basis
+	dir, err := c.Join(cgroupRoot, "cpu", pid)
+	if err != nil {
+		return err
+	}
+	if c.CpuShares != 0 {
+		if err := writeFile(dir, "cpu.shares", strconv.FormatInt(c.CpuShares, 10)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/pkg/graphdb/graphdb.go b/pkg/graphdb/graphdb.go
index 9e2466b692..46a23b1e7b 100644
--- a/pkg/graphdb/graphdb.go
+++ b/pkg/graphdb/graphdb.go
@@ -4,6 +4,7 @@ import (
 	"database/sql"
 	"fmt"
 	"path"
+	"strings"
 	"sync"
 )
@@ -51,6 +52,21 @@ type Database struct {
 	mux sync.RWMutex
 }

+func IsNonUniqueNameError(err error) bool {
+	str := err.Error()
+	// sqlite 3.7.17-1ubuntu1 returns:
+	// Set failure: Abort due to constraint violation: columns parent_id, name are not unique
+	if strings.HasSuffix(str, "name are not unique") {
+		return true
+	}
+	// sqlite-3.8.3-1.fc20 returns:
+	// Set failure: Abort due to constraint violation: UNIQUE constraint failed: edge.parent_id, edge.name
+	if strings.Contains(str, "UNIQUE constraint failed") && strings.Contains(str, "edge.name") {
+		return true
+	}
+	return false
+}
+
 // Create a new graph database initialized with a root entity
 func NewDatabase(conn *sql.DB, init bool) (*Database, error) {
 	if conn == nil {
diff --git a/pkg/libcontainer/MAINTAINERS b/pkg/libcontainer/MAINTAINERS
new file mode 100644
index 0000000000..e53d933d47
--- /dev/null
+++ b/pkg/libcontainer/MAINTAINERS
@@ -0,0 +1,2 @@
+Michael Crosby (@crosbymichael)
+Guillaume Charmes (@creack)
diff --git a/pkg/libcontainer/README.md b/pkg/libcontainer/README.md
new file mode 100644
index 0000000000..d6e4dedd63
--- /dev/null
+++ b/pkg/libcontainer/README.md
@@ -0,0 +1,90 @@
+## libcontainer - reference implementation for containers
+
+#### background
+
+libcontainer specifies configuration options for what a container is. It provides a native Go implementation
+for using linux namespaces with no external dependencies. libcontainer provides many convenience functions for working with namespaces, networking, and management.
+
+
+#### container
+A container is a self-contained directory that is able to run one or more processes without
+affecting the host system. The directory is usually a full system tree. Inside the directory
+a `container.json` file is placed with the runtime configuration for how the processes
+should be contained and run. Environment, networking, and different capabilities for the
+process are specified in this file. The configuration is used for each process executed inside the container.
+
+Sample `container.json` file:
+```json
+{
+    "hostname": "koye",
+    "tty": true,
+    "environment": [
+        "HOME=/",
+        "PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin",
+        "container=docker",
+        "TERM=xterm-256color"
+    ],
+    "namespaces": [
+        "NEWIPC",
+        "NEWNS",
+        "NEWPID",
+        "NEWUTS",
+        "NEWNET"
+    ],
+    "capabilities": [
+        "SETPCAP",
+        "SYS_MODULE",
+        "SYS_RAWIO",
+        "SYS_PACCT",
+        "SYS_ADMIN",
+        "SYS_NICE",
+        "SYS_RESOURCE",
+        "SYS_TIME",
+        "SYS_TTY_CONFIG",
+        "MKNOD",
+        "AUDIT_WRITE",
+        "AUDIT_CONTROL",
+        "MAC_OVERRIDE",
+        "MAC_ADMIN",
+        "NET_ADMIN"
+    ],
+    "networks": [{
+        "type": "veth",
+        "context": {
+            "bridge": "docker0",
+            "prefix": "dock"
+        },
+        "address": "172.17.0.100/16",
+        "gateway": "172.17.42.1",
+        "mtu": 1500
+    }
+    ],
+    "cgroups": {
+        "name": "docker-koye",
+        "parent": "docker",
+        "memory": 5248000
+    }
+}
+```
+
+Using this configuration and the current directory holding the rootfs for a process, one can use libcontainer to exec the container. For the lifetime of the namespace, a `pid` file
+is written to the current directory, exposing the pid of the namespaced process to the external world. A client can use this pid to wait, kill, or perform other operations on the container. If a user tries to run a new process inside an existing container with a live namespace, the new process will join the existing namespace.
+
+You may also specify an alternate root directory where the `container.json` file is read and where the `pid` file will be saved.
+
+#### nsinit
+
+`nsinit` is a cli application used as the reference implementation of libcontainer. It is able to
+spawn or join new containers given the current directory. To use `nsinit`, cd into a linux
+rootfs and copy a `container.json` file into the directory with your specified configuration.
+
+To execute `/bin/bash` in the current directory as a container just run:
+```bash
+nsinit exec /bin/bash
+```
+
+If you wish to spawn another process inside the container while your current bash session is
+running just run the exact same command again to get another bash shell or change the command. If the original process (PID 1) dies, all other processes spawned inside the container will also be killed and the namespace will be removed.
+
+You can identify if a process is running in a container by looking to see if a `pid` file is in the root of the directory.
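Since `container.json` maps one-to-one onto the `Container` type added in `pkg/libcontainer/container.go` further down in this diff, a consumer can load it with nothing but the standard library. A minimal, illustrative sketch, assuming the `Namespaces` and `Capabilities` types (not shown in full here) unmarshal from the plain string lists used in the sample:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/dotcloud/docker/pkg/libcontainer"
)

func main() {
	f, err := os.Open("container.json")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	var container libcontainer.Container
	if err := json.NewDecoder(f).Decode(&container); err != nil {
		log.Fatal(err)
	}
	// With the sample file above this prints: koye true [HOME=/ ...]
	fmt.Println(container.Hostname, container.Tty, container.Env)
}
```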
diff --git a/pkg/libcontainer/TODO.md b/pkg/libcontainer/TODO.md
new file mode 100644
index 0000000000..f18c0b4c51
--- /dev/null
+++ b/pkg/libcontainer/TODO.md
@@ -0,0 +1,17 @@
+#### goals
+* small and simple - line count is not everything but less code is better
+* clean lines between what we do in the pkg
+* provide primitives for working with namespaces, not cater to every option
+* extend via configuration, not by features - host networking, no networking, veth network can be accomplished via adjusting the container.json, nothing to do with code
+
+#### tasks
+* proper tty for a new process in an existing container
+* use exec or raw syscalls for new process in existing container
+* set up a proper user in the namespace if specified
+* implement hook or clean interface for cgroups
+* example configs for different setups (host networking, boot init)
+* improve pkg documentation with comments
+* testing - this is hard in a low-level pkg but we could do some, maybe
+* pivot root
+* selinux
+* apparmor
diff --git a/pkg/libcontainer/apparmor/apparmor.go b/pkg/libcontainer/apparmor/apparmor.go
new file mode 100644
index 0000000000..a6d57d4f09
--- /dev/null
+++ b/pkg/libcontainer/apparmor/apparmor.go
@@ -0,0 +1,31 @@
+// +build apparmor,linux,amd64
+
+package apparmor
+
+// #cgo LDFLAGS: -lapparmor
+// #include <sys/apparmor.h>
+// #include <stdlib.h>
+import "C"
+import (
+	"io/ioutil"
+	"unsafe"
+)
+
+func IsEnabled() bool {
+	buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
+	return err == nil && len(buf) > 1 && buf[0] == 'Y'
+}
+
+func ApplyProfile(pid int, name string) error {
+	if !IsEnabled() || name == "" {
+		return nil
+	}
+
+	cName := C.CString(name)
+	defer C.free(unsafe.Pointer(cName))
+
+	if _, err := C.aa_change_onexec(cName); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/pkg/libcontainer/apparmor/apparmor_disabled.go b/pkg/libcontainer/apparmor/apparmor_disabled.go
new file mode 100644
index 0000000000..77543e4a87
--- /dev/null
+++ b/pkg/libcontainer/apparmor/apparmor_disabled.go
@@ -0,0 +1,13 @@
+// +build !apparmor !linux !amd64
+
+package apparmor
+
+import ()
+
+func IsEnabled() bool {
+	return false
+}
+
+func ApplyProfile(pid int, name string) error {
+	return nil
+}
diff --git a/pkg/libcontainer/apparmor/setup.go b/pkg/libcontainer/apparmor/setup.go
new file mode 100644
index 0000000000..e07759cc64
--- /dev/null
+++ b/pkg/libcontainer/apparmor/setup.go
@@ -0,0 +1,97 @@
+package apparmor
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+)
+
+const DefaultProfilePath = "/etc/apparmor.d/docker"
+const DefaultProfile = `
+# AppArmor profile from lxc for containers.
+@{HOME}=@{HOMEDIRS}/*/ /root/
+@{HOMEDIRS}=/home/
+#@{HOMEDIRS}+=
+@{multiarch}=*-linux-gnu*
+@{PROC}=/proc/
+
+profile docker-default flags=(attach_disconnected,mediate_deleted) {
+  network,
+  capability,
+  file,
+  umount,
+
+  # ignore DENIED message on / remount
+  deny mount options=(ro, remount) -> /,
+
+  # allow tmpfs mounts everywhere
+  mount fstype=tmpfs,
+
+  # allow mqueue mounts everywhere
+  mount fstype=mqueue,
+
+  # allow fuse mounts everywhere
+  mount fstype=fuse.*,
+
+  # allow bind mount of /lib/init/fstab for lxcguest
+  mount options=(rw, bind) /lib/init/fstab.lxc/ -> /lib/init/fstab/,
+
+  # deny writes in /proc/sys/fs but allow binfmt_misc to be mounted
+  mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/,
+  deny @{PROC}/sys/fs/** wklx,
+
+  # allow efivars to be mounted, writing to it will be blocked though
+  mount fstype=efivarfs -> /sys/firmware/efi/efivars/,
+
+  # block some other dangerous paths
+  deny @{PROC}/sysrq-trigger rwklx,
+  deny @{PROC}/mem rwklx,
+  deny @{PROC}/kmem rwklx,
+  deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
+  deny @{PROC}/sys/kernel/*/** wklx,
+
+  # deny writes in /sys except for /sys/fs/cgroup, also allow
+  # fusectl, securityfs and debugfs to be mounted there (read-only)
+  mount fstype=fusectl -> /sys/fs/fuse/connections/,
+  mount fstype=securityfs -> /sys/kernel/security/,
+  mount fstype=debugfs -> /sys/kernel/debug/,
+  deny mount fstype=debugfs -> /var/lib/ureadahead/debugfs/,
+  mount fstype=proc -> /proc/,
+  mount fstype=sysfs -> /sys/,
+  deny /sys/[^f]*/** wklx,
+  deny /sys/f[^s]*/** wklx,
+  deny /sys/fs/[^c]*/** wklx,
+  deny /sys/fs/c[^g]*/** wklx,
+  deny /sys/fs/cg[^r]*/** wklx,
+  deny /sys/firmware/efi/efivars/** rwklx,
+  deny /sys/kernel/security/** rwklx,
+  mount options=(move) /sys/fs/cgroup/cgmanager/ -> /sys/fs/cgroup/cgmanager.lower/,
+
+  # the container may never be allowed to mount devpts. If it does, it
+  # will remount the host's devpts. We could allow it to do it with
+  # the newinstance option (but, right now, we don't).
+  deny mount fstype=devpts,
+}
+`
+
+func InstallDefaultProfile() error {
+	if !IsEnabled() {
+		return nil
+	}
+
+	// If the profile already exists, let it be.
+	if _, err := os.Stat(DefaultProfilePath); err == nil {
+		return nil
+	}
+
+	if err := ioutil.WriteFile(DefaultProfilePath, []byte(DefaultProfile), 0644); err != nil {
+		return err
+	}
+
+	output, err := exec.Command("/lib/init/apparmor-profile-load", "docker").CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("Error loading docker profile: %s (%s)", err, output)
+	}
+	return nil
+}
diff --git a/pkg/libcontainer/capabilities/capabilities.go b/pkg/libcontainer/capabilities/capabilities.go
new file mode 100644
index 0000000000..3c6d752496
--- /dev/null
+++ b/pkg/libcontainer/capabilities/capabilities.go
@@ -0,0 +1,33 @@
+package capabilities
+
+import (
+	"github.com/dotcloud/docker/pkg/libcontainer"
+	"github.com/syndtr/gocapability/capability"
+	"os"
+)
+
+// DropCapabilities drops capabilities for the current process based
+// on the container's configuration.
+func DropCapabilities(container *libcontainer.Container) error {
+	if drop := getCapabilities(container); len(drop) > 0 {
+		c, err := capability.NewPid(os.Getpid())
+		if err != nil {
+			return err
+		}
+		c.Unset(capability.CAPS|capability.BOUNDS, drop...)
+
+		if err := c.Apply(capability.CAPS | capability.BOUNDS); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// getCapabilities returns the specific cap values for the libcontainer types
+func getCapabilities(container *libcontainer.Container) []capability.Cap {
+	drop := []capability.Cap{}
+	for _, c := range container.Capabilities {
+		drop = append(drop, c.Value)
+	}
+	return drop
+}
diff --git a/pkg/libcontainer/container.go b/pkg/libcontainer/container.go
new file mode 100644
index 0000000000..a777da58a4
--- /dev/null
+++ b/pkg/libcontainer/container.go
@@ -0,0 +1,38 @@
+package libcontainer
+
+import (
+	"github.com/dotcloud/docker/pkg/cgroups"
+)
+
+// Context is a generic key value pair that allows
+// arbitrary data to be sent
+type Context map[string]string
+
+// Container defines configuration options for how a
+// container is set up inside a directory and how a process should be executed
+type Container struct {
+	Hostname     string          `json:"hostname,omitempty"`      // hostname
+	ReadonlyFs   bool            `json:"readonly_fs,omitempty"`   // set the container's rootfs as readonly
+	NoPivotRoot  bool            `json:"no_pivot_root,omitempty"` // this can be enabled if you are running in ramdisk
+	User         string          `json:"user,omitempty"`          // user to execute the process as
+	WorkingDir   string          `json:"working_dir,omitempty"`   // current working directory
+	Env          []string        `json:"environment,omitempty"`   // environment to set
+	Tty          bool            `json:"tty,omitempty"`           // setup a proper tty or not
+	Namespaces   Namespaces      `json:"namespaces,omitempty"`    // namespaces to apply
+	Capabilities Capabilities    `json:"capabilities,omitempty"`  // capabilities to drop
+	Networks     []*Network      `json:"networks,omitempty"`      // nil for host's network stack
+	Cgroups      *cgroups.Cgroup `json:"cgroups,omitempty"`       // cgroups
+	Context      Context         `json:"context,omitempty"`       // generic context for specific options (apparmor, selinux)
+}
+
+// Network defines configuration for a container's networking stack
+//
+// The network configuration can be omitted from a container, causing the
+// container to be set up with the host's networking stack
+type Network struct {
+	Type    string  `json:"type,omitempty"`    // type of networking to setup, e.g. veth, macvlan, etc.
+	Context Context `json:"context,omitempty"` // generic context for type specific networking options
+	Address string  `json:"address,omitempty"`
+	Gateway string  `json:"gateway,omitempty"`
+	Mtu     int     `json:"mtu,omitempty"`
+}
diff --git a/pkg/libcontainer/container.json b/pkg/libcontainer/container.json
new file mode 100644
index 0000000000..83e407467c
--- /dev/null
+++ b/pkg/libcontainer/container.json
@@ -0,0 +1,50 @@
+{
+    "hostname": "koye",
+    "tty": true,
+    "environment": [
+        "HOME=/",
+        "PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin",
+        "container=docker",
+        "TERM=xterm-256color"
+    ],
+    "namespaces": [
+        "NEWIPC",
+        "NEWNS",
+        "NEWPID",
+        "NEWUTS",
+        "NEWNET"
+    ],
+    "capabilities": [
+        "SETPCAP",
+        "SYS_MODULE",
+        "SYS_RAWIO",
+        "SYS_PACCT",
+        "SYS_ADMIN",
+        "SYS_NICE",
+        "SYS_RESOURCE",
+        "SYS_TIME",
+        "SYS_TTY_CONFIG",
+        "MKNOD",
+        "AUDIT_WRITE",
+        "AUDIT_CONTROL",
+        "MAC_OVERRIDE",
+        "MAC_ADMIN",
+        "NET_ADMIN"
+    ],
+    "networks": [{
+        "type": "veth",
+        "context": {
+            "bridge": "docker0",
+            "prefix": "dock"
+        },
+        "address": "172.17.0.100/16",
+        "gateway": "172.17.42.1",
+        "mtu": 1500
+    }
+    ],
+    "cgroups": {
+        "name": "docker-koye",
+        "parent": "docker",
+        "memory": 5248000
+    }
+}
diff --git a/pkg/libcontainer/network/network.go b/pkg/libcontainer/network/network.go
new file mode 100644
index 0000000000..8c7a4b618e
--- /dev/null
+++ b/pkg/libcontainer/network/network.go
@@ -0,0 +1,78 @@
+package network
+
+import (
+	"github.com/dotcloud/docker/pkg/netlink"
+	"net"
+)
+
+func InterfaceUp(name string) error {
+	iface, err := net.InterfaceByName(name)
+	if err != nil {
+		return err
+	}
+	return netlink.NetworkLinkUp(iface)
+}
+
+func InterfaceDown(name string) error {
+	iface, err := net.InterfaceByName(name)
+	if err != nil {
+		return err
+	}
+	return netlink.NetworkLinkDown(iface)
+}
+
+func ChangeInterfaceName(old, newName string) error {
+	iface, err := net.InterfaceByName(old)
+	if err != nil {
+		return err
+	}
+	return netlink.NetworkChangeName(iface, newName)
+}
+
+func CreateVethPair(name1, name2 string) error {
+	return netlink.NetworkCreateVethPair(name1, name2)
+}
+
+func SetInterfaceInNamespacePid(name string, nsPid int) error {
+	iface, err := net.InterfaceByName(name)
+	if err != nil {
+		return err
+	}
+	return netlink.NetworkSetNsPid(iface, nsPid)
+}
+
+func SetInterfaceMaster(name, master string) error {
+	iface, err := net.InterfaceByName(name)
+	if err != nil {
+		return err
+	}
+	masterIface, err := net.InterfaceByName(master)
+	if err != nil {
+		return err
+	}
+	return netlink.NetworkSetMaster(iface, masterIface)
+}
+
+func SetDefaultGateway(ip string) error {
+	return netlink.AddDefaultGw(net.ParseIP(ip))
+}
+
+func SetInterfaceIp(name string, rawIp string) error {
+	iface, err := net.InterfaceByName(name)
+	if err != nil {
+		return err
+	}
+	ip, ipNet, err := net.ParseCIDR(rawIp)
+	if err != nil {
+		return err
+	}
+	return netlink.NetworkLinkAddIp(iface, ip, ipNet)
+}
+
+func SetMtu(name string, mtu int) error {
+	iface, err := net.InterfaceByName(name)
+	if err != nil {
+		return err
+	}
+	return netlink.NetworkSetMTU(iface, mtu)
+}
diff --git a/pkg/libcontainer/network/strategy.go b/pkg/libcontainer/network/strategy.go
new file mode 100644
index 0000000000..234fcc0aa2
--- /dev/null
+++ b/pkg/libcontainer/network/strategy.go
@@ -0,0 +1,32 @@
+package network
+
+import (
+	"errors"
+	"github.com/dotcloud/docker/pkg/libcontainer"
+)
+
+var (
+	ErrNotValidStrategyType = errors.New("not a valid network strategy type")
+)
+
+var strategies = map[string]NetworkStrategy{
+	"veth": &Veth{},
+}
+
+// NetworkStrategy represents a specific network configuration for
+// a container's networking stack
+type NetworkStrategy interface {
+	Create(*libcontainer.Network, int, libcontainer.Context) error
+	Initialize(*libcontainer.Network, libcontainer.Context) error
+}
+
+// GetStrategy returns the specific network strategy for the
+// provided type. If no strategy is registered for the type an
+// ErrNotValidStrategyType is returned.
+func GetStrategy(tpe string) (NetworkStrategy, error) {
+	s, exists := strategies[tpe]
+	if !exists {
+		return nil, ErrNotValidStrategyType
+	}
+	return s, nil
+}
diff --git a/pkg/libcontainer/network/veth.go b/pkg/libcontainer/network/veth.go
new file mode 100644
index 0000000000..3ab1b2393b
--- /dev/null
+++ b/pkg/libcontainer/network/veth.go
@@ -0,0 +1,100 @@
+package network
+
+import (
+	"fmt"
+	"github.com/dotcloud/docker/pkg/libcontainer"
+	"github.com/dotcloud/docker/pkg/libcontainer/utils"
+)
+
+// Veth is a network strategy that uses a bridge and creates
+// a veth pair: one end stays outside on the host and the other
+// is placed inside the container's namespace
+type Veth struct {
+}
+
+func (v *Veth) Create(n *libcontainer.Network, nspid int, context libcontainer.Context) error {
+	var (
+		bridge string
+		prefix string
+		exists bool
+	)
+	if bridge, exists = n.Context["bridge"]; !exists {
+		return fmt.Errorf("bridge does not exist in network context")
+	}
+	if prefix, exists = n.Context["prefix"]; !exists {
+		return fmt.Errorf("veth prefix does not exist in network context")
+	}
+	name1, name2, err := createVethPair(prefix)
+	if err != nil {
+		return err
+	}
+	context["veth-host"] = name1
+	context["veth-child"] = name2
+	if err := SetInterfaceMaster(name1, bridge); err != nil {
+		return err
+	}
+	if err := SetMtu(name1, n.Mtu); err != nil {
+		return err
+	}
+	if err := InterfaceUp(name1); err != nil {
+		return err
+	}
+	if err := SetInterfaceInNamespacePid(name2, nspid); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (v *Veth) Initialize(config *libcontainer.Network, context libcontainer.Context) error {
+	var (
+		vethChild string
+		exists    bool
+	)
+	if vethChild, exists = context["veth-child"]; !exists {
+		return fmt.Errorf("vethChild does not exist in network context")
+	}
+	if err := InterfaceDown(vethChild); err != nil {
+		return fmt.Errorf("interface down %s %s", vethChild, err)
+	}
+	if err := ChangeInterfaceName(vethChild, "eth0"); err != nil {
+		return fmt.Errorf("change %s to eth0 %s", vethChild, err)
+	}
+	if err := SetInterfaceIp("eth0", config.Address); err != nil {
+		return fmt.Errorf("set eth0 ip %s", err)
+	}
+	if err := SetMtu("eth0", config.Mtu); err != nil {
+		return fmt.Errorf("set eth0 mtu to %d %s", config.Mtu, err)
+	}
+	if err := InterfaceUp("eth0"); err != nil {
+		return fmt.Errorf("eth0 up %s", err)
+	}
+	if err := SetMtu("lo", config.Mtu); err != nil {
+		return fmt.Errorf("set lo mtu to %d %s", config.Mtu, err)
+	}
+	if err := InterfaceUp("lo"); err != nil {
+		return fmt.Errorf("lo up %s", err)
+	}
+	if config.Gateway != "" {
+		if err := SetDefaultGateway(config.Gateway); err != nil {
+			return fmt.Errorf("set gateway to %s %s", config.Gateway, err)
+		}
+	}
+	return nil
+}
+
+// createVethPair will automatically generate two random names for
+// the veth pair and ensure that they have been created
+func createVethPair(prefix string) (name1 string, name2 string, err error) {
+	name1, err = utils.GenerateRandomName(prefix, 4)
+	if err != nil {
+		return
+	}
+	name2, err = utils.GenerateRandomName(prefix, 4)
+	if err != nil {
+		return
+	}
+	if err = CreateVethPair(name1, name2); err != nil {
+		return
+	}
+	return
+}
diff --git a/pkg/libcontainer/nsinit/command.go b/pkg/libcontainer/nsinit/command.go
new file mode 100644
index 0000000000..5546065b6d
--- /dev/null
+++ b/pkg/libcontainer/nsinit/command.go
@@ -0,0 +1,45 @@
+package nsinit
+
+import (
+	"github.com/dotcloud/docker/pkg/libcontainer"
+	"github.com/dotcloud/docker/pkg/system"
+	"os"
+	"os/exec"
+)
+
+// CommandFactory takes the container's configuration and options passed by the
+// parent process and creates an *exec.Cmd that will be used to fork/exec the
+// namespaced init process
+type CommandFactory interface {
+	Create(container *libcontainer.Container, console string, syncFd *os.File, args []string) *exec.Cmd
+}
+
+type DefaultCommandFactory struct {
+	Root string
+}
+
+// Create will return an exec.Cmd with the Cloneflags set to the proper namespaces
+// defined on the container's configuration and use the current binary as the init with the
+// args provided
+func (c *DefaultCommandFactory) Create(container *libcontainer.Container, console string, pipe *os.File, args []string) *exec.Cmd {
+	// get our binary name from arg0 so we can always re-exec ourselves
+	command := exec.Command(os.Args[0], append([]string{
+		"-console", console,
+		"-pipe", "3",
+		"-root", c.Root,
+		"init"}, args...)...)
+
+	system.SetCloneFlags(command, uintptr(GetNamespaceFlags(container.Namespaces)))
+	command.Env = container.Env
+	command.ExtraFiles = []*os.File{pipe}
+	return command
+}
+
+// GetNamespaceFlags parses the container's Namespaces options to set the correct
+// flags on clone, unshare, and setns
+func GetNamespaceFlags(namespaces libcontainer.Namespaces) (flag int) {
+	for _, ns := range namespaces {
+		flag |= ns.Value
+	}
+	return flag
+}
diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go
new file mode 100644
index 0000000000..4963f126e9
--- /dev/null
+++ b/pkg/libcontainer/nsinit/exec.go
@@ -0,0 +1,96 @@
+// +build linux
+
+package nsinit
+
+import (
+	"github.com/dotcloud/docker/pkg/libcontainer"
+	"github.com/dotcloud/docker/pkg/libcontainer/network"
+	"github.com/dotcloud/docker/pkg/system"
+	"os"
+	"os/exec"
+	"syscall"
+)
+
+// Exec performs setup outside of a namespace so that a container can be
+// executed. Exec is a high level function for working with container namespaces.
+func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args []string) (int, error) {
+	var (
+		master  *os.File
+		console string
+		err     error
+	)
+
+	// create a pipe so that we can synchronize with the namespaced process and
+	// pass the veth name to the child
+	syncPipe, err := NewSyncPipe()
+	if err != nil {
+		return -1, err
+	}
+
+	if container.Tty {
+		master, console, err = system.CreateMasterAndConsole()
+		if err != nil {
+			return -1, err
+		}
+		term.SetMaster(master)
+	}
+
+	command := ns.commandFactory.Create(container, console, syncPipe.child, args)
+	if err := term.Attach(command); err != nil {
+		return -1, err
+	}
+	defer term.Close()
+
+	if err := command.Start(); err != nil {
+		return -1, err
+	}
+	if err := ns.stateWriter.WritePid(command.Process.Pid); err != nil {
+		command.Process.Kill()
+		return -1, err
+	}
+	defer ns.stateWriter.DeletePid()
+
+	// Do this before syncing with child so that no children
+	// can escape the cgroup
+	if err := ns.SetupCgroups(container, command.Process.Pid); err != nil {
+		command.Process.Kill()
+		return -1, err
+	}
+	if err := ns.InitializeNetworking(container, command.Process.Pid, syncPipe); err != nil {
+		command.Process.Kill()
+		return -1, err
+	}
+
+	// Sync with child
+	syncPipe.Close()
+
+	if err := command.Wait(); err != nil {
+		if _, ok := err.(*exec.ExitError); !ok {
+			return -1, err
+		}
+	}
+	return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil
+}
+
+func (ns *linuxNs) SetupCgroups(container *libcontainer.Container, nspid int) error {
+	if container.Cgroups != nil {
+		if err := container.Cgroups.Apply(nspid); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (ns *linuxNs) InitializeNetworking(container *libcontainer.Container, nspid int, pipe *SyncPipe) error {
+	context := libcontainer.Context{}
+	for _, config := range container.Networks {
+		strategy, err := network.GetStrategy(config.Type)
+		if err != nil {
+			return err
+		}
+		if err := strategy.Create(config, nspid, context); err != nil {
+			return err
+		}
+	}
+	return pipe.SendToChild(context)
+}
diff --git a/pkg/libcontainer/nsinit/execin.go b/pkg/libcontainer/nsinit/execin.go
new file mode 100644
index 0000000000..488fe0e248
--- /dev/null
+++ b/pkg/libcontainer/nsinit/execin.go
@@ -0,0 +1,94 @@
+// +build linux
+
+package nsinit
+
+import (
+	"fmt"
+	"github.com/dotcloud/docker/pkg/libcontainer"
+	"github.com/dotcloud/docker/pkg/system"
+	"os"
+	"path/filepath"
+	"strconv"
+	"syscall"
+)
+
+// ExecIn uses an existing pid and joins the pid's namespaces with the new command.
+func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) {
+	for _, ns := range container.Namespaces {
+		if err := system.Unshare(ns.Value); err != nil {
+			return -1, err
+		}
+	}
+	fds, err := ns.getNsFds(nspid, container)
+	closeFds := func() {
+		for _, f := range fds {
+			system.Closefd(f)
+		}
+	}
+	if err != nil {
+		closeFds()
+		return -1, err
+	}
+
+	// foreach namespace fd, use setns to join an existing container's namespaces
+	for _, fd := range fds {
+		if fd > 0 {
+			if err := system.Setns(fd, 0); err != nil {
+				closeFds()
+				return -1, fmt.Errorf("setns %s", err)
+			}
+		}
+		system.Closefd(fd)
+	}
+
+	// if the container has a new pid and mount namespace we need to
+	// remount proc and sys to pick up the changes
+	if container.Namespaces.Contains("NEWNS") && container.Namespaces.Contains("NEWPID") {
+		pid, err := system.Fork()
+		if err != nil {
+			return -1, err
+		}
+		if pid == 0 {
+			// TODO: make all raw syscalls to be fork safe
+			if err := system.Unshare(syscall.CLONE_NEWNS); err != nil {
+				return -1, err
+			}
+			if err := remountProc(); err != nil {
+				return -1, fmt.Errorf("remount proc %s", err)
+			}
+			if err := remountSys(); err != nil {
+				return -1, fmt.Errorf("remount sys %s", err)
+			}
+			goto dropAndExec
+		}
+		proc, err := os.FindProcess(pid)
+		if err != nil {
+			return -1, err
+		}
+		state, err := proc.Wait()
+		if err != nil {
+			return -1, err
+		}
+		os.Exit(state.Sys().(syscall.WaitStatus).ExitStatus())
+	}
+dropAndExec:
+	if err := finalizeNamespace(container); err != nil {
+		return -1, err
+	}
+	if err := system.Execv(args[0], args[0:], container.Env); err != nil {
+		return -1, err
+	}
+	panic("unreachable")
+}
+
+func (ns *linuxNs) getNsFds(pid int, container *libcontainer.Container) ([]uintptr, error) {
+	fds := make([]uintptr, len(container.Namespaces))
+	for i, ns := range container.Namespaces {
+		f, err := os.OpenFile(filepath.Join("/proc/", strconv.Itoa(pid), "ns", ns.File), os.O_RDONLY, 0)
+		if err != nil {
+			return fds, err
+		}
+		fds[i] = f.Fd()
+	}
+	return fds, nil
+}
diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go
new file mode 100644
index 0000000000..336fc1eaaf
--- /dev/null
+++ b/pkg/libcontainer/nsinit/init.go
@@ -0,0 +1,147 @@
+// +build linux
+
+package nsinit
+
+import (
+	"fmt"
+	"github.com/dotcloud/docker/pkg/libcontainer"
+	"github.com/dotcloud/docker/pkg/libcontainer/apparmor"
+	"github.com/dotcloud/docker/pkg/libcontainer/capabilities"
+	"github.com/dotcloud/docker/pkg/libcontainer/network"
+	"github.com/dotcloud/docker/pkg/libcontainer/utils"
+	"github.com/dotcloud/docker/pkg/system"
+	"github.com/dotcloud/docker/pkg/user"
+	"os"
+	"syscall"
+)
+
+// Init is the init process that first runs inside a new namespace to setup mounts, users, networking,
+// and other options required for the new container.
+func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error {
+	rootfs, err := utils.ResolveRootfs(uncleanRootfs)
+	if err != nil {
+		return err
+	}
+
+	// We always read this as it is a way to sync with the parent as well
+	context, err := syncPipe.ReadFromParent()
+	if err != nil {
+		syncPipe.Close()
+		return err
+	}
+	syncPipe.Close()
+
+	if console != "" {
+		slave, err := system.OpenTerminal(console, syscall.O_RDWR)
+		if err != nil {
+			return fmt.Errorf("open terminal %s", err)
+		}
+		if err := dupSlave(slave); err != nil {
+			return fmt.Errorf("dup2 slave %s", err)
+		}
+	}
+	if _, err := system.Setsid(); err != nil {
+		return fmt.Errorf("setsid %s", err)
+	}
+	if console != "" {
+		if err := system.Setctty(); err != nil {
+			return fmt.Errorf("setctty %s", err)
+		}
+	}
+	if err := system.ParentDeathSignal(); err != nil {
+		return fmt.Errorf("parent death signal %s", err)
+	}
+	if err := setupNewMountNamespace(rootfs, console, container.ReadonlyFs, container.NoPivotRoot); err != nil {
+		return fmt.Errorf("setup mount namespace %s", err)
+	}
+	if err := setupNetwork(container, context); err != nil {
+		return fmt.Errorf("setup networking %s", err)
+	}
+	if err := system.Sethostname(container.Hostname); err != nil {
+		return fmt.Errorf("sethostname %s", err)
+	}
+	if err := finalizeNamespace(container); err != nil {
+		return fmt.Errorf("finalize namespace %s", err)
+	}
+
+	if err := apparmor.ApplyProfile(os.Getpid(), container.Context["apparmor_profile"]); err != nil {
+		return err
+	}
+	return system.Execv(args[0], args[0:], container.Env)
+}
+
+func setupUser(container *libcontainer.Container) error {
+	switch container.User {
+	case "root", "":
+		if err := system.Setgroups(nil); err != nil {
+			return err
+		}
+		if err := system.Setresgid(0, 0, 0); err != nil {
+			return err
+		}
+		if err := system.Setresuid(0, 0, 0); err != nil {
+			return err
+		}
+	default:
+		uid, gid, suppGids, err := user.GetUserGroupSupplementary(container.User, syscall.Getuid(), syscall.Getgid())
+		if err != nil {
+			return err
+		}
+		if err := system.Setgroups(suppGids); err != nil {
+			return err
+		}
+		if err := system.Setgid(gid); err != nil {
+			return err
+		}
+		if err := system.Setuid(uid); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// dupSlave dup2s the pty slave's fd onto stdin, stdout, and stderr
+// of the current process
+func dupSlave(slave *os.File) error {
+	if err := system.Dup2(slave.Fd(), 0); err != nil {
+		return err
+	}
+	if err := system.Dup2(slave.Fd(), 1); err != nil {
+		return err
+	}
+	if err := system.Dup2(slave.Fd(), 2); err != nil {
+		return err
+	}
+	return nil
+}
+
+// setupNetwork uses the container's Network configs, if any, to initialize
+// the new veth interface inside the container: the interface is renamed to
+// eth0 and the MTU, IP address, and default gateway are set
+func setupNetwork(container *libcontainer.Container, context libcontainer.Context) error {
+	for _, config := range container.Networks {
+		strategy, err := network.GetStrategy(config.Type)
+		if err != nil {
+			return err
+		}
+		return strategy.Initialize(config, context)
+	}
+	return nil
+}
+
+// finalizeNamespace drops the caps and sets the correct user
+// and working dir before execing the command inside the namespace
+func finalizeNamespace(container *libcontainer.Container) error {
+	if err := capabilities.DropCapabilities(container); err != nil {
+		return fmt.Errorf("drop capabilities %s", err)
+	}
+	if err := setupUser(container); err != nil {
+		return fmt.Errorf("setup user %s", err)
+	}
+	if container.WorkingDir != "" {
+		if err := system.Chdir(container.WorkingDir); err != nil {
+			return fmt.Errorf("chdir to %s %s", container.WorkingDir, err)
+		}
+	}
+	return nil
+}
diff --git a/pkg/libcontainer/nsinit/mount.go b/pkg/libcontainer/nsinit/mount.go
new file mode 100644
index 0000000000..83577cfa8c
--- /dev/null
+++ b/pkg/libcontainer/nsinit/mount.go
@@ -0,0 +1,277 @@
+// +build linux
+
+package nsinit
+
+import (
+	"fmt"
+	"github.com/dotcloud/docker/pkg/system"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"syscall"
+)
+
+// default mount point flags
+const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
+
+// setupNewMountNamespace is used to initialize a new mount namespace for a new
+// container in the rootfs that is specified.
+//
+// There is no need to unmount the new mounts because as soon as the mount namespace
+// is no longer in use, the mounts will be removed automatically
+func setupNewMountNamespace(rootfs, console string, readonly, noPivotRoot bool) error {
+	flag := syscall.MS_PRIVATE
+	if noPivotRoot {
+		flag = syscall.MS_SLAVE
+	}
+	if err := system.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil {
+		return fmt.Errorf("mounting / as slave %s", err)
+	}
+	if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
+		return fmt.Errorf("mounting %s as bind %s", rootfs, err)
+	}
+	if readonly {
+		if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, ""); err != nil {
+			return fmt.Errorf("mounting %s as readonly %s", rootfs, err)
+		}
+	}
+	if err := mountSystem(rootfs); err != nil {
+		return fmt.Errorf("mount system %s", err)
+	}
+	if err := copyDevNodes(rootfs); err != nil {
+		return fmt.Errorf("copy dev nodes %s", err)
+	}
+	// In non-privileged mode, this fails. Discard the error.
+	setupLoopbackDevices(rootfs)
+	if err := setupDev(rootfs); err != nil {
+		return err
+	}
+	if console != "" {
+		if err := setupPtmx(rootfs, console); err != nil {
+			return err
+		}
+	}
+	if err := system.Chdir(rootfs); err != nil {
+		return fmt.Errorf("chdir into %s %s", rootfs, err)
+	}
+
+	if noPivotRoot {
+		if err := rootMsMove(rootfs); err != nil {
+			return err
+		}
+	} else {
+		if err := rootPivot(rootfs); err != nil {
+			return err
+		}
+	}
+
+	system.Umask(0022)
+
+	return nil
+}
+
+// use a pivot root to setup the rootfs
+func rootPivot(rootfs string) error {
+	pivotDir, err := ioutil.TempDir(rootfs, ".pivot_root")
+	if err != nil {
+		return fmt.Errorf("can't create pivot_root dir %s: %s", pivotDir, err)
+	}
+	if err := system.Pivotroot(rootfs, pivotDir); err != nil {
+		return fmt.Errorf("pivot_root %s", err)
+	}
+	if err := system.Chdir("/"); err != nil {
+		return fmt.Errorf("chdir / %s", err)
+	}
+	// path to pivot dir now changed, update
+	pivotDir = filepath.Join("/", filepath.Base(pivotDir))
+	if err := system.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
+		return fmt.Errorf("unmount pivot_root dir %s", err)
+	}
+	if err := os.Remove(pivotDir); err != nil {
+		return fmt.Errorf("remove pivot_root dir %s", err)
+	}
+	return nil
+}
+
+// use MS_MOVE and chroot to setup the rootfs
+func rootMsMove(rootfs string) error {
+	if err := system.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil {
+		return fmt.Errorf("mount move %s into / %s", rootfs, err)
+	}
+	if err := system.Chroot("."); err != nil {
+		return fmt.Errorf("chroot . %s", err)
+	}
%s", err) + } + if err := system.Chdir("/"); err != nil { + return fmt.Errorf("chdir / %s", err) + } + return nil +} + +// copyDevNodes mknods the hosts devices so the new container has access to them +func copyDevNodes(rootfs string) error { + oldMask := system.Umask(0000) + defer system.Umask(oldMask) + + for _, node := range []string{ + "null", + "zero", + "full", + "random", + "urandom", + "tty", + } { + if err := copyDevNode(rootfs, node); err != nil { + return err + } + } + return nil +} + +func setupLoopbackDevices(rootfs string) error { + for i := 0; ; i++ { + if err := copyDevNode(rootfs, fmt.Sprintf("loop%d", i)); err != nil { + if !os.IsNotExist(err) { + return err + } + break + } + + } + return nil +} + +func copyDevNode(rootfs, node string) error { + stat, err := os.Stat(filepath.Join("/dev", node)) + if err != nil { + return err + } + var ( + dest = filepath.Join(rootfs, "dev", node) + st = stat.Sys().(*syscall.Stat_t) + ) + if err := system.Mknod(dest, st.Mode, int(st.Rdev)); err != nil && !os.IsExist(err) { + return fmt.Errorf("copy %s %s", node, err) + } + return nil +} + +// setupDev symlinks the current processes pipes into the +// appropriate destination on the containers rootfs +func setupDev(rootfs string) error { + for _, link := range []struct { + from string + to string + }{ + {"/proc/kcore", "/dev/core"}, + {"/proc/self/fd", "/dev/fd"}, + {"/proc/self/fd/0", "/dev/stdin"}, + {"/proc/self/fd/1", "/dev/stdout"}, + {"/proc/self/fd/2", "/dev/stderr"}, + } { + dest := filepath.Join(rootfs, link.to) + if err := os.Remove(dest); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("remove %s %s", dest, err) + } + if err := os.Symlink(link.from, dest); err != nil { + return fmt.Errorf("symlink %s %s", dest, err) + } + } + return nil +} + +// setupConsole ensures that the container has a proper /dev/console setup +func setupConsole(rootfs, console string) error { + oldMask := system.Umask(0000) + defer system.Umask(oldMask) + + stat, err := os.Stat(console) + if err != nil { + return fmt.Errorf("stat console %s %s", console, err) + } + var ( + st = stat.Sys().(*syscall.Stat_t) + dest = filepath.Join(rootfs, "dev/console") + ) + if err := os.Remove(dest); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("remove %s %s", dest, err) + } + if err := os.Chmod(console, 0600); err != nil { + return err + } + if err := os.Chown(console, 0, 0); err != nil { + return err + } + if err := system.Mknod(dest, (st.Mode&^07777)|0600, int(st.Rdev)); err != nil { + return fmt.Errorf("mknod %s %s", dest, err) + } + if err := system.Mount(console, dest, "bind", syscall.MS_BIND, ""); err != nil { + return fmt.Errorf("bind %s to %s %s", console, dest, err) + } + return nil +} + +// mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts +// inside the mount namespace +func mountSystem(rootfs string) error { + for _, m := range []struct { + source string + path string + device string + flags int + data string + }{ + {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, + {source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags}, + {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: "mode=1777,size=65536k"}, + {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: "newinstance,ptmxmode=0666,mode=620,gid=5"}, + } { + if err := os.MkdirAll(m.path, 0755); 
+			return fmt.Errorf("mkdirall %s %s", m.path, err)
+		}
+		if err := system.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil {
+			return fmt.Errorf("mounting %s into %s %s", m.source, m.path, err)
+		}
+	}
+	return nil
+}
+
+// setupPtmx adds a symlink to pts/ptmx for /dev/ptmx and
+// finishes setting up /dev/console
+func setupPtmx(rootfs, console string) error {
+	ptmx := filepath.Join(rootfs, "dev/ptmx")
+	if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	if err := os.Symlink("pts/ptmx", ptmx); err != nil {
+		return fmt.Errorf("symlink dev ptmx %s", err)
+	}
+	if err := setupConsole(rootfs, console); err != nil {
+		return err
+	}
+	return nil
+}
+
+// remountProc is used to detach and remount the proc filesystem,
+// commonly needed when running a new process inside an existing container
+func remountProc() error {
+	if err := system.Unmount("/proc", syscall.MNT_DETACH); err != nil {
+		return err
+	}
+	if err := system.Mount("proc", "/proc", "proc", uintptr(defaultMountFlags), ""); err != nil {
+		return err
+	}
+	return nil
+}
+
+func remountSys() error {
+	if err := system.Unmount("/sys", syscall.MNT_DETACH); err != nil {
+		if err != syscall.EINVAL {
+			return err
+		}
+	} else {
+		if err := system.Mount("sysfs", "/sys", "sysfs", uintptr(defaultMountFlags), ""); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/pkg/libcontainer/nsinit/nsinit.go b/pkg/libcontainer/nsinit/nsinit.go
new file mode 100644
index 0000000000..f09a130aa2
--- /dev/null
+++ b/pkg/libcontainer/nsinit/nsinit.go
@@ -0,0 +1,26 @@
+package nsinit
+
+import (
+	"github.com/dotcloud/docker/pkg/libcontainer"
+)
+
+// NsInit is an interface with the public facing methods to provide high level
+// exec operations on a container
+type NsInit interface {
+	Exec(container *libcontainer.Container, term Terminal, args []string) (int, error)
+	ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error)
+	Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error
+}
+
+type linuxNs struct {
+	root           string
+	commandFactory CommandFactory
+	stateWriter    StateWriter
+}
+
+func NewNsInit(command CommandFactory, state StateWriter) NsInit {
+	return &linuxNs{
+		commandFactory: command,
+		stateWriter:    state,
+	}
+}
diff --git a/pkg/libcontainer/nsinit/nsinit/main.go b/pkg/libcontainer/nsinit/nsinit/main.go
new file mode 100644
index 0000000000..61921c59a3
--- /dev/null
+++ b/pkg/libcontainer/nsinit/nsinit/main.go
@@ -0,0 +1,110 @@
+package main
+
+import (
+	"encoding/json"
+	"flag"
+	"github.com/dotcloud/docker/pkg/libcontainer"
+	"github.com/dotcloud/docker/pkg/libcontainer/nsinit"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+	"strconv"
+)
+
+var (
+	root, console string
+	pipeFd        int
+)
+
+func registerFlags() {
+	flag.StringVar(&console, "console", "", "console (pty slave) path")
+	flag.IntVar(&pipeFd, "pipe", 0, "sync pipe fd")
+	flag.StringVar(&root, "root", ".", "root for storing configuration data")
+
+	flag.Parse()
+}
+
+func main() {
+	registerFlags()
+
+	if flag.NArg() < 1 {
+		log.Fatalf("wrong number of arguments %d", flag.NArg())
+	}
+	container, err := loadContainer()
+	if err != nil {
+		log.Fatal(err)
+	}
+	ns, err := newNsInit()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	switch flag.Arg(0) {
+	case "exec": // this is executed outside of the namespace in the cwd
+		var exitCode int
+		nspid, err := readPid()
+		if err != nil {
+			if !os.IsNotExist(err) {
+				log.Fatal(err)
+			}
+		}
+		if nspid > 0 {
+			exitCode, err = ns.ExecIn(container, nspid, flag.Args()[1:])
+		} else {
+			term := nsinit.NewTerminal(os.Stdin, os.Stdout, os.Stderr, container.Tty)
+			exitCode, err = ns.Exec(container, term, flag.Args()[1:])
+		}
+		if err != nil {
+			log.Fatal(err)
+		}
+		os.Exit(exitCode)
+	case "init": // this is executed inside of the namespace to setup the container
+		cwd, err := os.Getwd()
+		if err != nil {
+			log.Fatal(err)
+		}
+		if flag.NArg() < 2 {
+			log.Fatalf("wrong number of arguments %d", flag.NArg())
+		}
+		syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(pipeFd))
+		if err != nil {
+			log.Fatal(err)
+		}
+		if err := ns.Init(container, cwd, console, syncPipe, flag.Args()[1:]); err != nil {
+			log.Fatal(err)
+		}
+	default:
+		log.Fatalf("command not supported for nsinit %s", flag.Arg(0))
+	}
+}
+
+func loadContainer() (*libcontainer.Container, error) {
+	f, err := os.Open(filepath.Join(root, "container.json"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	var container *libcontainer.Container
+	if err := json.NewDecoder(f).Decode(&container); err != nil {
+		return nil, err
+	}
+	return container, nil
+}
+
+func readPid() (int, error) {
+	data, err := ioutil.ReadFile(filepath.Join(root, "pid"))
+	if err != nil {
+		return -1, err
+	}
+	pid, err := strconv.Atoi(string(data))
+	if err != nil {
+		return -1, err
+	}
+	return pid, nil
+}
+
+func newNsInit() (nsinit.NsInit, error) {
+	return nsinit.NewNsInit(&nsinit.DefaultCommandFactory{root}, &nsinit.DefaultStateWriter{root}), nil
+}
diff --git a/pkg/libcontainer/nsinit/state.go b/pkg/libcontainer/nsinit/state.go
new file mode 100644
index 0000000000..af38008c03
--- /dev/null
+++ b/pkg/libcontainer/nsinit/state.go
@@ -0,0 +1,28 @@
+package nsinit
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+)
+
+// StateWriter handles writing and deleting the pid file
+// on disk
+type StateWriter interface {
+	WritePid(pid int) error
+	DeletePid() error
+}
+
+type DefaultStateWriter struct {
+	Root string
+}
+
+// WritePid writes the namespaced process's pid to a pid file in the
+// container's root for external clients to consume
+func (d *DefaultStateWriter) WritePid(pid int) error {
+	return ioutil.WriteFile(filepath.Join(d.Root, "pid"), []byte(fmt.Sprint(pid)), 0655)
+}
+
+func (d *DefaultStateWriter) DeletePid() error {
+	return os.Remove(filepath.Join(d.Root, "pid"))
+}
diff --git a/pkg/libcontainer/nsinit/sync_pipe.go b/pkg/libcontainer/nsinit/sync_pipe.go
new file mode 100644
index 0000000000..f724f525f0
--- /dev/null
+++ b/pkg/libcontainer/nsinit/sync_pipe.go
@@ -0,0 +1,71 @@
+package nsinit
+
+import (
+	"encoding/json"
+	"fmt"
+	"github.com/dotcloud/docker/pkg/libcontainer"
+	"io/ioutil"
+	"os"
+)
+
+// SyncPipe allows communication to and from the child process
+// to its parent and allows the two independent processes to
+// synchronize their state.
+type SyncPipe struct {
+	parent, child *os.File
+}
+
+func NewSyncPipe() (s *SyncPipe, err error) {
+	s = &SyncPipe{}
+	s.child, s.parent, err = os.Pipe()
+	if err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+func NewSyncPipeFromFd(parentFd, childFd uintptr) (*SyncPipe, error) {
+	s := &SyncPipe{}
+	if parentFd > 0 {
+		s.parent = os.NewFile(parentFd, "parentPipe")
+	} else if childFd > 0 {
+		s.child = os.NewFile(childFd, "childPipe")
+	} else {
+		return nil, fmt.Errorf("no valid sync pipe fd specified")
+	}
+	return s, nil
+}
+
+func (s *SyncPipe) SendToChild(context libcontainer.Context) error {
+	data, err := json.Marshal(context)
+	if err != nil {
+		return err
+	}
+	s.parent.Write(data)
+	return nil
+}
+
+func (s *SyncPipe) ReadFromParent() (libcontainer.Context, error) {
+	data, err := ioutil.ReadAll(s.child)
+	if err != nil {
+		return nil, fmt.Errorf("error reading from sync pipe %s", err)
+	}
+	var context libcontainer.Context
+	if len(data) > 0 {
+		if err := json.Unmarshal(data, &context); err != nil {
+			return nil, err
+		}
+	}
+	return context, nil
+}
+
+func (s *SyncPipe) Close() error {
+	if s.parent != nil {
+		s.parent.Close()
+	}
+	if s.child != nil {
+		s.child.Close()
+	}
+	return nil
+}
diff --git a/pkg/libcontainer/nsinit/term.go b/pkg/libcontainer/nsinit/term.go
new file mode 100644
index 0000000000..58dccab2b8
--- /dev/null
+++ b/pkg/libcontainer/nsinit/term.go
@@ -0,0 +1,118 @@
+package nsinit
+
+import (
+	"github.com/dotcloud/docker/pkg/term"
+	"io"
+	"os"
+	"os/exec"
+)
+
+type Terminal interface {
+	io.Closer
+	SetMaster(*os.File)
+	Attach(*exec.Cmd) error
+	Resize(h, w int) error
+}
+
+func NewTerminal(stdin io.Reader, stdout, stderr io.Writer, tty bool) Terminal {
+	if tty {
+		return &TtyTerminal{
+			stdin:  stdin,
+			stdout: stdout,
+			stderr: stderr,
+		}
+	}
+	return &StdTerminal{
+		stdin:  stdin,
+		stdout: stdout,
+		stderr: stderr,
+	}
+}
+
+type TtyTerminal struct {
+	stdin          io.Reader
+	stdout, stderr io.Writer
+	master         *os.File
+	state          *term.State
+}
+
+func (t *TtyTerminal) Resize(h, w int) error {
+	return term.SetWinsize(t.master.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
+}
+
+func (t *TtyTerminal) SetMaster(master *os.File) {
+	t.master = master
+}
+
+func (t *TtyTerminal) Attach(command *exec.Cmd) error {
+	go io.Copy(t.stdout, t.master)
+	go io.Copy(t.master, t.stdin)
+
+	state, err := t.setupWindow(t.master, os.Stdin)
+	if err != nil {
+		command.Process.Kill()
+		return err
+	}
+	t.state = state
+	return err
+}
+
+// setupWindow gets the parent window size and sets the master
+// pty to the current size and sets the parent's mode to RAW
+func (t *TtyTerminal) setupWindow(master, parent *os.File) (*term.State, error) {
+	ws, err := term.GetWinsize(parent.Fd())
+	if err != nil {
+		return nil, err
+	}
+	if err := term.SetWinsize(master.Fd(), ws); err != nil {
+		return nil, err
+	}
+	return term.SetRawTerminal(parent.Fd())
+}
+
+func (t *TtyTerminal) Close() error {
+	term.RestoreTerminal(os.Stdin.Fd(), t.state)
+	return t.master.Close()
+}
+
+type StdTerminal struct {
+	stdin          io.Reader
+	stdout, stderr io.Writer
+}
+
+func (s *StdTerminal) SetMaster(*os.File) {
+	// no need to set master on non tty
+}
+
+func (s *StdTerminal) Close() error {
+	return nil
+}
+
+func (s *StdTerminal) Resize(h, w int) error {
+	return nil
+}
+
+func (s *StdTerminal) Attach(command *exec.Cmd) error {
+	inPipe, err := command.StdinPipe()
+	if err != nil {
+		return err
+	}
+	outPipe, err := command.StdoutPipe()
+	if err != nil {
+		return err
+	}
+	errPipe, err := command.StderrPipe()
+	if err != nil {
+		return err
+	}
+
+	go func() {
+		defer inPipe.Close()
+		io.Copy(inPipe, s.stdin)
+	}()
+
+	go io.Copy(s.stdout, outPipe)
+	go io.Copy(s.stderr, errPipe)
+
+	return nil
+}
diff --git a/pkg/libcontainer/nsinit/unsupported.go b/pkg/libcontainer/nsinit/unsupported.go
new file mode 100644
index 0000000000..2412223d28
--- /dev/null
+++ b/pkg/libcontainer/nsinit/unsupported.go
@@ -0,0 +1,19 @@
+// +build !linux
+
+package nsinit
+
+import (
+	"github.com/dotcloud/docker/pkg/libcontainer"
+)
+
+func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args []string) (int, error) {
+	return -1, libcontainer.ErrUnsupported
+}
+
+func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) {
+	return -1, libcontainer.ErrUnsupported
+}
+
+func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error {
+	return libcontainer.ErrUnsupported
+}
diff --git a/pkg/libcontainer/types.go b/pkg/libcontainer/types.go
new file mode 100644
index 0000000000..94fe876554
--- /dev/null
+++ b/pkg/libcontainer/types.go
@@ -0,0 +1,137 @@
+package libcontainer
+
+import (
+	"encoding/json"
+	"errors"
+	"github.com/syndtr/gocapability/capability"
+)
+
+var (
+	ErrUnknownNamespace  = errors.New("Unknown namespace")
+	ErrUnknownCapability = errors.New("Unknown capability")
+	ErrUnsupported       = errors.New("Unsupported method")
+)
+
+// namespaceList is used to convert the libcontainer types
+// into the names of the files located in /proc/<pid>/ns/* for
+// each namespace
+var (
+	namespaceList = Namespaces{}
+
+	capabilityList = Capabilities{
+		{Key: "SETPCAP", Value: capability.CAP_SETPCAP},
+		{Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE},
+		{Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO},
+		{Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT},
+		{Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN},
+		{Key: "SYS_NICE", Value: capability.CAP_SYS_NICE},
+		{Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE},
+		{Key: "SYS_TIME", Value: capability.CAP_SYS_TIME},
+		{Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG},
+		{Key: "MKNOD", Value: capability.CAP_MKNOD},
+		{Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE},
+		{Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL},
+		{Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE},
+		{Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN},
+		{Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN},
+	}
+)
+
+type (
+	Namespace struct {
+		Key   string
+		Value int
+		File  string
+	}
+	Namespaces []*Namespace
+)
+
+func (ns *Namespace) String() string {
+	return ns.Key
+}
+
+func (ns *Namespace) MarshalJSON() ([]byte, error) {
+	return json.Marshal(ns.Key)
+}
+
+func (ns *Namespace) UnmarshalJSON(src []byte) error {
+	var nsName string
+	if err := json.Unmarshal(src, &nsName); err != nil {
+		return err
+	}
+	ret := GetNamespace(nsName)
+	if ret == nil {
+		return ErrUnknownNamespace
+	}
+	*ns = *ret
+	return nil
+}
+
+func GetNamespace(key string) *Namespace {
+	for _, ns := range namespaceList {
+		if ns.Key == key {
+			return ns
+		}
+	}
+	return nil
+}
+
+// Contains returns true if the specified Namespace is
+// in the slice
+func (n Namespaces) Contains(ns string) bool {
+	for _, nsp := range n {
+		if nsp.Key == ns {
+			return true
+		}
+	}
+	return false
+}
+
+type (
+	Capability struct {
+		Key   string
+		Value capability.Cap
+	}
+	Capabilities []*Capability
+)
+
+func (c *Capability) String() string {
+	return c.Key
+}
+
+func (c *Capability) MarshalJSON() ([]byte, error) {
+	return json.Marshal(c.Key)
+}
+
+func (c *Capability) UnmarshalJSON(src []byte) error {
+	var capName string
+	if err := json.Unmarshal(src, &capName); err != nil {
+		return err
+	}
+	ret := GetCapability(capName)
+	if ret == nil {
+		return ErrUnknownCapability
+	}
+	*c = *ret
+	return nil
+}
+
+func GetCapability(key string) *Capability {
+	for _, capp := range capabilityList {
+		if capp.Key == key {
+			return capp
+		}
+	}
+	return nil
+}
+
+// Contains returns true if the specified Capability is
+// in the slice
+func (c Capabilities) Contains(capp string) bool {
+	for _, cap := range c {
+		if cap.Key == capp {
+			return true
+		}
+	}
+	return false
+}
diff --git a/pkg/libcontainer/types_linux.go b/pkg/libcontainer/types_linux.go
new file mode 100644
index 0000000000..c14531df20
--- /dev/null
+++ b/pkg/libcontainer/types_linux.go
@@ -0,0 +1,16 @@
+package libcontainer
+
+import (
+	"syscall"
+)
+
+func init() {
+	namespaceList = Namespaces{
+		{Key: "NEWNS", Value: syscall.CLONE_NEWNS, File: "mnt"},
+		{Key: "NEWUTS", Value: syscall.CLONE_NEWUTS, File: "uts"},
+		{Key: "NEWIPC", Value: syscall.CLONE_NEWIPC, File: "ipc"},
+		{Key: "NEWUSER", Value: syscall.CLONE_NEWUSER, File: "user"},
+		{Key: "NEWPID", Value: syscall.CLONE_NEWPID, File: "pid"},
+		{Key: "NEWNET", Value: syscall.CLONE_NEWNET, File: "net"},
+	}
+}
diff --git a/pkg/libcontainer/types_test.go b/pkg/libcontainer/types_test.go
new file mode 100644
index 0000000000..52b85a4db9
--- /dev/null
+++ b/pkg/libcontainer/types_test.go
@@ -0,0 +1,35 @@
+package libcontainer
+
+import (
+	"testing"
+)
+
+func TestNamespacesContains(t *testing.T) {
+	ns := Namespaces{
+		GetNamespace("NEWPID"),
+		GetNamespace("NEWNS"),
+		GetNamespace("NEWUTS"),
+	}
+
+	if ns.Contains("NEWNET") {
+		t.Fatal("namespaces should not contain NEWNET")
+	}
+
+	if !ns.Contains("NEWPID") {
+		t.Fatal("namespaces should contain NEWPID but does not")
+	}
+}
+
+func TestCapabilitiesContains(t *testing.T) {
+	caps := Capabilities{
+		GetCapability("MKNOD"),
+		GetCapability("SETPCAP"),
+	}
+
+	if caps.Contains("SYS_ADMIN") {
+		t.Fatal("capabilities should not contain SYS_ADMIN")
+	}
+	if !caps.Contains("MKNOD") {
+		t.Fatal("capabilities should contain MKNOD but does not")
+	}
+}
diff --git a/pkg/libcontainer/utils/utils.go b/pkg/libcontainer/utils/utils.go
new file mode 100644
index 0000000000..0d919bc43d
--- /dev/null
+++ b/pkg/libcontainer/utils/utils.go
@@ -0,0 +1,28 @@
+package utils
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"io"
+	"path/filepath"
+)
+
+// GenerateRandomName returns a new name joined with a prefix. The size
+// specified is used to truncate the randomly generated value
+func GenerateRandomName(prefix string, size int) (string, error) {
+	id := make([]byte, 32)
+	if _, err := io.ReadFull(rand.Reader, id); err != nil {
+		return "", err
+	}
+	return prefix + hex.EncodeToString(id)[:size], nil
+}
+
+// ResolveRootfs ensures that the current working directory is
+// not a symlink and returns the absolute path to the rootfs
+func ResolveRootfs(uncleanRootfs string) (string, error) {
+	rootfs, err := filepath.Abs(uncleanRootfs)
+	if err != nil {
+		return "", err
+	}
+	return filepath.EvalSymlinks(rootfs)
+}
diff --git a/pkg/listenbuffer/buffer.go b/pkg/listenbuffer/buffer.go
new file mode 100644
index 0000000000..c350805a7d
--- /dev/null
+++ b/pkg/listenbuffer/buffer.go
@@ -0,0 +1,61 @@
+/*
+	Package listenbuffer allows Go applications to immediately start
+	listening on a socket (unix, tcp, or udp) but hold connections
+	until the application has booted and is ready to accept them
+*/
+package listenbuffer
+
+import (
+	"fmt"
+	"net"
+	"time"
+)
+
+// NewListenBuffer returns a listener listening on addr with the protocol. It sets the
+// timeout to wait for the first connection before an error is returned
+func NewListenBuffer(proto, addr string, activate chan struct{}, timeout time.Duration) (net.Listener, error) {
+	wrapped, err := net.Listen(proto, addr)
+	if err != nil {
+		return nil, err
+	}
+
+	return &defaultListener{
+		wrapped:  wrapped,
+		activate: activate,
+		timeout:  timeout,
+	}, nil
+}
+
+type defaultListener struct {
+	wrapped  net.Listener  // the real listener to wrap
+	ready    bool          // is the listener ready to start accepting connections
+	activate chan struct{}
+	timeout  time.Duration // how long to wait before we consider this an error
+}
+
+func (l *defaultListener) Close() error {
+	return l.wrapped.Close()
+}
+
+func (l *defaultListener) Addr() net.Addr {
+	return l.wrapped.Addr()
+}
+
+func (l *defaultListener) Accept() (net.Conn, error) {
+	// if the listen has been told it is ready then we can go ahead and
+	// start returning connections
+	if l.ready {
+		return l.wrapped.Accept()
+	}
+
+	select {
+	case <-time.After(l.timeout):
+		// close the connection so any clients are disconnected
+		l.Close()
+		return nil, fmt.Errorf("timeout (%s) reached waiting for listener to become ready", l.timeout.String())
+	case <-l.activate:
+		l.ready = true
+		return l.Accept()
+	}
+	panic("unreachable")
+}
diff --git a/pkg/mflag/example/example.go b/pkg/mflag/example/example.go
index fa26c97e1b..ed940e8d70 100644
--- a/pkg/mflag/example/example.go
+++ b/pkg/mflag/example/example.go
@@ -12,9 +12,10 @@ var (
 )
 
 func init() {
+	flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp")
 	flag.BoolVar(&b, []string{"b"}, false, "a simple bool")
-	flag.BoolVar(&b2, []string{"-bool"}, false, "a simple bool")
-	flag.IntVar(&i, []string{"#integer", "-integer"}, -1, "a simple integer")
+	flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool")
+	flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer")
 	flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") //-s -hidden and --string will work, but -hidden won't be in the usage
 	flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help")
 	flag.Parse()
@@ -27,4 +28,5 @@ func main() {
 	fmt.Printf("b: %b\n", b)
 	fmt.Printf("-bool: %b\n", b2)
 	fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String())
+	fmt.Printf("ARGS: %v\n", flag.Args())
 }
diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go
index f721e04557..7125c030ed 100644 --- a/pkg/mflag/flag.go +++ b/pkg/mflag/flag.go @@ -77,6 +77,9 @@ import ( // ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. var ErrHelp = errors.New("flag: help requested") +// ErrRetry is the error returned if you need to try letter by letter +var ErrRetry = errors.New("flag: retry") + // -- bool Value type boolValue bool @@ -287,13 +290,13 @@ type Flag struct { func sortFlags(flags map[string]*Flag) []*Flag { var list sort.StringSlice for _, f := range flags { + fName := strings.TrimPrefix(f.Names[0], "#") if len(f.Names) == 1 { - list = append(list, f.Names[0]) + list = append(list, fName) continue } found := false - fName := strings.TrimPrefix(strings.TrimPrefix(f.Names[0], "#"), "-") for _, name := range list { if name == fName { found = true @@ -401,7 +404,9 @@ func (f *FlagSet) PrintDefaults() { names = append(names, name) } } - fmt.Fprintf(f.out(), format, strings.Join(names, ", -"), flag.DefValue, flag.Usage) + if len(names) > 0 { + fmt.Fprintf(f.out(), format, strings.Join(names, ", -"), flag.DefValue, flag.Usage) + } }) } @@ -733,21 +738,21 @@ func (f *FlagSet) usage() { } // parseOne parses one flag. It reports whether a flag was seen. -func (f *FlagSet) parseOne() (bool, error) { +func (f *FlagSet) parseOne() (bool, string, error) { if len(f.args) == 0 { - return false, nil + return false, "", nil } s := f.args[0] if len(s) == 0 || s[0] != '-' || len(s) == 1 { - return false, nil + return false, "", nil } if s[1] == '-' && len(s) == 2 { // "--" terminates the flags f.args = f.args[1:] - return false, nil + return false, "", nil } name := s[1:] if len(name) == 0 || name[0] == '=' { - return false, f.failf("bad flag syntax: %s", s) + return false, "", f.failf("bad flag syntax: %s", s) } // it's a flag. does it have an argument? @@ -767,14 +772,17 @@ func (f *FlagSet) parseOne() (bool, error) { if !alreadythere { if name == "-help" || name == "help" || name == "h" { // special case for nice help message. f.usage() - return false, ErrHelp + return false, "", ErrHelp } - return false, f.failf("flag provided but not defined: -%s", name) + if len(name) > 0 && name[0] == '-' { + return false, "", f.failf("flag provided but not defined: -%s", name) + } + return false, name, ErrRetry } if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg if has_value { if err := fv.Set(value); err != nil { - return false, f.failf("invalid boolean value %q for -%s: %v", value, name, err) + return false, "", f.failf("invalid boolean value %q for -%s: %v", value, name, err) } } else { fv.Set("true") @@ -787,17 +795,22 @@ func (f *FlagSet) parseOne() (bool, error) { value, f.args = f.args[0], f.args[1:] } if !has_value { - return false, f.failf("flag needs an argument: -%s", name) + return false, "", f.failf("flag needs an argument: -%s", name) } if err := flag.Value.Set(value); err != nil { - return false, f.failf("invalid value %q for flag -%s: %v", value, name, err) + return false, "", f.failf("invalid value %q for flag -%s: %v", value, name, err) } } if f.actual == nil { f.actual = make(map[string]*Flag) } f.actual[name] = flag - return true, nil + for _, n := range flag.Names { + if n == fmt.Sprintf("#%s", name) { + fmt.Fprintf(f.out(), "Warning: '-%s' is deprecated, it will be removed soon. 
See usage.\n", name) + } + } + return true, "", nil } // Parse parses flag definitions from the argument list, which should not @@ -808,13 +821,34 @@ func (f *FlagSet) Parse(arguments []string) error { f.parsed = true f.args = arguments for { - seen, err := f.parseOne() + seen, name, err := f.parseOne() if seen { continue } if err == nil { break } + if err == ErrRetry { + if len(name) > 1 { + err = nil + for _, letter := range strings.Split(name, "") { + f.args = append([]string{"-" + letter}, f.args...) + seen2, _, err2 := f.parseOne() + if seen2 { + continue + } + if err2 != nil { + err = f.failf("flag provided but not defined: -%s", name) + break + } + } + if err == nil { + continue + } + } else { + err = f.failf("flag provided but not defined: -%s", name) + } + } switch f.errorHandling { case ContinueOnError: return err diff --git a/pkg/netlink/MAINTAINERS b/pkg/netlink/MAINTAINERS new file mode 100644 index 0000000000..e53d933d47 --- /dev/null +++ b/pkg/netlink/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Guillaume Charmes (@creack) diff --git a/pkg/netlink/netlink.go b/pkg/netlink/netlink.go index 5098b4b816..5cc756256d 100644 --- a/pkg/netlink/netlink.go +++ b/pkg/netlink/netlink.go @@ -5,7 +5,15 @@ // netlink_darwin.go package netlink -import "net" +import ( + "errors" + "net" +) + +var ( + ErrWrongSockType = errors.New("Wrong socket type") + ErrShortResponse = errors.New("Got short response from netlink") +) // A Route is a subnet associated with the interface to reach it. type Route struct { diff --git a/pkg/netlink/netlink_linux.go b/pkg/netlink/netlink_linux.go index 0ea5b4dbac..f8bb6bac3c 100644 --- a/pkg/netlink/netlink_linux.go +++ b/pkg/netlink/netlink_linux.go @@ -10,6 +10,15 @@ import ( "unsafe" ) +const ( + IFNAMSIZ = 16 + DEFAULT_CHANGE = 0xFFFFFFFF + IFLA_INFO_KIND = 1 + IFLA_INFO_DATA = 2 + VETH_INFO_PEER = 1 + IFLA_NET_NS_FD = 28 +) + var nextSeqNr int func nativeEndian() binary.ByteOrder { @@ -36,6 +45,7 @@ func getIpFamily(ip net.IP) int { } type NetlinkRequestData interface { + Len() int ToWireFormat() []byte } @@ -44,21 +54,24 @@ type IfInfomsg struct { } func newIfInfomsg(family int) *IfInfomsg { - msg := &IfInfomsg{} - msg.Family = uint8(family) - msg.Type = uint16(0) - msg.Index = int32(0) - msg.Flags = uint32(0) - msg.Change = uint32(0) + return &IfInfomsg{ + IfInfomsg: syscall.IfInfomsg{ + Family: uint8(family), + }, + } +} +func newIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { + msg := newIfInfomsg(family) + parent.children = append(parent.children, msg) return msg } func (msg *IfInfomsg) ToWireFormat() []byte { native := nativeEndian() - len := syscall.SizeofIfInfomsg - b := make([]byte, len) + length := syscall.SizeofIfInfomsg + b := make([]byte, length) b[0] = msg.Family b[1] = 0 native.PutUint16(b[2:4], msg.Type) @@ -68,26 +81,27 @@ func (msg *IfInfomsg) ToWireFormat() []byte { return b } +func (msg *IfInfomsg) Len() int { + return syscall.SizeofIfInfomsg +} + type IfAddrmsg struct { syscall.IfAddrmsg } func newIfAddrmsg(family int) *IfAddrmsg { - msg := &IfAddrmsg{} - msg.Family = uint8(family) - msg.Prefixlen = uint8(0) - msg.Flags = uint8(0) - msg.Scope = uint8(0) - msg.Index = uint32(0) - - return msg + return &IfAddrmsg{ + IfAddrmsg: syscall.IfAddrmsg{ + Family: uint8(family), + }, + } } func (msg *IfAddrmsg) ToWireFormat() []byte { native := nativeEndian() - len := syscall.SizeofIfAddrmsg - b := make([]byte, len) + length := syscall.SizeofIfAddrmsg + b := make([]byte, length) b[0] = msg.Family b[1] = msg.Prefixlen b[2] = 
msg.Flags @@ -96,26 +110,31 @@ func (msg *IfAddrmsg) ToWireFormat() []byte { return b } +func (msg *IfAddrmsg) Len() int { + return syscall.SizeofIfAddrmsg +} + type RtMsg struct { syscall.RtMsg } func newRtMsg(family int) *RtMsg { - msg := &RtMsg{} - msg.Family = uint8(family) - msg.Table = syscall.RT_TABLE_MAIN - msg.Scope = syscall.RT_SCOPE_UNIVERSE - msg.Protocol = syscall.RTPROT_BOOT - msg.Type = syscall.RTN_UNICAST - - return msg + return &RtMsg{ + RtMsg: syscall.RtMsg{ + Family: uint8(family), + Table: syscall.RT_TABLE_MAIN, + Scope: syscall.RT_SCOPE_UNIVERSE, + Protocol: syscall.RTPROT_BOOT, + Type: syscall.RTN_UNICAST, + }, + } } func (msg *RtMsg) ToWireFormat() []byte { native := nativeEndian() - len := syscall.SizeofRtMsg - b := make([]byte, len) + length := syscall.SizeofRtMsg + b := make([]byte, length) b[0] = msg.Family b[1] = msg.Dst_len b[2] = msg.Src_len @@ -128,35 +147,70 @@ func (msg *RtMsg) ToWireFormat() []byte { return b } +func (msg *RtMsg) Len() int { + return syscall.SizeofRtMsg +} + func rtaAlignOf(attrlen int) int { return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1) } type RtAttr struct { syscall.RtAttr - Data []byte + Data []byte + children []NetlinkRequestData } func newRtAttr(attrType int, data []byte) *RtAttr { - attr := &RtAttr{} - attr.Type = uint16(attrType) - attr.Data = data + return &RtAttr{ + RtAttr: syscall.RtAttr{ + Type: uint16(attrType), + }, + children: []NetlinkRequestData{}, + Data: data, + } +} +func newRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr { + attr := newRtAttr(attrType, data) + parent.children = append(parent.children, attr) return attr } -func (attr *RtAttr) ToWireFormat() []byte { +func (a *RtAttr) Len() int { + l := 0 + for _, child := range a.children { + l += child.Len() + syscall.SizeofRtAttr + } + if l == 0 { + l++ + } + return rtaAlignOf(l + len(a.Data)) +} + +func (a *RtAttr) ToWireFormat() []byte { native := nativeEndian() - len := syscall.SizeofRtAttr + len(attr.Data) - b := make([]byte, rtaAlignOf(len)) - native.PutUint16(b[0:2], uint16(len)) - native.PutUint16(b[2:4], attr.Type) - for i, d := range attr.Data { - b[4+i] = d + length := a.Len() + buf := make([]byte, rtaAlignOf(length+syscall.SizeofRtAttr)) + + if a.Data != nil { + copy(buf[4:], a.Data) + } else { + next := 4 + for _, child := range a.children { + childBuf := child.ToWireFormat() + copy(buf[next:], childBuf) + next += rtaAlignOf(len(childBuf)) + } } - return b + if l := uint16(rtaAlignOf(length)); l != 0 { + native.PutUint16(buf[0:2], l+1) + } + native.PutUint16(buf[2:4], a.Type) + + return buf } type NetlinkRequest struct { @@ -171,7 +225,7 @@ func (rr *NetlinkRequest) ToWireFormat() []byte { dataBytes := make([][]byte, len(rr.Data)) for i, data := range rr.Data { dataBytes[i] = data.ToWireFormat() - length = length + uint32(len(dataBytes[i])) + length += uint32(len(dataBytes[i])) } b := make([]byte, length) native.PutUint32(b[0:4], length) @@ -180,27 +234,29 @@ func (rr *NetlinkRequest) ToWireFormat() []byte { native.PutUint32(b[8:12], rr.Seq) native.PutUint32(b[12:16], rr.Pid) - i := 16 + next := 16 for _, data := range dataBytes { - for _, dataByte := range data { - b[i] = dataByte - i = i + 1 - } + copy(b[next:], data) + next += len(data) } return b } func (rr *NetlinkRequest) AddData(data NetlinkRequestData) { - rr.Data = append(rr.Data, data) + if data != nil { + rr.Data = append(rr.Data, data) + } } func newNetlinkRequest(proto, flags int) *NetlinkRequest { - rr := &NetlinkRequest{} - rr.Len = 
uint32(syscall.NLMSG_HDRLEN) - rr.Type = uint16(proto) - rr.Flags = syscall.NLM_F_REQUEST | uint16(flags) - rr.Seq = uint32(getSeq()) - return rr + return &NetlinkRequest{ + NlMsghdr: syscall.NlMsghdr{ + Len: uint32(syscall.NLMSG_HDRLEN), + Type: uint16(proto), + Flags: syscall.NLM_F_REQUEST | uint16(flags), + Seq: uint32(getSeq()), + }, + } } type NetlinkSocket struct { @@ -243,7 +299,7 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { return nil, err } if nr < syscall.NLMSG_HDRLEN { - return nil, fmt.Errorf("Got short response from netlink") + return nil, ErrShortResponse } rb = rb[:nr] return syscall.ParseNetlinkMessage(rb) @@ -258,7 +314,7 @@ func (s *NetlinkSocket) GetPid() (uint32, error) { case *syscall.SockaddrNetlink: return v.Pid, nil } - return 0, fmt.Errorf("Wrong socket type") + return 0, ErrWrongSockType } func (s *NetlinkSocket) HandleAck(seq uint32) error { @@ -355,6 +411,28 @@ func NetworkLinkUp(iface *net.Interface) error { return s.HandleAck(wb.Seq) } +func NetworkLinkDown(iface *net.Interface) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Change = syscall.IFF_UP + msg.Flags = 0 & ^syscall.IFF_UP + msg.Index = int32(iface.Index) + wb.AddData(msg) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + func NetworkSetMTU(iface *net.Interface, mtu int) error { s, err := getNetlinkSocket() if err != nil { @@ -368,7 +446,7 @@ func NetworkSetMTU(iface *net.Interface, mtu int) error { msg.Type = syscall.RTM_SETLINK msg.Flags = syscall.NLM_F_REQUEST msg.Index = int32(iface.Index) - msg.Change = 0xFFFFFFFF + msg.Change = DEFAULT_CHANGE wb.AddData(msg) var ( @@ -386,6 +464,103 @@ func NetworkSetMTU(iface *net.Interface, mtu int) error { return s.HandleAck(wb.Seq) } +// same as ip link set $name master $master +func NetworkSetMaster(iface, master *net.Interface) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Type = syscall.RTM_SETLINK + msg.Flags = syscall.NLM_F_REQUEST + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + + var ( + b = make([]byte, 4) + native = nativeEndian() + ) + native.PutUint32(b, uint32(master.Index)) + + data := newRtAttr(syscall.IFLA_MASTER, b) + wb.AddData(data) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +func NetworkSetNsPid(iface *net.Interface, nspid int) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Type = syscall.RTM_SETLINK + msg.Flags = syscall.NLM_F_REQUEST + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + + var ( + b = make([]byte, 4) + native = nativeEndian() + ) + native.PutUint32(b, uint32(nspid)) + + data := newRtAttr(syscall.IFLA_NET_NS_PID, b) + wb.AddData(data) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +func NetworkSetNsFd(iface *net.Interface, fd int) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := 
newIfInfomsg(syscall.AF_UNSPEC) + msg.Type = syscall.RTM_SETLINK + msg.Flags = syscall.NLM_F_REQUEST + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + + var ( + b = make([]byte, 4) + native = nativeEndian() + ) + native.PutUint32(b, uint32(fd)) + + data := newRtAttr(IFLA_NET_NS_FD, b) + wb.AddData(data) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + // Add an Ip address to an interface. This is identical to: // ip addr add $ip/$ipNet dev $iface func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { @@ -426,20 +601,11 @@ func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { } func zeroTerminated(s string) []byte { - bytes := make([]byte, len(s)+1) - for i := 0; i < len(s); i++ { - bytes[i] = s[i] - } - bytes[len(s)] = 0 - return bytes + return []byte(s + "\000") } func nonZeroTerminated(s string) []byte { - bytes := make([]byte, len(s)) - for i := 0; i < len(s); i++ { - bytes[i] = s[i] - } - return bytes + return []byte(s) } // Add a new network link of a specified type. This is identical to @@ -456,10 +622,10 @@ func NetworkLinkAdd(name string, linkType string) error { msg := newIfInfomsg(syscall.AF_UNSPEC) wb.AddData(msg) - nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name)) - wb.AddData(nameData) - - IFLA_INFO_KIND := 1 + if name != "" { + nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name)) + wb.AddData(nameData) + } kindData := newRtAttr(IFLA_INFO_KIND, nonZeroTerminated(linkType)) @@ -576,3 +742,69 @@ done: return res, nil } + +func getIfSocket() (fd int, err error) { + for _, socket := range []int{ + syscall.AF_INET, + syscall.AF_PACKET, + syscall.AF_INET6, + } { + if fd, err = syscall.Socket(socket, syscall.SOCK_DGRAM, 0); err == nil { + break + } + } + if err == nil { + return fd, nil + } + return -1, err +} + +func NetworkChangeName(iface *net.Interface, newName string) error { + fd, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(fd) + + data := [IFNAMSIZ * 2]byte{} + // the "-1"s here are very important for ensuring we get proper null + // termination of our new C strings + copy(data[:IFNAMSIZ-1], iface.Name) + copy(data[IFNAMSIZ:IFNAMSIZ*2-1], newName) + + if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.SIOCSIFNAME, uintptr(unsafe.Pointer(&data[0]))); errno != 0 { + return errno + } + return nil +} + +func NetworkCreateVethPair(name1, name2 string) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + wb.AddData(msg) + + nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name1)) + wb.AddData(nameData) + + nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) + newRtAttrChild(nest1, IFLA_INFO_KIND, zeroTerminated("veth")) + nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) + nest3 := newRtAttrChild(nest2, VETH_INFO_PEER, nil) + + newIfInfomsgChild(nest3, syscall.AF_UNSPEC) + newRtAttrChild(nest3, syscall.IFLA_IFNAME, zeroTerminated(name2)) + + wb.AddData(nest1) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} diff --git a/pkg/netlink/netlink_unsupported.go b/pkg/netlink/netlink_unsupported.go index cd796b373f..bd9e962d35 100644 --- a/pkg/netlink/netlink_unsupported.go +++ b/pkg/netlink/netlink_unsupported.go @@ -3,31 +3,59 @@ package netlink 
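For reviewers, a minimal usage sketch (not part of this patch) of how the new veth helpers above are meant to compose when wiring a container interface; the interface names and the target pid are made up:

package main

import (
	"log"
	"net"

	"github.com/dotcloud/docker/pkg/netlink"
)

func main() {
	// Equivalent to: ip link add vethHost type veth peer name vethChild
	if err := netlink.NetworkCreateVethPair("vethHost", "vethChild"); err != nil {
		log.Fatal(err)
	}
	child, err := net.InterfaceByName("vethChild")
	if err != nil {
		log.Fatal(err)
	}
	// Move the child end into the network namespace of pid 1234 (hypothetical).
	if err := netlink.NetworkSetNsPid(child, 1234); err != nil {
		log.Fatal(err)
	}
	host, err := net.InterfaceByName("vethHost")
	if err != nil {
		log.Fatal(err)
	}
	// Equivalent to: ip link set vethHost up
	if err := netlink.NetworkLinkUp(host); err != nil {
		log.Fatal(err)
	}
}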
import ( - "fmt" + "errors" "net" ) +var ( + ErrNotImplemented = errors.New("not implemented") +) + func NetworkGetRoutes() ([]Route, error) { - return nil, fmt.Errorf("Not implemented") + return nil, ErrNotImplemented } func NetworkLinkAdd(name string, linkType string) error { - return fmt.Errorf("Not implemented") + return ErrNotImplemented } func NetworkLinkUp(iface *net.Interface) error { - return fmt.Errorf("Not implemented") + return ErrNotImplemented } func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { - return fmt.Errorf("Not implemented") + return ErrNotImplemented } func AddDefaultGw(ip net.IP) error { - return fmt.Errorf("Not implemented") + return ErrNotImplemented } func NetworkSetMTU(iface *net.Interface, mtu int) error { - return fmt.Errorf("Not implemented") + return ErrNotImplemented +} + +func NetworkCreateVethPair(name1, name2 string) error { + return ErrNotImplemented +} + +func NetworkChangeName(iface *net.Interface, newName string) error { + return ErrNotImplemented +} + +func NetworkSetNsFd(iface *net.Interface, fd int) error { + return ErrNotImplemented +} + +func NetworkSetNsPid(iface *net.Interface, nspid int) error { + return ErrNotImplemented +} + +func NetworkSetMaster(iface, master *net.Interface) error { + return ErrNotImplemented +} + +func NetworkLinkDown(iface *net.Interface) error { + return ErrNotImplemented } diff --git a/opts.go b/pkg/opts/opts.go similarity index 89% rename from opts.go rename to pkg/opts/opts.go index b1d71c491d..a1b8752bad 100644 --- a/opts.go +++ b/pkg/opts/opts.go @@ -1,8 +1,7 @@ -package docker +package opts import ( "fmt" - "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/utils" "os" "path/filepath" @@ -99,6 +98,16 @@ func ValidateLink(val string) (string, error) { return val, nil } +// FIXME: this is a duplicate of docker.utils.parseLink. +// it can't be moved to a separate links/ package because +// links depends on Container which is defined in the core. 
+// +// Links come in the format of +// name:alias +func parseLink(rawLink string) (map[string]string, error) { + return utils.PartParser("name:alias", rawLink) +} + func ValidatePath(val string) (string, error) { var containerPath string @@ -129,14 +138,6 @@ func ValidateEnv(val string) (string, error) { return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil } -func ValidateHost(val string) (string, error) { - host, err := utils.ParseHost(api.DEFAULTHTTPHOST, api.DEFAULTHTTPPORT, api.DEFAULTUNIXSOCKET, val) - if err != nil { - return val, err - } - return host, nil -} - func ValidateIp4Address(val string) (string, error) { re := regexp.MustCompile(`^(([0-9]+\.){3}([0-9]+))\s*$`) var ns = re.FindSubmatch([]byte(val)) diff --git a/opts_unit_test.go b/pkg/opts/opts_test.go similarity index 97% rename from opts_unit_test.go rename to pkg/opts/opts_test.go index 67b061771b..a5c1fac9ca 100644 --- a/opts_unit_test.go +++ b/pkg/opts/opts_test.go @@ -1,4 +1,4 @@ -package docker +package opts import ( "testing" diff --git a/proxy/MAINTAINERS b/pkg/proxy/MAINTAINERS similarity index 100% rename from proxy/MAINTAINERS rename to pkg/proxy/MAINTAINERS diff --git a/proxy/network_proxy_test.go b/pkg/proxy/network_proxy_test.go similarity index 100% rename from proxy/network_proxy_test.go rename to pkg/proxy/network_proxy_test.go diff --git a/proxy/proxy.go b/pkg/proxy/proxy.go similarity index 100% rename from proxy/proxy.go rename to pkg/proxy/proxy.go diff --git a/proxy/stub_proxy.go b/pkg/proxy/stub_proxy.go similarity index 100% rename from proxy/stub_proxy.go rename to pkg/proxy/stub_proxy.go diff --git a/proxy/tcp_proxy.go b/pkg/proxy/tcp_proxy.go similarity index 77% rename from proxy/tcp_proxy.go rename to pkg/proxy/tcp_proxy.go index e7c460f61d..1aa6d9fd70 100644 --- a/proxy/tcp_proxy.go +++ b/pkg/proxy/tcp_proxy.go @@ -1,7 +1,6 @@ package proxy import ( - "github.com/dotcloud/docker/utils" "io" "log" "net" @@ -31,7 +30,7 @@ func NewTCPProxy(frontendAddr, backendAddr *net.TCPAddr) (*TCPProxy, error) { func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) { backend, err := net.DialTCP("tcp", nil, proxy.backendAddr) if err != nil { - log.Printf("Can't forward traffic to backend tcp/%v: %v\n", proxy.backendAddr, err.Error()) + log.Printf("Can't forward traffic to backend tcp/%v: %s\n", proxy.backendAddr, err) client.Close() return } @@ -49,7 +48,7 @@ func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) { to.CloseRead() event <- written } - utils.Debugf("Forwarding traffic between tcp/%v and tcp/%v", client.RemoteAddr(), backend.RemoteAddr()) + go broker(client, backend) go broker(backend, client) @@ -65,23 +64,20 @@ func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) { for ; i < 2; i++ { transferred += <-event } - goto done + return } } client.Close() backend.Close() -done: - utils.Debugf("%v bytes transferred between tcp/%v and tcp/%v", transferred, client.RemoteAddr(), backend.RemoteAddr()) } func (proxy *TCPProxy) Run() { quit := make(chan bool) defer close(quit) - utils.Debugf("Starting proxy on tcp/%v for tcp/%v", proxy.frontendAddr, proxy.backendAddr) for { client, err := proxy.listener.Accept() if err != nil { - utils.Debugf("Stopping proxy on tcp/%v for tcp/%v (%v)", proxy.frontendAddr, proxy.backendAddr, err.Error()) + log.Printf("Stopping proxy on tcp/%v for tcp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err) return } go proxy.clientLoop(client.(*net.TCPConn), quit) diff --git a/proxy/udp_proxy.go b/pkg/proxy/udp_proxy.go 
similarity index 79% rename from proxy/udp_proxy.go rename to pkg/proxy/udp_proxy.go index 7d34988f70..14f2306a5a 100644 --- a/proxy/udp_proxy.go +++ b/pkg/proxy/udp_proxy.go @@ -2,9 +2,9 @@ package proxy import ( "encoding/binary" - "github.com/dotcloud/docker/utils" "log" "net" + "strings" "sync" "syscall" "time" @@ -66,7 +66,6 @@ func (proxy *UDPProxy) replyLoop(proxyConn *net.UDPConn, clientAddr *net.UDPAddr proxy.connTrackLock.Lock() delete(proxy.connTrackTable, *clientKey) proxy.connTrackLock.Unlock() - utils.Debugf("Done proxying between udp/%v and udp/%v", clientAddr.String(), proxy.backendAddr.String()) proxyConn.Close() }() @@ -92,24 +91,20 @@ func (proxy *UDPProxy) replyLoop(proxyConn *net.UDPConn, clientAddr *net.UDPAddr return } i += written - utils.Debugf("Forwarded %v/%v bytes to udp/%v", i, read, clientAddr.String()) } } } func (proxy *UDPProxy) Run() { readBuf := make([]byte, UDPBufSize) - utils.Debugf("Starting proxy on udp/%v for udp/%v", proxy.frontendAddr, proxy.backendAddr) for { read, from, err := proxy.listener.ReadFromUDP(readBuf) if err != nil { // NOTE: Apparently ReadFrom doesn't return // ECONNREFUSED like Read do (see comment in // UDPProxy.replyLoop) - if utils.IsClosedError(err) { - utils.Debugf("Stopping proxy on udp/%v for udp/%v (socket was closed)", proxy.frontendAddr, proxy.backendAddr) - } else { - utils.Errorf("Stopping proxy on udp/%v for udp/%v (%v)", proxy.frontendAddr, proxy.backendAddr, err.Error()) + if !isClosedError(err) { + log.Printf("Stopping proxy on udp/%v for udp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err) } break } @@ -120,7 +115,7 @@ func (proxy *UDPProxy) Run() { if !hit { proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr) if err != nil { - log.Printf("Can't proxy a datagram to udp/%s: %v\n", proxy.backendAddr.String(), err) + log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err) continue } proxy.connTrackTable[*fromKey] = proxyConn @@ -130,11 +125,10 @@ func (proxy *UDPProxy) Run() { for i := 0; i != read; { written, err := proxyConn.Write(readBuf[i:read]) if err != nil { - log.Printf("Can't proxy a datagram to udp/%s: %v\n", proxy.backendAddr.String(), err) + log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err) break } i += written - utils.Debugf("Forwarded %v/%v bytes to udp/%v", i, read, proxy.backendAddr.String()) } } } @@ -150,3 +144,13 @@ func (proxy *UDPProxy) Close() { func (proxy *UDPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr } func (proxy *UDPProxy) BackendAddr() net.Addr { return proxy.backendAddr } + +func isClosedError(err error) bool { + /* This comparison is ugly, but unfortunately, net.go doesn't export errClosing. 
+ * See: + * http://golang.org/src/pkg/net/net.go + * https://code.google.com/p/go/issues/detail?id=4337 + * https://groups.google.com/forum/#!msg/golang-nuts/0_aaCvBmOcM/SptmDyX1XJMJ + */ + return strings.HasSuffix(err.Error(), "use of closed network connection") +} diff --git a/pkg/system/calls_linux.go b/pkg/system/calls_linux.go new file mode 100644 index 0000000000..bf667c535b --- /dev/null +++ b/pkg/system/calls_linux.go @@ -0,0 +1,145 @@ +package system + +import ( + "os/exec" + "syscall" +) + +func Chroot(dir string) error { + return syscall.Chroot(dir) +} + +func Chdir(dir string) error { + return syscall.Chdir(dir) +} + +func Exec(cmd string, args []string, env []string) error { + return syscall.Exec(cmd, args, env) +} + +func Execv(cmd string, args []string, env []string) error { + name, err := exec.LookPath(cmd) + if err != nil { + return err + } + return Exec(name, args, env) +} + +func Fork() (int, error) { + syscall.ForkLock.Lock() + pid, _, err := syscall.Syscall(syscall.SYS_FORK, 0, 0, 0) + syscall.ForkLock.Unlock() + if err != 0 { + return -1, err + } + return int(pid), nil +} + +func Mount(source, target, fstype string, flags uintptr, data string) error { + return syscall.Mount(source, target, fstype, flags, data) +} + +func Unmount(target string, flags int) error { + return syscall.Unmount(target, flags) +} + +func Pivotroot(newroot, putold string) error { + return syscall.PivotRoot(newroot, putold) +} + +func Unshare(flags int) error { + return syscall.Unshare(flags) +} + +func Clone(flags uintptr) (int, error) { + syscall.ForkLock.Lock() + pid, _, err := syscall.RawSyscall(syscall.SYS_CLONE, flags, 0, 0) + syscall.ForkLock.Unlock() + if err != 0 { + return -1, err + } + return int(pid), nil +} + +func UsetCloseOnExec(fd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, fd, syscall.F_SETFD, 0); err != 0 { + return err + } + return nil +} + +func Setgroups(gids []int) error { + return syscall.Setgroups(gids) +} + +func Setresgid(rgid, egid, sgid int) error { + return syscall.Setresgid(rgid, egid, sgid) +} + +func Setresuid(ruid, euid, suid int) error { + return syscall.Setresuid(ruid, euid, suid) +} + +func Setgid(gid int) error { + return syscall.Setgid(gid) +} + +func Setuid(uid int) error { + return syscall.Setuid(uid) +} + +func Sethostname(name string) error { + return syscall.Sethostname([]byte(name)) +} + +func Setsid() (int, error) { + return syscall.Setsid() +} + +func Ioctl(fd uintptr, flag, data uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + +func Closefd(fd uintptr) error { + return syscall.Close(int(fd)) +} + +func Dup2(fd1, fd2 uintptr) error { + return syscall.Dup2(int(fd1), int(fd2)) +} + +func Mknod(path string, mode uint32, dev int) error { + return syscall.Mknod(path, mode, dev) +} + +func ParentDeathSignal() error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, uintptr(syscall.SIGKILL), 0); err != 0 { + return err + } + return nil +} + +func Setctty() error { + if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 { + return err + } + return nil +} + +func Mkfifo(name string, mode uint32) error { + return syscall.Mkfifo(name, mode) +} + +func Umask(mask int) int { + return syscall.Umask(mask) +} + +func SetCloneFlags(cmd *exec.Cmd, flag uintptr) { + if cmd.SysProcAttr == nil { + cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.SysProcAttr.Cloneflags = flag +} diff 
--git a/pkg/system/errors.go b/pkg/system/errors.go new file mode 100644 index 0000000000..63045186fe --- /dev/null +++ b/pkg/system/errors.go @@ -0,0 +1,9 @@ +package system + +import ( + "errors" +) + +var ( + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/pkg/system/pty_linux.go b/pkg/system/pty_linux.go new file mode 100644 index 0000000000..ca588d8ce9 --- /dev/null +++ b/pkg/system/pty_linux.go @@ -0,0 +1,58 @@ +package system + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +// Unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// Unlockpt should be called before opening the slave side of a pseudoterminal. +func Unlockpt(f *os.File) error { + var u int + return Ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) +} + +// Ptsname retrieves the name of the first available pts for the given master. +func Ptsname(f *os.File) (string, error) { + var n int + + if err := Ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} + +// CreateMasterAndConsole will open /dev/ptmx on the host and retrieve the +// pts name for use as the pty slave inside the container. +func CreateMasterAndConsole() (*os.File, string, error) { + master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) + if err != nil { + return nil, "", err + } + console, err := Ptsname(master) + if err != nil { + return nil, "", err + } + if err := Unlockpt(master); err != nil { + return nil, "", err + } + return master, console, nil +} + +// OpenPtmx opens /dev/ptmx, i.e. the PTY master. +func OpenPtmx() (*os.File, error) { + // O_NOCTTY and O_CLOEXEC are not present in the os package, so we use the syscall constants for all of the flags. 
+ return os.OpenFile("/dev/ptmx", syscall.O_RDONLY|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) +} + +// OpenTerminal is a clone of os.OpenFile without the O_CLOEXEC flag, +// used to open the pty slave inside the container namespace. +func OpenTerminal(name string, flag int) (*os.File, error) { + r, e := syscall.Open(name, flag, 0) + if e != nil { + return nil, &os.PathError{"open", name, e} + } + return os.NewFile(uintptr(r), name), nil +} diff --git a/pkg/system/setns_linux.go b/pkg/system/setns_linux.go new file mode 100644 index 0000000000..2b6f9e77ec --- /dev/null +++ b/pkg/system/setns_linux.go @@ -0,0 +1,27 @@ +package system + +import ( + "fmt" + "runtime" + "syscall" +) + +// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092 +// +// We need different setns values for the different platforms and archs; +// we declare the numbers here because the SETNS syscall does not exist in the stdlib +var setNsMap = map[string]uintptr{ + "linux/amd64": 308, +} + +func Setns(fd uintptr, flags uintptr) error { + ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)] + if !exists { + return ErrNotSupportedPlatform + } + _, _, err := syscall.RawSyscall(ns, fd, flags, 0) + if err != 0 { + return err + } + return nil +} diff --git a/pkg/system/stat_linux.go b/pkg/system/stat_linux.go new file mode 100644 index 0000000000..e702200360 --- /dev/null +++ b/pkg/system/stat_linux.go @@ -0,0 +1,13 @@ +package system + +import ( + "syscall" +) + +func GetLastAccess(stat *syscall.Stat_t) syscall.Timespec { + return stat.Atim +} + +func GetLastModification(stat *syscall.Stat_t) syscall.Timespec { + return stat.Mtim +} diff --git a/pkg/system/stat_unsupported.go b/pkg/system/stat_unsupported.go new file mode 100644 index 0000000000..4686a4c346 --- /dev/null +++ b/pkg/system/stat_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package system + +import "syscall" + +func GetLastAccess(stat *syscall.Stat_t) syscall.Timespec { + return stat.Atimespec +} + +func GetLastModification(stat *syscall.Stat_t) syscall.Timespec { + return stat.Mtimespec +} diff --git a/pkg/system/unsupported.go b/pkg/system/unsupported.go new file mode 100644 index 0000000000..eb3ec7ee92 --- /dev/null +++ b/pkg/system/unsupported.go @@ -0,0 +1,15 @@ +// +build !linux + +package system + +import ( + "os/exec" +) + +func SetCloneFlags(cmd *exec.Cmd, flag uintptr) { + +} + +func UsetCloseOnExec(fd uintptr) error { + return ErrNotSupportedPlatform +} diff --git a/archive/stat_linux.go b/pkg/system/utimes_linux.go similarity index 78% rename from archive/stat_linux.go rename to pkg/system/utimes_linux.go index f87a99c55a..c00f4026a5 100644 --- a/archive/stat_linux.go +++ b/pkg/system/utimes_linux.go @@ -1,18 +1,10 @@ -package archive +package system import ( "syscall" "unsafe" ) -func getLastAccess(stat *syscall.Stat_t) syscall.Timespec { - return stat.Atim -} - -func getLastModification(stat *syscall.Stat_t) syscall.Timespec { - return stat.Mtim -} - func LUtimesNano(path string, ts []syscall.Timespec) error { // These are not currently available in syscall AT_FDCWD := -100 diff --git a/pkg/system/utimes_unsupported.go b/pkg/system/utimes_unsupported.go new file mode 100644 index 0000000000..d247ba283e --- /dev/null +++ b/pkg/system/utimes_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package system + +import "syscall" + +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} + +func UtimesNano(path string, ts 
[]syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/pkg/system/xattrs_linux.go b/pkg/system/xattrs_linux.go new file mode 100644 index 0000000000..00edb201b5 --- /dev/null +++ b/pkg/system/xattrs_linux.go @@ -0,0 +1,59 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// Returns a nil slice and nil error if the xattr is not set +func Lgetxattr(path string, attr string) ([]byte, error) { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return nil, err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return nil, err + } + + dest := make([]byte, 128) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + if errno == syscall.ENODATA { + return nil, nil + } + if errno == syscall.ERANGE { + dest = make([]byte, sz) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + } + if errno != 0 { + return nil, errno + } + + return dest[:sz], nil +} + +var _zero uintptr + +func Lsetxattr(path string, attr string, data []byte, flags int) error { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return err + } + var dataBytes unsafe.Pointer + if len(data) > 0 { + dataBytes = unsafe.Pointer(&data[0]) + } else { + dataBytes = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) + if errno != 0 { + return errno + } + return nil +} diff --git a/pkg/system/xattrs_unsupported.go b/pkg/system/xattrs_unsupported.go new file mode 100644 index 0000000000..0060c167dc --- /dev/null +++ b/pkg/system/xattrs_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux + +package system + +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/pkg/systemd/MAINTAINERS b/pkg/systemd/MAINTAINERS new file mode 100644 index 0000000000..51228b368a --- /dev/null +++ b/pkg/systemd/MAINTAINERS @@ -0,0 +1 @@ +Brandon Philips (@philips) diff --git a/pkg/user/MAINTAINERS b/pkg/user/MAINTAINERS new file mode 100644 index 0000000000..18e05a3070 --- /dev/null +++ b/pkg/user/MAINTAINERS @@ -0,0 +1 @@ +Tianon Gravi (@tianon) diff --git a/pkg/user/user.go b/pkg/user/user.go new file mode 100644 index 0000000000..1672f7e679 --- /dev/null +++ b/pkg/user/user.go @@ -0,0 +1,241 @@ +package user + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +type User struct { + Name string + Pass string + Uid int + Gid int + Gecos string + Home string + Shell string +} + +type Group struct { + Name string + Pass string + Gid int + List []string +} + +func parseLine(line string, v ...interface{}) { + if line == "" { + return + } + + parts := strings.Split(line, ":") + for i, p := range parts { + if len(v) <= i { + // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files + break + } + + switch e := v[i].(type) { + case *string: + // "root", "adm", 
"/bin/bash" + *e = p + case *int: + // "0", "4", "1000" + // ignore string to int conversion errors, for great "tolerance" of naughty configuration files + *e, _ = strconv.Atoi(p) + case *[]string: + // "", "root", "root,adm,daemon" + if p != "" { + *e = strings.Split(p, ",") + } else { + *e = []string{} + } + default: + // panic, because this is a programming/logic error, not a runtime one + panic("parseLine expects only pointers! argument " + strconv.Itoa(i) + " is not a pointer!") + } + } +} + +func ParsePasswd() ([]*User, error) { + return ParsePasswdFilter(nil) +} + +func ParsePasswdFilter(filter func(*User) bool) ([]*User, error) { + f, err := os.Open("/etc/passwd") + if err != nil { + return nil, err + } + defer f.Close() + return parsePasswdFile(f, filter) +} + +func parsePasswdFile(r io.Reader, filter func(*User) bool) ([]*User, error) { + var ( + s = bufio.NewScanner(r) + out = []*User{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" { + continue + } + + // see: man 5 passwd + // name:password:UID:GID:GECOS:directory:shell + // Name:Pass:Uid:Gid:Gecos:Home:Shell + // root:x:0:0:root:/root:/bin/bash + // adm:x:3:4:adm:/var/adm:/bin/false + p := &User{} + parseLine( + text, + &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +func ParseGroup() ([]*Group, error) { + return ParseGroupFilter(nil) +} + +func ParseGroupFilter(filter func(*Group) bool) ([]*Group, error) { + f, err := os.Open("/etc/group") + if err != nil { + return nil, err + } + defer f.Close() + return parseGroupFile(f, filter) +} + +func parseGroupFile(r io.Reader, filter func(*Group) bool) ([]*Group, error) { + var ( + s = bufio.NewScanner(r) + out = []*Group{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := s.Text() + if text == "" { + continue + } + + // see: man 5 group + // group_name:password:GID:user_list + // Name:Pass:Gid:List + // root:x:0:root + // adm:x:4:root,adm,daemon + p := &Group{} + parseLine( + text, + &p.Name, &p.Pass, &p.Gid, &p.List, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +// Given a string like "user", "1000", "user:group", "1000:1000", returns the uid, gid, and list of supplementary group IDs, if possible. +func GetUserGroupSupplementary(userSpec string, defaultUid int, defaultGid int) (int, int, []int, error) { + var ( + uid = defaultUid + gid = defaultGid + suppGids = []int{} + + userArg, groupArg string + ) + + // allow for userArg to have either "user" syntax, or optionally "user:group" syntax + parseLine(userSpec, &userArg, &groupArg) + + users, err := ParsePasswdFilter(func(u *User) bool { + if userArg == "" { + return u.Uid == uid + } + return u.Name == userArg || strconv.Itoa(u.Uid) == userArg + }) + if err != nil && !os.IsNotExist(err) { + if userArg == "" { + userArg = strconv.Itoa(uid) + } + return 0, 0, nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) + } + + haveUser := users != nil && len(users) > 0 + if haveUser { + // if we found any user entries that matched our filter, let's take the first one as "correct" + uid = users[0].Uid + gid = users[0].Gid + } else if userArg != "" { + // we asked for a user but didn't find them... 
let's check to see if we wanted a numeric user + uid, err = strconv.Atoi(userArg) + if err != nil { + // not numeric - we have to bail + return 0, 0, nil, fmt.Errorf("Unable to find user %v", userArg) + } + + // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit + } + + if groupArg != "" || (haveUser && users[0].Name != "") { + groups, err := ParseGroupFilter(func(g *Group) bool { + if groupArg != "" { + return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg + } + for _, u := range g.List { + if u == users[0].Name { + return true + } + } + return false + }) + if err != nil && !os.IsNotExist(err) { + return 0, 0, nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) + } + + haveGroup := groups != nil && len(groups) > 0 + if groupArg != "" { + if haveGroup { + // if we found any group entries that matched our filter, let's take the first one as "correct" + gid = groups[0].Gid + } else { + // we asked for a group but didn't find it... let's check to see if we wanted a numeric group + gid, err = strconv.Atoi(groupArg) + if err != nil { + // not numeric - we have to bail + return 0, 0, nil, fmt.Errorf("Unable to find group %v", groupArg) + } + + // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit + } + } else if haveGroup { + suppGids = make([]int, len(groups)) + for i, group := range groups { + suppGids[i] = group.Gid + } + } + } + + return uid, gid, suppGids, nil +} diff --git a/pkg/user/user_test.go b/pkg/user/user_test.go new file mode 100644 index 0000000000..136632c27e --- /dev/null +++ b/pkg/user/user_test.go @@ -0,0 +1,94 @@ +package user + +import ( + "strings" + "testing" +) + +func TestUserParseLine(t *testing.T) { + var ( + a, b string + c []string + d int + ) + + parseLine("", &a, &b) + if a != "" || b != "" { + t.Fatalf("a and b should be empty ('%v', '%v')", a, b) + } + + parseLine("a", &a, &b) + if a != "a" || b != "" { + t.Fatalf("a should be 'a' and b should be empty ('%v', '%v')", a, b) + } + + parseLine("bad boys:corny cows", &a, &b) + if a != "bad boys" || b != "corny cows" { + t.Fatalf("a should be 'bad boys' and b should be 'corny cows' ('%v', '%v')", a, b) + } + + parseLine("", &c) + if len(c) != 0 { + t.Fatalf("c should be empty (%#v)", c) + } + + parseLine("d,e,f:g:h:i,j,k", &c, &a, &b, &c) + if a != "g" || b != "h" || len(c) != 3 || c[0] != "i" || c[1] != "j" || c[2] != "k" { + t.Fatalf("a should be 'g', b should be 'h', and c should be ['i','j','k'] ('%v', '%v', '%#v')", a, b, c) + } + + parseLine("::::::::::", &a, &b, &c) + if a != "" || b != "" || len(c) != 0 { + t.Fatalf("a, b, and c should all be empty ('%v', '%v', '%#v')", a, b, c) + } + + parseLine("not a number", &d) + if d != 0 { + t.Fatalf("d should be 0 (%v)", d) + } + + parseLine("b:12:c", &a, &d, &b) + if a != "b" || b != "c" || d != 12 { + t.Fatalf("a should be 'b' and b should be 'c', and d should be 12 ('%v', '%v', %v)", a, b, d) + } +} + +func TestUserParsePasswd(t *testing.T) { + users, err := parsePasswdFile(strings.NewReader(` +root:x:0:0:root:/root:/bin/bash +adm:x:3:4:adm:/var/adm:/bin/false +this is just some garbage data +`), nil) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if len(users) != 3 { + t.Fatalf("Expected 3 users, got %v", len(users)) + } + if users[0].Uid != 0 || users[0].Name != "root" { + t.Fatalf("Expected users[0] to be 0 - root, got %v - %v", users[0].Uid, users[0].Name) + } + if users[1].Uid != 3 || users[1].Name != "adm" { 
t.Fatalf("Expected users[1] to be 3 - adm, got %v - %v", users[1].Uid, users[1].Name) + } +} + +func TestUserParseGroup(t *testing.T) { + groups, err := parseGroupFile(strings.NewReader(` +root:x:0:root +adm:x:4:root,adm,daemon +this is just some garbage data +`), nil) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if len(groups) != 3 { + t.Fatalf("Expected 3 groups, got %v", len(groups)) + } + if groups[0].Gid != 0 || groups[0].Name != "root" || len(groups[0].List) != 1 { + t.Fatalf("Expected groups[0] to be 0 - root - 1 member, got %v - %v - %v", groups[0].Gid, groups[0].Name, len(groups[0].List)) + } + if groups[1].Gid != 4 || groups[1].Name != "adm" || len(groups[1].List) != 3 { + t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List)) + } +} diff --git a/pkg/version/version.go b/pkg/version/version.go new file mode 100644 index 0000000000..3721d64aa8 --- /dev/null +++ b/pkg/version/version.go @@ -0,0 +1,52 @@ +package version + +import ( + "strconv" + "strings" +) + +type Version string + +func (me Version) compareTo(other string) int { + var ( + meTab = strings.Split(string(me), ".") + otherTab = strings.Split(other, ".") + ) + for i, s := range meTab { + var meInt, otherInt int + meInt, _ = strconv.Atoi(s) + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if meInt > otherInt { + return 1 + } + if otherInt > meInt { + return -1 + } + } + if len(otherTab) > len(meTab) { + return -1 + } + return 0 +} + +func (me Version) LessThan(other string) bool { + return me.compareTo(other) == -1 +} + +func (me Version) LessThanOrEqualTo(other string) bool { + return me.compareTo(other) <= 0 +} + +func (me Version) GreaterThan(other string) bool { + return me.compareTo(other) == 1 +} + +func (me Version) GreaterThanOrEqualTo(other string) bool { + return me.compareTo(other) >= 0 +} + +func (me Version) Equal(other string) bool { + return me.compareTo(other) == 0 +} diff --git a/pkg/version/version_test.go b/pkg/version/version_test.go new file mode 100644 index 0000000000..4bebd0c434 --- /dev/null +++ b/pkg/version/version_test.go @@ -0,0 +1,25 @@ +package version + +import ( + "testing" +) + +func assertVersion(t *testing.T, a, b string, result int) { + if r := Version(a).compareTo(b); r != result { + t.Fatalf("Unexpected version comparison result. 
Found %d, expected %d", r, result) + } +} + +func TestCompareVersion(t *testing.T) { + assertVersion(t, "1.12", "1.12", 0) + assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) + assertVersion(t, "1", "1.0.1", -1) + assertVersion(t, "1.0.1", "1", 1) + assertVersion(t, "1.0.1", "1.0.2", -1) + assertVersion(t, "1.0.2", "1.0.3", -1) + assertVersion(t, "1.0.3", "1.1", -1) + assertVersion(t, "1.1", "1.1.1", -1) + assertVersion(t, "1.1.1", "1.1.2", -1) + assertVersion(t, "1.1.2", "1.2", -1) + +} diff --git a/reflink_copy_linux.go b/reflink_copy_linux.go deleted file mode 100644 index 74a0cb98f7..0000000000 --- a/reflink_copy_linux.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build amd64 - -package docker - -// FIXME: This could be easily rewritten in pure Go - -/* -#include -#include -#include - -// See linux.git/fs/btrfs/ioctl.h -#define BTRFS_IOCTL_MAGIC 0x94 -#define BTRFS_IOC_CLONE _IOW(BTRFS_IOCTL_MAGIC, 9, int) - -int -btrfs_reflink(int fd_out, int fd_in) -{ - int res; - res = ioctl(fd_out, BTRFS_IOC_CLONE, fd_in); - if (res < 0) - return errno; - return 0; -} - -*/ -import "C" - -import ( - "io" - "os" - "syscall" -) - -// FIXME: Move this to btrfs package? - -func BtrfsReflink(fd_out, fd_in uintptr) error { - res := C.btrfs_reflink(C.int(fd_out), C.int(fd_in)) - if res != 0 { - return syscall.Errno(res) - } - return nil -} - -func CopyFile(dstFile, srcFile *os.File) error { - err := BtrfsReflink(dstFile.Fd(), srcFile.Fd()) - if err == nil { - return nil - } - - // Fall back to normal copy - // FIXME: Check the return of Copy and compare with dstFile.Stat().Size - _, err = io.Copy(dstFile, srcFile) - return err -} diff --git a/reflink_copy_unsupported.go b/reflink_copy_unsupported.go deleted file mode 100644 index 271ed0178f..0000000000 --- a/reflink_copy_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux !amd64 - -package docker - -import ( - "io" - "os" -) - -func CopyFile(dstFile, srcFile *os.File) error { - // No BTRFS reflink suppport, Fall back to normal copy - - // FIXME: Check the return of Copy and compare with dstFile.Stat().Size - _, err := io.Copy(dstFile, srcFile) - return err -} diff --git a/registry/registry.go b/registry/registry.go index df94302305..543dcea383 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -2,6 +2,7 @@ package registry import ( "bytes" + "crypto/sha256" "encoding/json" "errors" "fmt" @@ -91,7 +92,7 @@ func validateRepositoryName(repositoryName string) error { return nil } -// Resolves a repository name to a endpoint + name +// Resolves a repository name to a hostname + name func ResolveRepositoryName(reposName string) (string, string, error) { if strings.Contains(reposName, "://") { // It cannot contain a scheme! 
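The doc-comment change above goes with a behavior change shown in the next hunk: ResolveRepositoryName now returns the bare hostname and no longer expands or verifies the registry endpoint itself. A hedged sketch of the new contract (the repository name is invented):

package main

import (
	"fmt"

	"github.com/dotcloud/docker/registry"
)

func main() {
	host, repo, err := registry.ResolveRepositoryName("localhost.localdomain:5000/private/moonbase")
	if err != nil {
		panic(err)
	}
	// With this patch:   host == "localhost.localdomain:5000"
	// Before this patch: host == "http://localhost.localdomain:5000/v1/"
	fmt.Println(host, repo)
}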
@@ -117,11 +118,8 @@ func ResolveRepositoryName(reposName string) (string, string, error) { if err := validateRepositoryName(reposName); err != nil { return "", "", err } - endpoint, err := ExpandAndVerifyRegistryUrl(hostname) - if err != nil { - return "", "", err - } - return endpoint, reposName, err + + return hostname, reposName, nil } // this method expands the registry name as used in the prefix of a repo @@ -388,6 +386,7 @@ func (r *Registry) PushImageChecksumRegistry(imgData *ImgData, registry string, } setTokenAuth(req, token) req.Header.Set("X-Docker-Checksum", imgData.Checksum) + req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) res, err := doWithCookies(r.client, req) if err != nil { @@ -446,26 +445,28 @@ func (r *Registry) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regis return nil } -func (r *Registry) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, token []string, jsonRaw []byte) (checksum string, err error) { +func (r *Registry) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, token []string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { utils.Debugf("[registry] Calling PUT %s", registry+"images/"+imgID+"/layer") - tarsumLayer := &utils.TarSum{Reader: layer} + h := sha256.New() + checksumLayer := &utils.CheckSum{Reader: layer, Hash: h} + tarsumLayer := &utils.TarSum{Reader: checksumLayer} req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", tarsumLayer) if err != nil { - return "", err + return "", "", err } req.ContentLength = -1 req.TransferEncoding = []string{"chunked"} setTokenAuth(req, token) res, err := doWithCookies(r.client, req) if err != nil { - return "", fmt.Errorf("Failed to upload layer: %s", err) + return "", "", fmt.Errorf("Failed to upload layer: %s", err) } if rc, ok := layer.(io.Closer); ok { if err := rc.Close(); err != nil { - return "", err + return "", "", err } } defer res.Body.Close() @@ -473,11 +474,13 @@ func (r *Registry) PushImageLayerRegistry(imgID string, layer io.Reader, registr if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { - return "", utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + return "", "", utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) } - return "", utils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %s", res.StatusCode, errBody), res) + return "", "", utils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %s", res.StatusCode, errBody), res) } - return tarsumLayer.Sum(jsonRaw), nil + + checksumPayload = "sha256:" + checksumLayer.Sum() + return tarsumLayer.Sum(jsonRaw), checksumPayload, nil } // push a tag on the registry. 
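To clarify the new checksum plumbing above: the layer stream is wrapped twice so a single read pass yields both the raw payload sha256 (sent as X-Docker-Checksum-Payload) and the tarsum (the existing checksum). A simplified sketch, not the actual registry code; the stand-in reader is invented, and the real code streams a tar archive through this chain as the PUT request body:

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/dotcloud/docker/utils"
)

func main() {
	layer := strings.NewReader("...") // stand-in; PushImageLayerRegistry receives the real tar stream
	checksumLayer := &utils.CheckSum{Reader: layer, Hash: sha256.New()} // hashes the raw bytes as they pass
	tarsumLayer := &utils.TarSum{Reader: checksumLayer}                 // computes the tarsum on top
	io.Copy(ioutil.Discard, tarsumLayer)                                // one pass drains both wrappers
	fmt.Println("sha256:" + checksumLayer.Sum())                        // becomes ImgData.ChecksumPayload
	fmt.Println(tarsumLayer.Sum(nil))                                   // becomes ImgData.Checksum (jsonRaw elided here)
}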
@@ -671,9 +674,10 @@ type RepositoryData struct { } type ImgData struct { - ID string `json:"id"` - Checksum string `json:"checksum,omitempty"` - Tag string `json:",omitempty"` + ID string `json:"id"` + Checksum string `json:"checksum,omitempty"` + ChecksumPayload string `json:"-"` + Tag string `json:",omitempty"` } type Registry struct { diff --git a/registry/registry_test.go b/registry/registry_test.go index 16bc431e55..82a27a166f 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -124,7 +124,7 @@ func TestPushImageJSONRegistry(t *testing.T) { func TestPushImageLayerRegistry(t *testing.T) { r := spawnTestRegistry(t) layer := strings.NewReader("") - _, err := r.PushImageLayerRegistry(IMAGE_ID, layer, makeURL("/v1/"), TOKEN, []byte{}) + _, _, err := r.PushImageLayerRegistry(IMAGE_ID, layer, makeURL("/v1/"), TOKEN, []byte{}) if err != nil { t.Fatal(err) } @@ -145,7 +145,7 @@ func TestResolveRepositoryName(t *testing.T) { if err != nil { t.Fatal(err) } - assertEqual(t, ep, "http://"+u+"/v1/", "Expected endpoint to be "+u) + assertEqual(t, ep, u, "Expected endpoint to be "+u) assertEqual(t, repo, "private/moonbase", "Expected endpoint to be private/moonbase") } diff --git a/runconfig/compare.go b/runconfig/compare.go new file mode 100644 index 0000000000..c09f897716 --- /dev/null +++ b/runconfig/compare.go @@ -0,0 +1,67 @@ +package runconfig + +// Compare two Config structs. The "Image" and "Hostname" fields are not compared. +// If OpenStdin is set on either config, the two are considered different. +func Compare(a, b *Config) bool { + if a == nil || b == nil || + a.OpenStdin || b.OpenStdin { + return false + } + if a.AttachStdout != b.AttachStdout || + a.AttachStderr != b.AttachStderr || + a.User != b.User || + a.Memory != b.Memory || + a.MemorySwap != b.MemorySwap || + a.CpuShares != b.CpuShares || + a.OpenStdin != b.OpenStdin || + a.Tty != b.Tty || + a.VolumesFrom != b.VolumesFrom { + return false + } + if len(a.Cmd) != len(b.Cmd) || + len(a.Dns) != len(b.Dns) || + len(a.Env) != len(b.Env) || + len(a.PortSpecs) != len(b.PortSpecs) || + len(a.ExposedPorts) != len(b.ExposedPorts) || + len(a.Entrypoint) != len(b.Entrypoint) || + len(a.Volumes) != len(b.Volumes) { + return false + } + + for i := 0; i < len(a.Cmd); i++ { + if a.Cmd[i] != b.Cmd[i] { + return false + } + } + for i := 0; i < len(a.Dns); i++ { + if a.Dns[i] != b.Dns[i] { + return false + } + } + for i := 0; i < len(a.Env); i++ { + if a.Env[i] != b.Env[i] { + return false + } + } + for i := 0; i < len(a.PortSpecs); i++ { + if a.PortSpecs[i] != b.PortSpecs[i] { + return false + } + } + for k := range a.ExposedPorts { + if _, exists := b.ExposedPorts[k]; !exists { + return false + } + } + for i := 0; i < len(a.Entrypoint); i++ { + if a.Entrypoint[i] != b.Entrypoint[i] { + return false + } + } + for key := range a.Volumes { + if _, exists := b.Volumes[key]; !exists { + return false + } + } + return true +} diff --git a/runconfig/config.go b/runconfig/config.go new file mode 100644 index 0000000000..9faa823a57 --- /dev/null +++ b/runconfig/config.go @@ -0,0 +1,76 @@ +package runconfig + +import ( + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/nat" +) + +// Note: the Config structure should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. 
+type Config struct { + Hostname string + Domainname string + User string + Memory int64 // Memory limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap + CpuShares int64 // CPU shares (relative weight vs. other containers) + AttachStdin bool + AttachStdout bool + AttachStderr bool + PortSpecs []string // Deprecated - Can be in the format of 8080/tcp + ExposedPorts map[nat.Port]struct{} + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the first attached client disconnects. + Env []string + Cmd []string + Dns []string + Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + Volumes map[string]struct{} + VolumesFrom string + WorkingDir string + Entrypoint []string + NetworkDisabled bool + OnBuild []string +} + +func ContainerConfigFromJob(job *engine.Job) *Config { + config := &Config{ + Hostname: job.Getenv("Hostname"), + Domainname: job.Getenv("Domainname"), + User: job.Getenv("User"), + Memory: job.GetenvInt64("Memory"), + MemorySwap: job.GetenvInt64("MemorySwap"), + CpuShares: job.GetenvInt64("CpuShares"), + AttachStdin: job.GetenvBool("AttachStdin"), + AttachStdout: job.GetenvBool("AttachStdout"), + AttachStderr: job.GetenvBool("AttachStderr"), + Tty: job.GetenvBool("Tty"), + OpenStdin: job.GetenvBool("OpenStdin"), + StdinOnce: job.GetenvBool("StdinOnce"), + Image: job.Getenv("Image"), + VolumesFrom: job.Getenv("VolumesFrom"), + WorkingDir: job.Getenv("WorkingDir"), + NetworkDisabled: job.GetenvBool("NetworkDisabled"), + } + job.GetenvJson("ExposedPorts", &config.ExposedPorts) + job.GetenvJson("Volumes", &config.Volumes) + if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil { + config.PortSpecs = PortSpecs + } + if Env := job.GetenvList("Env"); Env != nil { + config.Env = Env + } + if Cmd := job.GetenvList("Cmd"); Cmd != nil { + config.Cmd = Cmd + } + if Dns := job.GetenvList("Dns"); Dns != nil { + config.Dns = Dns + } + if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil { + config.Entrypoint = Entrypoint + } + + return config +} diff --git a/config_test.go b/runconfig/config_test.go similarity index 82% rename from config_test.go rename to runconfig/config_test.go index 31c961135a..3ef31491fc 100644 --- a/config_test.go +++ b/runconfig/config_test.go @@ -1,10 +1,11 @@ -package docker +package runconfig import ( + "github.com/dotcloud/docker/nat" "testing" ) -func TestCompareConfig(t *testing.T) { +func TestCompare(t *testing.T) { volumes1 := make(map[string]struct{}) volumes1["/test1"] = struct{}{} config1 := Config{ @@ -44,24 +45,24 @@ func TestCompareConfig(t *testing.T) { VolumesFrom: "11111111", Volumes: volumes2, } - if CompareConfig(&config1, &config2) { - t.Fatalf("CompareConfig should return false, Dns are different") + if Compare(&config1, &config2) { + t.Fatalf("Compare should return false, Dns are different") } - if CompareConfig(&config1, &config3) { - t.Fatalf("CompareConfig should return false, PortSpecs are different") + if Compare(&config1, &config3) { + t.Fatalf("Compare should return false, PortSpecs are different") } - if CompareConfig(&config1, &config4) { - t.Fatalf("CompareConfig should return false, VolumesFrom are different") + if Compare(&config1, &config4) { + t.Fatalf("Compare should return false, VolumesFrom are different") } - if CompareConfig(&config1, &config5) { - t.Fatalf("CompareConfig should return false, Volumes are different") + if 
Compare(&config1, &config5) { + t.Fatalf("Compare should return false, Volumes are different") } - if !CompareConfig(&config1, &config1) { - t.Fatalf("CompareConfig should return true") + if !Compare(&config1, &config1) { + t.Fatalf("Compare should return true") } } -func TestMergeConfig(t *testing.T) { +func TestMerge(t *testing.T) { volumesImage := make(map[string]struct{}) volumesImage["/test1"] = struct{}{} volumesImage["/test2"] = struct{}{} @@ -82,7 +83,7 @@ func TestMergeConfig(t *testing.T) { Volumes: volumesUser, } - if err := MergeConfig(configUser, configImage); err != nil { + if err := Merge(configUser, configImage); err != nil { t.Error(err) } @@ -125,7 +126,7 @@ func TestMergeConfig(t *testing.T) { t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom) } - ports, _, err := parsePortSpecs([]string{"0000"}) + ports, _, err := nat.ParsePortSpecs([]string{"0000"}) if err != nil { t.Error(err) } @@ -133,7 +134,7 @@ func TestMergeConfig(t *testing.T) { ExposedPorts: ports, } - if err := MergeConfig(configUser, configImage2); err != nil { + if err := Merge(configUser, configImage2); err != nil { t.Error(err) } diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go new file mode 100644 index 0000000000..6c8618ee81 --- /dev/null +++ b/runconfig/hostconfig.go @@ -0,0 +1,39 @@ +package runconfig + +import ( + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/nat" +) + +type HostConfig struct { + Binds []string + ContainerIDFile string + LxcConf []KeyValuePair + Privileged bool + PortBindings nat.PortMap + Links []string + PublishAllPorts bool +} + +type KeyValuePair struct { + Key string + Value string +} + +func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { + hostConfig := &HostConfig{ + ContainerIDFile: job.Getenv("ContainerIDFile"), + Privileged: job.GetenvBool("Privileged"), + PublishAllPorts: job.GetenvBool("PublishAllPorts"), + } + job.GetenvJson("LxcConf", &hostConfig.LxcConf) + job.GetenvJson("PortBindings", &hostConfig.PortBindings) + if Binds := job.GetenvList("Binds"); Binds != nil { + hostConfig.Binds = Binds + } + if Links := job.GetenvList("Links"); Links != nil { + hostConfig.Links = Links + } + + return hostConfig +} diff --git a/runconfig/merge.go b/runconfig/merge.go new file mode 100644 index 0000000000..a8d677baa8 --- /dev/null +++ b/runconfig/merge.go @@ -0,0 +1,119 @@ +package runconfig + +import ( + "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/utils" + "strings" +) + +func Merge(userConf, imageConf *Config) error { + if userConf.User == "" { + userConf.User = imageConf.User + } + if userConf.Memory == 0 { + userConf.Memory = imageConf.Memory + } + if userConf.MemorySwap == 0 { + userConf.MemorySwap = imageConf.MemorySwap + } + if userConf.CpuShares == 0 { + userConf.CpuShares = imageConf.CpuShares + } + if userConf.ExposedPorts == nil || len(userConf.ExposedPorts) == 0 { + userConf.ExposedPorts = imageConf.ExposedPorts + } else if imageConf.ExposedPorts != nil { + if userConf.ExposedPorts == nil { + userConf.ExposedPorts = make(nat.PortSet) + } + for port := range imageConf.ExposedPorts { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + } + + if userConf.PortSpecs != nil && len(userConf.PortSpecs) > 0 { + if userConf.ExposedPorts == nil { + userConf.ExposedPorts = make(nat.PortSet) + } + ports, _, err := nat.ParsePortSpecs(userConf.PortSpecs) + if err != nil { + return err + } + for port := range ports { + if _, exists := 
userConf.ExposedPorts[port]; !exists {
+				userConf.ExposedPorts[port] = struct{}{}
+			}
+		}
+		userConf.PortSpecs = nil
+	}
+	if imageConf.PortSpecs != nil && len(imageConf.PortSpecs) > 0 {
+		// FIXME: I think we can safely remove this. Leaving it for now for the sake of reverse-compat paranoia.
+		utils.Debugf("Migrating image port specs to container: %s", strings.Join(imageConf.PortSpecs, ", "))
+		if userConf.ExposedPorts == nil {
+			userConf.ExposedPorts = make(nat.PortSet)
+		}
+
+		ports, _, err := nat.ParsePortSpecs(imageConf.PortSpecs)
+		if err != nil {
+			return err
+		}
+		for port := range ports {
+			if _, exists := userConf.ExposedPorts[port]; !exists {
+				userConf.ExposedPorts[port] = struct{}{}
+			}
+		}
+	}
+	if !userConf.Tty {
+		userConf.Tty = imageConf.Tty
+	}
+	if !userConf.OpenStdin {
+		userConf.OpenStdin = imageConf.OpenStdin
+	}
+	if !userConf.StdinOnce {
+		userConf.StdinOnce = imageConf.StdinOnce
+	}
+	if userConf.Env == nil || len(userConf.Env) == 0 {
+		userConf.Env = imageConf.Env
+	} else {
+		for _, imageEnv := range imageConf.Env {
+			found := false
+			imageEnvKey := strings.Split(imageEnv, "=")[0]
+			for _, userEnv := range userConf.Env {
+				userEnvKey := strings.Split(userEnv, "=")[0]
+				if imageEnvKey == userEnvKey {
+					found = true
+				}
+			}
+			if !found {
+				userConf.Env = append(userConf.Env, imageEnv)
+			}
+		}
+	}
+	if userConf.Cmd == nil || len(userConf.Cmd) == 0 {
+		userConf.Cmd = imageConf.Cmd
+	}
+	if userConf.Dns == nil || len(userConf.Dns) == 0 {
+		userConf.Dns = imageConf.Dns
+	} else {
+		// duplicates aren't an issue here
+		userConf.Dns = append(userConf.Dns, imageConf.Dns...)
+	}
+	if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 {
+		userConf.Entrypoint = imageConf.Entrypoint
+	}
+	if userConf.WorkingDir == "" {
+		userConf.WorkingDir = imageConf.WorkingDir
+	}
+	if userConf.VolumesFrom == "" {
+		userConf.VolumesFrom = imageConf.VolumesFrom
+	}
+	if userConf.Volumes == nil || len(userConf.Volumes) == 0 {
+		userConf.Volumes = imageConf.Volumes
+	} else {
+		for k, v := range imageConf.Volumes {
+			userConf.Volumes[k] = v
+		}
+	}
+	return nil
+}
diff --git a/runconfig/parse.go b/runconfig/parse.go
new file mode 100644
index 0000000000..fb08c068b2
--- /dev/null
+++ b/runconfig/parse.go
@@ -0,0 +1,246 @@
+package runconfig
+
+import (
+	"fmt"
+	"github.com/dotcloud/docker/nat"
+	flag "github.com/dotcloud/docker/pkg/mflag"
+	"github.com/dotcloud/docker/pkg/opts"
+	"github.com/dotcloud/docker/pkg/sysinfo"
+	"github.com/dotcloud/docker/utils"
+	"io/ioutil"
+	"path"
+	"strings"
+)
+
+var (
+	ErrInvalidWorkingDirectory  = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.")
+	ErrConflictAttachDetach     = fmt.Errorf("Conflicting options: -a and -d")
+	ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: -rm and -d")
+)
+
+// FIXME: only used in tests
+func Parse(args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) {
+	cmd := flag.NewFlagSet("run", flag.ContinueOnError)
+	cmd.SetOutput(ioutil.Discard)
+	cmd.Usage = nil
+	return parseRun(cmd, args, sysInfo)
+}
+
+// FIXME: this maps the legacy commands.go code. It should be merged with Parse to only expose a single parse function.
+func ParseSubcommand(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) {
+	return parseRun(cmd, args, sysInfo)
+}
+
+func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) {
+	var (
+		// FIXME: use utils.ListOpts for attach and volumes?
+		flAttach  = opts.NewListOpts(opts.ValidateAttach)
+		flVolumes = opts.NewListOpts(opts.ValidatePath)
+		flLinks   = opts.NewListOpts(opts.ValidateLink)
+		flEnv     = opts.NewListOpts(opts.ValidateEnv)
+
+		flPublish     opts.ListOpts
+		flExpose      opts.ListOpts
+		flDns         opts.ListOpts
+		flVolumesFrom opts.ListOpts
+		flLxcOpts     opts.ListOpts
+
+		flAutoRemove      = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)")
+		flDetach          = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id")
+		flNetwork         = cmd.Bool([]string{"n", "-networking"}, true, "Enable networking for this container")
+		flPrivileged      = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container")
+		flPublishAll      = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to the host interfaces")
+		flStdin           = cmd.Bool([]string{"i", "-interactive"}, false, "Keep stdin open even if not attached")
+		flTty             = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-tty")
+		flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file")
+		flEntrypoint      = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default entrypoint of the image")
+		flHostname        = cmd.String([]string{"h", "-hostname"}, "", "Container host name")
+		flMemoryString    = cmd.String([]string{"m", "-memory"}, "", "Memory limit (format: <number><optional unit>, where unit = b, k, m or g)")
+		flUser            = cmd.String([]string{"u", "-user"}, "", "Username or UID")
+		flWorkingDir      = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container")
+		flCpuShares       = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
+
+		// For documentation purposes
+		_ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-tty mode)")
+		_ = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container")
+	)
+
+	cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to stdin, stdout or stderr.")
+	cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
+	cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container (name:alias)")
+	cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables")
+
+	cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat))
+	cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host")
+	cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers")
+	cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)")
+	cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
+
+	if err := cmd.Parse(args); err != nil {
+		return nil, nil, cmd, err
+	}
+
+	// Check if the kernel supports memory limit cgroup.
+	if sysInfo != nil && *flMemoryString != "" && !sysInfo.MemoryLimit {
+		*flMemoryString = ""
+	}
+
+	// Validate input params
+	if *flDetach && flAttach.Len() > 0 {
+		return nil, nil, cmd, ErrConflictAttachDetach
+	}
+	if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
+		return nil, nil, cmd, ErrInvalidWorkingDirectory
+	}
+	if *flDetach && *flAutoRemove {
+		return nil, nil, cmd, ErrConflictDetachAutoRemove
+	}
+
+	// If neither -d nor -a is set, attach to everything by default
+	if flAttach.Len() == 0 && !*flDetach {
+		if !*flDetach {
+			flAttach.Set("stdout")
+			flAttach.Set("stderr")
+			if *flStdin {
+				flAttach.Set("stdin")
+			}
+		}
+	}
+
+	var flMemory int64
+	if *flMemoryString != "" {
+		parsedMemory, err := utils.RAMInBytes(*flMemoryString)
+		if err != nil {
+			return nil, nil, cmd, err
+		}
+		flMemory = parsedMemory
+	}
+
+	var binds []string
+	// add any bind targets to the list of container volumes
+	for bind := range flVolumes.GetMap() {
+		if arr := strings.Split(bind, ":"); len(arr) > 1 {
+			if arr[0] == "/" {
+				return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'")
+			}
+			dstDir := arr[1]
+			flVolumes.Set(dstDir)
+			binds = append(binds, bind)
+			flVolumes.Delete(bind)
+		} else if bind == "/" {
+			return nil, nil, cmd, fmt.Errorf("Invalid volume: path can't be '/'")
+		}
+	}
+
+	var (
+		parsedArgs = cmd.Args()
+		runCmd     []string
+		entrypoint []string
+		image      string
+	)
+	if len(parsedArgs) >= 1 {
+		image = cmd.Arg(0)
+	}
+	if len(parsedArgs) > 1 {
+		runCmd = parsedArgs[1:]
+	}
+	if *flEntrypoint != "" {
+		entrypoint = []string{*flEntrypoint}
+	}
+
+	lxcConf, err := parseLxcConfOpts(flLxcOpts)
+	if err != nil {
+		return nil, nil, cmd, err
+	}
+
+	var (
+		domainname string
+		hostname   = *flHostname
+		parts      = strings.SplitN(hostname, ".", 2)
+	)
+	if len(parts) > 1 {
+		hostname = parts[0]
+		domainname = parts[1]
+	}
+
+	ports, portBindings, err := nat.ParsePortSpecs(flPublish.GetAll())
+	if err != nil {
+		return nil, nil, cmd, err
+	}
+
+	// Merge in exposed ports to the map of published ports
+	for _, e := range flExpose.GetAll() {
+		if strings.Contains(e, ":") {
+			return nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e)
+		}
+		p := nat.NewPort(nat.SplitProtoPort(e))
+		if _, exists := ports[p]; !exists {
+			ports[p] = struct{}{}
+		}
+	}
+
+	config := &Config{
+		Hostname:        hostname,
+		Domainname:      domainname,
+		PortSpecs:       nil, // Deprecated
+		ExposedPorts:    ports,
+		User:            *flUser,
+		Tty:             *flTty,
+		NetworkDisabled: !*flNetwork,
+		OpenStdin:       *flStdin,
+		Memory:          flMemory,
+		CpuShares:       *flCpuShares,
+		AttachStdin:     flAttach.Get("stdin"),
+		AttachStdout:    flAttach.Get("stdout"),
+		AttachStderr:    flAttach.Get("stderr"),
+		Env:             flEnv.GetAll(),
+		Cmd:             runCmd,
+		Dns:             flDns.GetAll(),
+		Image:           image,
+		Volumes:         flVolumes.GetMap(),
+		VolumesFrom:     strings.Join(flVolumesFrom.GetAll(), ","),
+		Entrypoint:      entrypoint,
+		WorkingDir:      *flWorkingDir,
+	}
+
+	hostConfig := &HostConfig{
+		Binds:           binds,
+		ContainerIDFile: *flContainerIDFile,
+		LxcConf:         lxcConf,
+		Privileged:      *flPrivileged,
+		PortBindings:    portBindings,
+		Links:           flLinks.GetAll(),
+		PublishAllPorts: *flPublishAll,
+	}
+
+	if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit {
+		//fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. 
Limitation discarded.\n") + config.MemorySwap = -1 + } + + // When allocating stdin in attached mode, close stdin at client disconnect + if config.OpenStdin && config.AttachStdin { + config.StdinOnce = true + } + return config, hostConfig, cmd, nil +} + +func parseLxcConfOpts(opts opts.ListOpts) ([]KeyValuePair, error) { + out := make([]KeyValuePair, opts.Len()) + for i, o := range opts.GetAll() { + k, v, err := parseLxcOpt(o) + if err != nil { + return nil, err + } + out[i] = KeyValuePair{Key: k, Value: v} + } + return out, nil +} + +func parseLxcOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse lxc conf option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} diff --git a/runconfig/parse_test.go b/runconfig/parse_test.go new file mode 100644 index 0000000000..2b89e88ec3 --- /dev/null +++ b/runconfig/parse_test.go @@ -0,0 +1,22 @@ +package runconfig + +import ( + "testing" +) + +func TestParseLxcConfOpt(t *testing.T) { + opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} + + for _, o := range opts { + k, v, err := parseLxcOpt(o) + if err != nil { + t.FailNow() + } + if k != "lxc.utsname" { + t.Fail() + } + if v != "docker" { + t.Fail() + } + } +} diff --git a/runtime.go b/runtime.go index 7e4ae79b40..84f11e87b2 100644 --- a/runtime.go +++ b/runtime.go @@ -4,10 +4,11 @@ import ( "container/list" "fmt" "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/execdriver/chroot" "github.com/dotcloud/docker/execdriver/lxc" + "github.com/dotcloud/docker/execdriver/native" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/graphdriver/aufs" _ "github.com/dotcloud/docker/graphdriver/btrfs" @@ -17,6 +18,7 @@ import ( "github.com/dotcloud/docker/networkdriver/portallocator" "github.com/dotcloud/docker/pkg/graphdb" "github.com/dotcloud/docker/pkg/sysinfo" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -133,14 +135,6 @@ func (runtime *Runtime) Register(container *Container) error { return err } - // Get the root filesystem from the driver - basefs, err := runtime.driver.Get(container.ID) - if err != nil { - return fmt.Errorf("Error getting container filesystem %s from driver %s: %s", container.ID, runtime.driver, err) - } - defer runtime.driver.Put(container.ID) - container.basefs = basefs - container.runtime = runtime // Attach to stdout and stderr @@ -160,12 +154,39 @@ func (runtime *Runtime) Register(container *Container) error { // if so, then we need to restart monitor and init a new lock // If the container is supposed to be running, make sure of it if container.State.IsRunning() { - info := runtime.execDriver.Info(container.ID) + if container.State.IsGhost() { + utils.Debugf("killing ghost %s", container.ID) + existingPid := container.State.Pid + container.State.SetGhost(false) + container.State.SetStopped(0) + + if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") { + lxc.KillLxc(container.ID, 9) + } else { + command := &execdriver.Command{ + ID: container.ID, + } + command.Process = &os.Process{Pid: existingPid} + runtime.execDriver.Kill(command, 9) + } + // ensure that the filesystem is also unmounted + unmountVolumesForContainer(container) + if err := container.Unmount(); err != nil { + utils.Debugf("ghost unmount error 
%s", err)
+			}
+		}
+
+		info := runtime.execDriver.Info(container.ID)
 		if !info.IsRunning() {
 			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
 			if runtime.config.AutoRestart {
 				utils.Debugf("Restarting")
+				unmountVolumesForContainer(container)
+				if err := container.Unmount(); err != nil {
+					utils.Debugf("restart unmount error %s", err)
+				}
+				container.State.SetGhost(false)
 				container.State.SetStopped(0)
 				if err := container.Start(); err != nil {
@@ -178,15 +199,6 @@ func (runtime *Runtime) Register(container *Container) error {
 					return err
 				}
 			}
-		} else {
-			utils.Debugf("Reconnecting to container %v", container.ID)
-
-			if err := container.allocateNetwork(); err != nil {
-				return err
-			}
-
-			container.waitLock = make(chan struct{})
-			go container.monitor(nil)
 		}
 	} else {
 		// When the container is not running, we still initialize the waitLock
@@ -336,7 +348,7 @@ func (runtime *Runtime) restore() error {
 	}
 
 // Create creates a new container from the given configuration with a given name.
-func (runtime *Runtime) Create(config *Config, name string) (*Container, []string, error) {
+func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Container, []string, error) {
 	// Lookup image
 	img, err := runtime.repositories.LookupImage(config.Image)
 	if err != nil {
@@ -354,7 +366,7 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
 		return nil, nil, fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth)
 	}
 
-	checkDeprecatedExpose := func(config *Config) bool {
+	checkDeprecatedExpose := func(config *runconfig.Config) bool {
 		if config != nil {
 			if config.PortSpecs != nil {
 				for _, p := range config.PortSpecs {
@@ -369,18 +381,16 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
 	warnings := []string{}
 	if checkDeprecatedExpose(img.Config) || checkDeprecatedExpose(config) {
-		warnings = append(warnings, "The mapping to public ports on your host has been deprecated. Use -p to publish the ports.")
+		warnings = append(warnings, "The mapping to public ports on your host via Dockerfile EXPOSE (host:port:port) has been deprecated. Use -p to publish the ports.")
 	}
 
 	if img.Config != nil {
-		if err := MergeConfig(config, img.Config); err != nil {
+		if err := runconfig.Merge(config, img.Config); err != nil {
 			return nil, nil, err
 		}
 	}
 
-	if len(config.Entrypoint) != 0 && config.Cmd == nil {
-		config.Cmd = []string{}
-	} else if config.Cmd == nil || len(config.Cmd) == 0 {
+	if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 {
 		return nil, nil, fmt.Errorf("No command specified")
 	}
 
@@ -404,7 +414,7 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
 	// Set the entity in the graph using the default name specified
 	if _, err := runtime.containerGraph.Set(name, id); err != nil {
-		if !strings.HasSuffix(err.Error(), "name are not unique") {
+		if !graphdb.IsNonUniqueNameError(err) {
 			return nil, nil, err
 		}
@@ -450,11 +460,12 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
 		Path:            entrypoint,
 		Args:            args, //FIXME: de-duplicate from config
 		Config:          config,
-		hostConfig:      &HostConfig{},
+		hostConfig:      &runconfig.HostConfig{},
 		Image:           img.ID, // Always use the resolved image id
 		NetworkSettings: &NetworkSettings{},
 		Name:            name,
 		Driver:          runtime.driver.String(),
+		ExecDriver:      runtime.execDriver.Name(),
 	}
 	container.root = runtime.containerRoot(container.ID)
 	// Step 1: create the container directory.
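To make the refactor above concrete, here is a minimal, hypothetical sketch of how a caller drives the new runconfig API. It is not part of the patch: the import path and field names are taken from the diff, while the values are invented for illustration. Merge fills in every field the user left unset from the image's build-time configuration:

```go
package main

import (
	"fmt"

	"github.com/dotcloud/docker/runconfig"
)

func main() {
	// The user supplied only a command, as with `docker run <image> ps aux`.
	user := &runconfig.Config{Cmd: []string{"ps", "aux"}}

	// The image carries the defaults recorded at build time.
	image := &runconfig.Config{
		Env:        []string{"PATH=/usr/local/bin:/usr/bin"},
		Entrypoint: []string{"/bin/sh", "-c"},
		WorkingDir: "/root",
	}

	// Merge mutates the user config in place: unset fields (Env,
	// Entrypoint, WorkingDir) are inherited from the image config,
	// while the explicit Cmd is kept as-is.
	if err := runconfig.Merge(user, image); err != nil {
		panic(err)
	}

	fmt.Println(user.Entrypoint, user.Cmd, user.WorkingDir)
	// [/bin/sh -c] [ps aux] /root
}
```

This pairs with the Create() validation above: after the merge, a configuration with neither an Entrypoint nor a Cmd is rejected with "No command specified".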
@@ -527,7 +538,7 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin // Commit creates a new filesystem image from the current state of a container. // The image can optionally be tagged into a repository -func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *Config) (*Image, error) { +func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*Image, error) { // FIXME: freeze the container before copying it to avoid data corruption? // FIXME: this shouldn't be in commands. if err := container.Mount(); err != nil { @@ -539,6 +550,8 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, a if err != nil { return nil, err } + defer rwTar.Close() + // Create a new image from the container's base layers + a new layer from container changes img, err := runtime.graph.Create(rwTar, container, comment, author, config) if err != nil { @@ -688,7 +701,7 @@ func NewRuntimeFromDirectory(config *DaemonConfig, eng *engine.Engine) (*Runtime return nil, err } - localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", VERSION)) + localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION)) sysInitPath := utils.DockerInitPath(localCopy) if sysInitPath == "" { return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.io/en/latest/contributing/devenvironment for official build instructions.") @@ -708,22 +721,22 @@ func NewRuntimeFromDirectory(config *DaemonConfig, eng *engine.Engine) (*Runtime sysInitPath = localCopy } - sysInfo := sysinfo.New(false) + var ( + ed execdriver.Driver + sysInfo = sysinfo.New(false) + ) - /* - temporarilly disabled. 
-	*/
-	if false {
-		var ed execdriver.Driver
-		if driver := os.Getenv("EXEC_DRIVER"); driver == "lxc" {
-			ed, err = lxc.NewDriver(config.Root, sysInfo.AppArmor)
-		} else {
-			ed, err = chroot.NewDriver()
-		}
-		if ed != nil {
-		}
+	switch config.ExecDriver {
+	case "lxc":
+		// we want to give the lxc driver the full docker root because it needs
+		// to access and write config and template files in /var/lib/docker/containers/*
+		// to be backwards compatible
+		ed, err = lxc.NewDriver(config.Root, sysInfo.AppArmor)
+	case "native":
+		ed, err = native.NewDriver(path.Join(config.Root, "execdriver", "native"))
+	default:
+		return nil, fmt.Errorf("unknown exec driver %s", config.ExecDriver)
 	}
-	ed, err := lxc.NewDriver(config.Root, sysInfo.AppArmor)
 	if err != nil {
 		return nil, err
 	}
@@ -825,21 +838,21 @@ func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) {
 	if err != nil {
 		return nil, err
 	}
-	return EofReader(archive, func() { runtime.driver.Put(container.ID) }), nil
+	return utils.NewReadCloserWrapper(archive, func() error {
+		err := archive.Close()
+		runtime.driver.Put(container.ID)
+		return err
+	}), nil
 }
 
-func (runtime *Runtime) Run(c *Container, startCallback execdriver.StartCallback) (int, error) {
-	return runtime.execDriver.Run(c.command, startCallback)
+func (runtime *Runtime) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
+	return runtime.execDriver.Run(c.command, pipes, startCallback)
 }
 
 func (runtime *Runtime) Kill(c *Container, sig int) error {
 	return runtime.execDriver.Kill(c.command, sig)
 }
 
-func (runtime *Runtime) RestoreCommand(c *Container) error {
-	return runtime.execDriver.Restore(c.command)
-}
-
 // Nuke kills all containers then removes all content
 // from the content root, including images, volumes and
 // container filesystems.
diff --git a/server.go b/server.go
index f108f61740..d824d78d7a 100644
--- a/server.go
+++ b/server.go
@@ -2,13 +2,14 @@ package docker
 
 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/auth"
+	"github.com/dotcloud/docker/dockerversion"
 	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/pkg/graphdb"
 	"github.com/dotcloud/docker/registry"
+	"github.com/dotcloud/docker/runconfig"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
@@ -28,18 +29,10 @@ import (
 	"time"
 )
 
-func (srv *Server) Close() error {
-	return srv.runtime.Close()
-}
-
-func init() {
-	engine.Register("initserver", jobInitServer)
-}
-
 // InitServer runs the remote api server `srv` as a daemon,
 // Only one api server can run at the same time - this is enforced by a pidfile.
 // The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
-func jobInitServer(job *engine.Job) engine.Status { +func InitServer(job *engine.Job) engine.Status { job.Logf("Creating server") srv, err := NewServer(job.Eng, DaemonConfigFromJob(job)) if err != nil { @@ -290,6 +283,7 @@ func (srv *Server) ContainerExport(job *engine.Job) engine.Status { if err != nil { return job.Errorf("%s: %s", name, err) } + defer data.Close() // Stream the entire contents of the container (basically a volatile snapshot) if _, err := io.Copy(job.Stdout, data); err != nil { @@ -359,6 +353,7 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status { if err != nil { return job.Error(err) } + defer fs.Close() if _, err := io.Copy(job.Stdout, fs); err != nil { return job.Error(err) @@ -398,6 +393,7 @@ func (srv *Server) exportImage(image *Image, tempdir string) error { if err != nil { return err } + defer fs.Close() fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar")) if err != nil { @@ -434,14 +430,14 @@ func (srv *Server) Build(job *engine.Job) engine.Status { authConfig = &auth.AuthConfig{} configFile = &auth.ConfigFile{} tag string - context io.Reader + context io.ReadCloser ) job.GetenvJson("authConfig", authConfig) job.GetenvJson("configFile", configFile) repoName, tag = utils.ParseRepositoryTag(repoName) if remoteURL == "" { - context = job.Stdin + context = ioutil.NopCloser(job.Stdin) } else if utils.IsGIT(remoteURL) { if !strings.HasPrefix(remoteURL, "git://") { remoteURL = "https://" + remoteURL @@ -452,7 +448,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status { } defer os.RemoveAll(root) - if output, err := exec.Command("git", "clone", remoteURL, root).CombinedOutput(); err != nil { + if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { return job.Errorf("Error trying to use git: %s (%s)", err, output) } @@ -471,12 +467,13 @@ func (srv *Server) Build(job *engine.Job) engine.Status { if err != nil { return job.Error(err) } - c, err := MkBuildContext(string(dockerFile), nil) + c, err := archive.Generate("Dockerfile", string(dockerFile)) if err != nil { return job.Error(err) } context = c } + defer context.Close() sf := utils.NewStreamFormatter(job.GetenvBool("json")) b := NewBuildFile(srv, @@ -661,7 +658,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status { } defer file.Body.Close() - config, _, _, err := ParseRun([]string{img.ID, "echo", "insert", url, path}, srv.runtime.sysInfo) + config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.sysInfo) if err != nil { return job.Error(err) } @@ -824,10 +821,10 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { v.SetInt("NFd", utils.GetTotalUsedFds()) v.SetInt("NGoroutines", runtime.NumGoroutine()) v.Set("ExecutionDriver", srv.runtime.execDriver.Name()) - v.SetInt("NEventsListener", len(srv.events)) + v.SetInt("NEventsListener", len(srv.listeners)) v.Set("KernelVersion", kernelVersion) v.Set("IndexServerAddress", auth.IndexServerAddress()) - v.Set("InitSha1", utils.INITSHA1) + v.Set("InitSha1", dockerversion.INITSHA1) v.Set("InitPath", initPath) if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) @@ -1010,7 +1007,11 @@ func (srv *Server) Containers(job *engine.Job) engine.Status { out.Set("Id", container.ID) out.SetList("Names", names[container.ID]) out.Set("Image", srv.runtime.repositories.ImageName(container.Image)) - out.Set("Command", fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " "))) + if len(container.Args) > 0 { + 
out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, strings.Join(container.Args, " "))) + } else { + out.Set("Command", fmt.Sprintf("\"%s\"", container.Path)) + } out.SetInt64("Created", container.Created.Unix()) out.Set("Status", container.State.String()) str, err := container.NetworkSettings.PortMappingAPI().ToListString() @@ -1042,7 +1043,7 @@ func (srv *Server) ContainerCommit(job *engine.Job) engine.Status { if container == nil { return job.Errorf("No such container: %s", name) } - var config Config + var config runconfig.Config if err := job.GetenvJson("config", &config); err != nil { return job.Error(err) } @@ -1330,7 +1331,12 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status { defer srv.poolRemove("pull", localName+":"+tag) // Resolve the Repository name from fqn to endpoint + name - endpoint, remoteName, err := registry.ResolveRepositoryName(localName) + hostname, remoteName, err := registry.ResolveRepositoryName(localName) + if err != nil { + return job.Error(err) + } + + endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname) if err != nil { return job.Error(err) } @@ -1498,11 +1504,12 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, defer os.RemoveAll(layerData.Name()) // Send the layer - checksum, err = r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw) + checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw) if err != nil { return "", err } imgData.Checksum = checksum + imgData.ChecksumPayload = checksumPayload // Send the checksum if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil { return "", err @@ -1532,7 +1539,12 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status { defer srv.poolRemove("push", localName) // Resolve the Repository name from fqn to endpoint + name - endpoint, remoteName, err := registry.ResolveRepositoryName(localName) + hostname, remoteName, err := registry.ResolveRepositoryName(localName) + if err != nil { + return job.Error(err) + } + + endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname) if err != nil { return job.Error(err) } @@ -1573,7 +1585,7 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status { repo = job.Args[1] tag string sf = utils.NewStreamFormatter(job.GetenvBool("json")) - archive io.Reader + archive archive.ArchiveReader resp *http.Response ) if len(job.Args) > 2 { @@ -1599,7 +1611,9 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status { if err != nil { return job.Error(err) } - archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing") + progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing") + defer progressReader.Close() + archive = progressReader } img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil) if err != nil { @@ -1622,16 +1636,16 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { } else if len(job.Args) > 1 { return job.Errorf("Usage: %s", job.Name) } - config := ContainerConfigFromJob(job) + config := runconfig.ContainerConfigFromJob(job) if config.Memory != 0 && config.Memory < 524288 { return job.Errorf("Minimum memory limit allowed is 512k") } if config.Memory > 0 && 
!srv.runtime.sysInfo.MemoryLimit {
-		job.Errorf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
+		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
 		config.Memory = 0
 	}
 	if config.Memory > 0 && !srv.runtime.sysInfo.SwapLimit {
-		job.Errorf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
+		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
 		config.MemorySwap = -1
 	}
 	resolvConf, err := utils.GetResolvConf()
@@ -1639,7 +1653,7 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
 		return job.Error(err)
 	}
 	if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
-		job.Errorf("WARNING: Docker detected local DNS server on resolv.conf. Using default external servers: %v\n", defaultDns)
+		job.Errorf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers: %v\n", defaultDns)
 		config.Dns = defaultDns
 	}
 
@@ -1655,7 +1669,7 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
 		return job.Error(err)
 	}
 	if !container.Config.NetworkDisabled && srv.runtime.sysInfo.IPv4ForwardingDisabled {
-		job.Errorf("WARNING: IPv4 forwarding is disabled.\n")
+		job.Errorf("IPv4 forwarding is disabled.\n")
 	}
 	srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image))
 	// FIXME: this is necessary because runtime.Create might return a nil container
@@ -1665,7 +1679,7 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
 		job.Printf("%s\n", container.ID)
 	}
 	for _, warning := range buildWarnings {
-		return job.Errorf("%s\n", warning)
+		job.Errorf("%s\n", warning)
 	}
 	return engine.StatusOK
 }
@@ -1699,6 +1713,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 	name := job.Args[0]
 	removeVolume := job.GetenvBool("removeVolume")
 	removeLink := job.GetenvBool("removeLink")
+	forceRemove := job.GetenvBool("forceRemove")
 
 	container := srv.runtime.Get(name)
 
@@ -1736,7 +1751,13 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 	if container != nil {
 		if container.State.IsRunning() {
-			return job.Errorf("Impossible to remove a running container, please stop it first")
+			if forceRemove {
+				if err := container.Stop(5); err != nil {
+					return job.Errorf("Could not stop running container, cannot remove - %v", err)
+				}
+			} else {
+				return job.Errorf("Impossible to remove a running container, please stop it first or use -f")
+			}
 		}
 		if err := srv.runtime.Destroy(container); err != nil {
 			return job.Errorf("Cannot destroy container %s: %s", name, err)
@@ -1806,102 +1827,33 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 	return engine.StatusOK
 }
 
-var ErrImageReferenced = errors.New("Image referenced by a repository")
-
-func (srv *Server) deleteImageAndChildren(id string, imgs *engine.Table, byParents map[string][]*Image) error {
-	// If the image is referenced by a repo, do not delete
-	if len(srv.runtime.repositories.ByID()[id]) != 0 {
-		return ErrImageReferenced
-	}
-	// If the image is not referenced but has children, go recursive
-	referenced := false
-	for _, img := range byParents[id] {
-		if err := srv.deleteImageAndChildren(img.ID, imgs, byParents); err != nil {
-			if err != ErrImageReferenced {
-				return err
-			}
-			referenced = true
-		}
-	}
-	if referenced {
-		return ErrImageReferenced
-	}
-
-	// If the image is not 
referenced and has no children, remove it - byParents, err := srv.runtime.graph.ByParent() - if err != nil { - return err - } - if len(byParents[id]) == 0 && srv.canDeleteImage(id) == nil { - if err := srv.runtime.repositories.DeleteAll(id); err != nil { - return err - } - err := srv.runtime.graph.Delete(id) - if err != nil { - return err - } - out := &engine.Env{} - out.Set("Deleted", id) - imgs.Add(out) - srv.LogEvent("delete", id, "") - return nil - } - return nil -} - -func (srv *Server) deleteImageParents(img *Image, imgs *engine.Table) error { - if img.Parent != "" { - parent, err := srv.runtime.graph.Get(img.Parent) - if err != nil { - return err - } - byParents, err := srv.runtime.graph.ByParent() - if err != nil { - return err - } - // Remove all children images - if err := srv.deleteImageAndChildren(img.Parent, imgs, byParents); err != nil { - return err - } - return srv.deleteImageParents(parent, imgs) - } - return nil -} - -func (srv *Server) DeleteImage(name string, autoPrune bool) (*engine.Table, error) { +func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force bool) error { var ( repoName, tag string - img, err = srv.runtime.repositories.LookupImage(name) - imgs = engine.NewTable("", 0) tags = []string{} ) + repoName, tag = utils.ParseRepositoryTag(name) + if tag == "" { + tag = DEFAULTTAG + } + + img, err := srv.runtime.repositories.LookupImage(name) if err != nil { - return nil, fmt.Errorf("No such image: %s", name) - } - - // FIXME: What does autoPrune mean ? - if !autoPrune { - if err := srv.runtime.graph.Delete(img.ID); err != nil { - return nil, fmt.Errorf("Cannot delete image %s: %s", name, err) + if r, _ := srv.runtime.repositories.Get(repoName); r != nil { + return fmt.Errorf("No such image: %s:%s", repoName, tag) } - return nil, nil + return fmt.Errorf("No such image: %s", name) } - if !strings.Contains(img.ID, name) { - repoName, tag = utils.ParseRepositoryTag(name) + if strings.Contains(img.ID, name) { + repoName = "" + tag = "" } - // If we have a repo and the image is not referenced anywhere else - // then just perform an untag and do not validate. - // - // i.e. 
only validate if we are performing an actual delete and not
-	// an untag op
-	if repoName != "" && len(srv.runtime.repositories.ByID()[img.ID]) == 1 {
-		// Prevent deletion if image is used by a container
-		if err := srv.canDeleteImage(img.ID); err != nil {
-			return nil, err
-		}
+	byParents, err := srv.runtime.graph.ByParent()
+	if err != nil {
+		return err
 	}
 
 	// If deleting by id, see if the id belongs to only one repository
@@ -1913,51 +1865,68 @@ func (srv *Server) DeleteImage(name string, autoPrune bool) (*engine.Table, erro
 			if parsedTag != "" {
 				tags = append(tags, parsedTag)
 			}
-		} else if repoName != parsedRepo {
+		} else if repoName != parsedRepo && !force {
 			// the id belongs to multiple repos, like base:latest and user:test,
 			// in that case return conflict
-			return nil, fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories", utils.TruncateID(img.ID))
+			return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name)
 			}
 		}
 	} else {
 		tags = append(tags, tag)
 	}
+	if !first && len(tags) > 0 {
+		return nil
+	}
+
 	// Untag the current image
 	for _, tag := range tags {
 		tagDeleted, err := srv.runtime.repositories.Delete(repoName, tag)
 		if err != nil {
-			return nil, err
+			return err
 		}
 		if tagDeleted {
 			out := &engine.Env{}
-			out.Set("Untagged", img.ID)
+			out.Set("Untagged", repoName+":"+tag)
 			imgs.Add(out)
 			srv.LogEvent("untag", img.ID, "")
 		}
 	}
+	tags = srv.runtime.repositories.ByID()[img.ID]
+	if (len(tags) <= 1 && repoName == "") || len(tags) == 0 {
+		if len(byParents[img.ID]) == 0 {
+			if err := srv.canDeleteImage(img.ID); err != nil {
+				return err
+			}
+			if err := srv.runtime.repositories.DeleteAll(img.ID); err != nil {
+				return err
+			}
+			if err := srv.runtime.graph.Delete(img.ID); err != nil {
+				return err
+			}
+			out := &engine.Env{}
+			out.Set("Deleted", img.ID)
+			imgs.Add(out)
+			srv.LogEvent("delete", img.ID, "")
+			if img.Parent != "" {
+				err := srv.DeleteImage(img.Parent, imgs, false, force)
+				if first {
+					return err
+				}
-	if len(srv.runtime.repositories.ByID()[img.ID]) == 0 {
-		if err := srv.deleteImageAndChildren(img.ID, imgs, nil); err != nil {
-			if err != ErrImageReferenced {
-				return imgs, err
-			}
-		} else if err := srv.deleteImageParents(img, imgs); err != nil {
-			if err != ErrImageReferenced {
-				return imgs, err
 			}
+		}
 	}
-	return imgs, nil
+	return nil
 }
 
 func (srv *Server) ImageDelete(job *engine.Job) engine.Status {
 	if n := len(job.Args); n != 1 {
 		return job.Errorf("Usage: %s IMAGE", job.Name)
 	}
-
-	imgs, err := srv.DeleteImage(job.Args[0], job.GetenvBool("autoPrune"))
-	if err != nil {
+	imgs := engine.NewTable("", 0)
+	if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force")); err != nil {
 		return job.Error(err)
 	}
 	if len(imgs.Data) == 0 {
@@ -1988,7 +1957,7 @@ func (srv *Server) canDeleteImage(imgID string) error {
 	return nil
 }
 
-func (srv *Server) ImageGetCached(imgID string, config *Config) (*Image, error) {
+func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*Image, error) {
 
 	// Retrieve all images
 	images, err := srv.runtime.graph.Map()
@@ -2012,7 +1981,7 @@ func (srv *Server) ImageGetCached(imgID string, config *Config) (*Image, error)
 		if err != nil {
 			return nil, err
 		}
-		if CompareConfig(&img.ContainerConfig, config) {
+		if runconfig.Compare(&img.ContainerConfig, config) {
 			if match == nil || match.Created.Before(img.Created) {
 				match = img
 			}
@@ -2021,7 +1990,7 @@ func (srv *Server) ImageGetCached(imgID string, config *Config) (*Image, error)
 	return match, nil 
} -func (srv *Server) RegisterLinks(container *Container, hostConfig *HostConfig) error { +func (srv *Server) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { runtime := srv.runtime if hostConfig != nil && hostConfig.Links != nil { @@ -2065,7 +2034,7 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status { } // If no environment was set, then no hostconfig was passed. if len(job.Environ()) > 0 { - hostConfig := ContainerHostConfigFromJob(job) + hostConfig := runconfig.ContainerHostConfigFromJob(job) // Validate the HostConfig binds. Make sure that: // 1) the source of a bind mount isn't / // The bind mount "/:/foo" isn't allowed. @@ -2309,7 +2278,7 @@ func (srv *Server) JobInspect(job *engine.Job) engine.Status { } object = &struct { *Container - HostConfig *HostConfig + HostConfig *runconfig.HostConfig }{container, container.hostConfig} default: return job.Errorf("Unknown kind: %s", kind) @@ -2339,6 +2308,7 @@ func (srv *Server) ContainerCopy(job *engine.Job) engine.Status { if err != nil { return job.Error(err) } + defer data.Close() if _, err := io.Copy(job.Stdout, data); err != nil { return job.Error(err) @@ -2360,6 +2330,7 @@ func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) { pushingPool: make(map[string]chan struct{}), events: make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events listeners: make(map[string]chan utils.JSONMessage), + running: true, } runtime.srv = srv return srv, nil @@ -2409,6 +2380,24 @@ func (srv *Server) GetEvents() []utils.JSONMessage { return srv.events } +func (srv *Server) SetRunning(status bool) { + srv.Lock() + defer srv.Unlock() + + srv.running = status +} + +func (srv *Server) IsRunning() bool { + srv.RLock() + defer srv.RUnlock() + return srv.running +} + +func (srv *Server) Close() error { + srv.SetRunning(false) + return srv.runtime.Close() +} + type Server struct { sync.RWMutex runtime *Runtime @@ -2417,4 +2406,5 @@ type Server struct { events []utils.JSONMessage listeners map[string]chan utils.JSONMessage Eng *engine.Engine + running bool } diff --git a/sorter.go b/sorter.go index 9b3e1a9486..b49ac58c24 100644 --- a/sorter.go +++ b/sorter.go @@ -2,31 +2,6 @@ package docker import "sort" -type portSorter struct { - ports []Port - by func(i, j Port) bool -} - -func (s *portSorter) Len() int { - return len(s.ports) -} - -func (s *portSorter) Swap(i, j int) { - s.ports[i], s.ports[j] = s.ports[j], s.ports[i] -} - -func (s *portSorter) Less(i, j int) bool { - ip := s.ports[i] - jp := s.ports[j] - - return s.by(ip, jp) -} - -func sortPorts(ports []Port, predicate func(i, j Port) bool) { - s := &portSorter{ports, predicate} - sort.Sort(s) -} - type containerSorter struct { containers []*Container by func(i, j *Container) bool diff --git a/sysinit/sysinit.go b/sysinit/sysinit.go index dcf0eddf56..c84c05982c 100644 --- a/sysinit/sysinit.go +++ b/sysinit/sysinit.go @@ -5,8 +5,8 @@ import ( "flag" "fmt" "github.com/dotcloud/docker/execdriver" - _ "github.com/dotcloud/docker/execdriver/chroot" _ "github.com/dotcloud/docker/execdriver/lxc" + _ "github.com/dotcloud/docker/execdriver/native" "io/ioutil" "log" "os" @@ -27,18 +27,12 @@ func setupEnv(args *execdriver.InitArgs) { func executeProgram(args *execdriver.InitArgs) error { setupEnv(args) + dockerInitFct, err := execdriver.GetInitFunc(args.Driver) if err != nil { panic(err) } return dockerInitFct(args) - - if args.Driver == "lxc" { - // Will never reach - } else if args.Driver == "chroot" { - } - - return nil } // Sys Init code @@ 
-59,19 +53,21 @@ func SysInit() { privileged = flag.Bool("privileged", false, "privileged mode") mtu = flag.Int("mtu", 1500, "interface mtu") driver = flag.String("driver", "", "exec driver") + pipe = flag.Int("pipe", 0, "sync pipe fd") + console = flag.String("console", "", "console (pty slave) path") + root = flag.String("root", ".", "root path for configuration files") ) flag.Parse() // Get env var env []string - content, err := ioutil.ReadFile("/.dockerenv") + content, err := ioutil.ReadFile(".dockerenv") if err != nil { log.Fatalf("Unable to load environment variables: %v", err) } if err := json.Unmarshal(content, &env); err != nil { log.Fatalf("Unable to unmarshal environment variables: %v", err) } - // Propagate the plugin-specific container env variable env = append(env, "container="+os.Getenv("container")) @@ -85,6 +81,9 @@ func SysInit() { Args: flag.Args(), Mtu: *mtu, Driver: *driver, + Console: *console, + Pipe: *pipe, + Root: *root, } if err := executeProgram(args); err != nil { diff --git a/tags_unit_test.go b/tags_unit_test.go index 1341b989fe..b6236280a8 100644 --- a/tags_unit_test.go +++ b/tags_unit_test.go @@ -31,6 +31,8 @@ func mkTestTagStore(root string, t *testing.T) *TagStore { t.Fatal(err) } img := &Image{ID: testImageID} + // FIXME: this fails on Darwin with: + // tags_unit_test.go:36: mkdir /var/folders/7g/b3ydb5gx4t94ndr_cljffbt80000gq/T/docker-test569b-tRunner-075013689/vfs/dir/foo/etc/postgres: permission denied if err := graph.Register(nil, archive, img); err != nil { t.Fatal(err) } diff --git a/utils.go b/utils.go index e3ba08d51c..ef666b0de1 100644 --- a/utils.go +++ b/utils.go @@ -1,320 +1,33 @@ package docker import ( - "fmt" "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/pkg/namesgenerator" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" - "io" - "strconv" - "strings" - "sync/atomic" ) type Change struct { archive.Change } -// Compare two Config struct. 
Do not compare the "Image" nor "Hostname" fields -// If OpenStdin is set, then it differs -func CompareConfig(a, b *Config) bool { - if a == nil || b == nil || - a.OpenStdin || b.OpenStdin { - return false - } - if a.AttachStdout != b.AttachStdout || - a.AttachStderr != b.AttachStderr || - a.User != b.User || - a.Memory != b.Memory || - a.MemorySwap != b.MemorySwap || - a.CpuShares != b.CpuShares || - a.OpenStdin != b.OpenStdin || - a.Tty != b.Tty || - a.VolumesFrom != b.VolumesFrom { - return false - } - if len(a.Cmd) != len(b.Cmd) || - len(a.Dns) != len(b.Dns) || - len(a.Env) != len(b.Env) || - len(a.PortSpecs) != len(b.PortSpecs) || - len(a.ExposedPorts) != len(b.ExposedPorts) || - len(a.Entrypoint) != len(b.Entrypoint) || - len(a.Volumes) != len(b.Volumes) { - return false - } - - for i := 0; i < len(a.Cmd); i++ { - if a.Cmd[i] != b.Cmd[i] { - return false - } - } - for i := 0; i < len(a.Dns); i++ { - if a.Dns[i] != b.Dns[i] { - return false - } - } - for i := 0; i < len(a.Env); i++ { - if a.Env[i] != b.Env[i] { - return false - } - } - for i := 0; i < len(a.PortSpecs); i++ { - if a.PortSpecs[i] != b.PortSpecs[i] { - return false - } - } - for k := range a.ExposedPorts { - if _, exists := b.ExposedPorts[k]; !exists { - return false - } - } - for i := 0; i < len(a.Entrypoint); i++ { - if a.Entrypoint[i] != b.Entrypoint[i] { - return false - } - } - for key := range a.Volumes { - if _, exists := b.Volumes[key]; !exists { - return false - } - } - return true -} - -func MergeConfig(userConf, imageConf *Config) error { - if userConf.User == "" { - userConf.User = imageConf.User - } - if userConf.Memory == 0 { - userConf.Memory = imageConf.Memory - } - if userConf.MemorySwap == 0 { - userConf.MemorySwap = imageConf.MemorySwap - } - if userConf.CpuShares == 0 { - userConf.CpuShares = imageConf.CpuShares - } - if userConf.ExposedPorts == nil || len(userConf.ExposedPorts) == 0 { - userConf.ExposedPorts = imageConf.ExposedPorts - } else if imageConf.ExposedPorts != nil { - if userConf.ExposedPorts == nil { - userConf.ExposedPorts = make(map[Port]struct{}) - } - for port := range imageConf.ExposedPorts { - if _, exists := userConf.ExposedPorts[port]; !exists { - userConf.ExposedPorts[port] = struct{}{} - } - } - } - - if userConf.PortSpecs != nil && len(userConf.PortSpecs) > 0 { - if userConf.ExposedPorts == nil { - userConf.ExposedPorts = make(map[Port]struct{}) - } - ports, _, err := parsePortSpecs(userConf.PortSpecs) - if err != nil { - return err - } - for port := range ports { - if _, exists := userConf.ExposedPorts[port]; !exists { - userConf.ExposedPorts[port] = struct{}{} - } - } - userConf.PortSpecs = nil - } - if imageConf.PortSpecs != nil && len(imageConf.PortSpecs) > 0 { - utils.Debugf("Migrating image port specs to containter: %s", strings.Join(imageConf.PortSpecs, ", ")) - if userConf.ExposedPorts == nil { - userConf.ExposedPorts = make(map[Port]struct{}) - } - - ports, _, err := parsePortSpecs(imageConf.PortSpecs) - if err != nil { - return err - } - for port := range ports { - if _, exists := userConf.ExposedPorts[port]; !exists { - userConf.ExposedPorts[port] = struct{}{} - } - } - } - if !userConf.Tty { - userConf.Tty = imageConf.Tty - } - if !userConf.OpenStdin { - userConf.OpenStdin = imageConf.OpenStdin - } - if !userConf.StdinOnce { - userConf.StdinOnce = imageConf.StdinOnce - } - if userConf.Env == nil || len(userConf.Env) == 0 { - userConf.Env = imageConf.Env - } else { - for _, imageEnv := range imageConf.Env { - found := false - imageEnvKey := strings.Split(imageEnv, 
"=")[0] - for _, userEnv := range userConf.Env { - userEnvKey := strings.Split(userEnv, "=")[0] - if imageEnvKey == userEnvKey { - found = true - } - } - if !found { - userConf.Env = append(userConf.Env, imageEnv) - } - } - } - if userConf.Cmd == nil || len(userConf.Cmd) == 0 { - userConf.Cmd = imageConf.Cmd - } - if userConf.Dns == nil || len(userConf.Dns) == 0 { - userConf.Dns = imageConf.Dns - } else { - //duplicates aren't an issue here - userConf.Dns = append(userConf.Dns, imageConf.Dns...) - } - if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 { - userConf.Entrypoint = imageConf.Entrypoint - } - if userConf.WorkingDir == "" { - userConf.WorkingDir = imageConf.WorkingDir - } - if userConf.VolumesFrom == "" { - userConf.VolumesFrom = imageConf.VolumesFrom - } - if userConf.Volumes == nil || len(userConf.Volumes) == 0 { - userConf.Volumes = imageConf.Volumes - } else { - for k, v := range imageConf.Volumes { - userConf.Volumes[k] = v - } - } - return nil -} - -func parseLxcConfOpts(opts ListOpts) ([]KeyValuePair, error) { - out := make([]KeyValuePair, opts.Len()) - for i, o := range opts.GetAll() { - k, v, err := parseLxcOpt(o) - if err != nil { - return nil, err - } - out[i] = KeyValuePair{Key: k, Value: v} - } - return out, nil -} - -func parseLxcOpt(opt string) (string, string, error) { - parts := strings.SplitN(opt, "=", 2) - if len(parts) != 2 { - return "", "", fmt.Errorf("Unable to parse lxc conf option: %s", opt) - } - return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil -} - -// FIXME: network related stuff (including parsing) should be grouped in network file -const ( - PortSpecTemplate = "ip:hostPort:containerPort" - PortSpecTemplateFormat = "ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort" -) - -// We will receive port specs in the format of ip:public:private/proto and these need to be -// parsed in the internal types -func parsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { - var ( - exposedPorts = make(map[Port]struct{}, len(ports)) - bindings = make(map[Port][]PortBinding) - ) - - for _, rawPort := range ports { - proto := "tcp" - - if i := strings.LastIndex(rawPort, "/"); i != -1 { - proto = rawPort[i+1:] - rawPort = rawPort[:i] - } - if !strings.Contains(rawPort, ":") { - rawPort = fmt.Sprintf("::%s", rawPort) - } else if len(strings.Split(rawPort, ":")) == 2 { - rawPort = fmt.Sprintf(":%s", rawPort) - } - - parts, err := utils.PartParser(PortSpecTemplate, rawPort) - if err != nil { - return nil, nil, err - } - - var ( - containerPort = parts["containerPort"] - rawIp = parts["ip"] - hostPort = parts["hostPort"] - ) - - if containerPort == "" { - return nil, nil, fmt.Errorf("No port specified: %s", rawPort) - } - if _, err := strconv.ParseUint(containerPort, 10, 16); err != nil { - return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort) - } - if _, err := strconv.ParseUint(hostPort, 10, 16); hostPort != "" && err != nil { - return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) - } - - port := NewPort(proto, containerPort) - if _, exists := exposedPorts[port]; !exists { - exposedPorts[port] = struct{}{} - } - - binding := PortBinding{ - HostIp: rawIp, - HostPort: hostPort, - } - bslice, exists := bindings[port] - if !exists { - bslice = []PortBinding{} - } - bindings[port] = append(bslice, binding) - } - return exposedPorts, bindings, nil -} - -// Splits a port in the format of port/proto -func splitProtoPort(rawPort string) (string, string) { - parts := 
strings.Split(rawPort, "/") - l := len(parts) - if l == 0 { - return "", "" - } - if l == 1 { - return "tcp", rawPort - } - return parts[0], parts[1] -} - -func parsePort(rawPort string) (int, error) { - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -func migratePortMappings(config *Config, hostConfig *HostConfig) error { +func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error { if config.PortSpecs != nil { - ports, bindings, err := parsePortSpecs(config.PortSpecs) + ports, bindings, err := nat.ParsePortSpecs(config.PortSpecs) if err != nil { return err } config.PortSpecs = nil if len(bindings) > 0 { if hostConfig == nil { - hostConfig = &HostConfig{} + hostConfig = &runconfig.HostConfig{} } hostConfig.PortBindings = bindings } if config.ExposedPorts == nil { - config.ExposedPorts = make(map[Port]struct{}, len(ports)) + config.ExposedPorts = make(nat.PortSet, len(ports)) } for k, v := range ports { config.ExposedPorts[k] = v @@ -341,28 +54,3 @@ func (c *checker) Exists(name string) bool { func generateRandomName(runtime *Runtime) (string, error) { return namesgenerator.GenerateRandomName(&checker{runtime}) } - -// Read an io.Reader and call a function when it returns EOF -func EofReader(r io.Reader, callback func()) *eofReader { - return &eofReader{ - Reader: r, - callback: callback, - } -} - -type eofReader struct { - io.Reader - gotEOF int32 - callback func() -} - -func (r *eofReader) Read(p []byte) (n int, err error) { - n, err = r.Reader.Read(p) - if err == io.EOF { - // Use atomics to make the gotEOF check threadsafe - if atomic.CompareAndSwapInt32(&r.gotEOF, 0, 1) { - r.callback() - } - } - return -} diff --git a/utils/checksum.go b/utils/checksum.go new file mode 100644 index 0000000000..1c85aa63a3 --- /dev/null +++ b/utils/checksum.go @@ -0,0 +1,24 @@ +package utils + +import ( + "encoding/hex" + "hash" + "io" +) + +type CheckSum struct { + io.Reader + Hash hash.Hash +} + +func (cs *CheckSum) Read(buf []byte) (int, error) { + n, err := cs.Reader.Read(buf) + if err == nil { + cs.Hash.Write(buf[:n]) + } + return n, err +} + +func (cs *CheckSum) Sum() string { + return hex.EncodeToString(cs.Hash.Sum(nil)) +} diff --git a/utils/jsonmessage.go b/utils/jsonmessage.go index 3e4e0f86ad..9050dda746 100644 --- a/utils/jsonmessage.go +++ b/utils/jsonmessage.go @@ -52,7 +52,7 @@ func (p *JSONProgress) String() string { } numbersBox = fmt.Sprintf("%8v/%v", current, total) - if p.Start > 0 && percentage < 50 { + if p.Current > 0 && p.Start > 0 && percentage < 50 { fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0)) perEntry := fromStart / time.Duration(p.Current) left := time.Duration(p.Total-p.Current) * perEntry diff --git a/utils/streamformatter.go b/utils/streamformatter.go index 9345c3cb16..8876fa5cb7 100644 --- a/utils/streamformatter.go +++ b/utils/streamformatter.go @@ -14,6 +14,10 @@ func NewStreamFormatter(json bool) *StreamFormatter { return &StreamFormatter{json, false} } +const streamNewline = "\r\n" + +var streamNewlineBytes = []byte(streamNewline) + func (sf *StreamFormatter) FormatStream(str string) []byte { sf.used = true if sf.json { @@ -21,7 +25,7 @@ func (sf *StreamFormatter) FormatStream(str string) []byte { if err != nil { return sf.FormatError(err) } - return b + return append(b, streamNewlineBytes...) 
}
 	return []byte(str + "\r")
 }
@@ -34,9 +38,9 @@ func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []b
 		if err != nil {
 			return sf.FormatError(err)
 		}
-		return b
+		return append(b, streamNewlineBytes...)
 	}
-	return []byte(str + "\r\n")
+	return []byte(str + streamNewline)
 }
 
 func (sf *StreamFormatter) FormatError(err error) []byte {
@@ -47,11 +51,11 @@ func (sf *StreamFormatter) FormatError(err error) []byte {
 			jsonError = &JSONError{Message: err.Error()}
 		}
 		if b, err := json.Marshal(&JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil {
-			return b
+			return append(b, streamNewlineBytes...)
 		}
-		return []byte("{\"error\":\"format error\"}")
+		return []byte("{\"error\":\"format error\"}" + streamNewline)
 	}
-	return []byte("Error: " + err.Error() + "\r\n")
+	return []byte("Error: " + err.Error() + streamNewline)
 }
 
 func (sf *StreamFormatter) FormatProgress(id, action string, progress *JSONProgress) []byte {
diff --git a/utils/tarsum.go b/utils/tarsum.go
index 786196b6b4..67e94aaebc 100644
--- a/utils/tarsum.go
+++ b/utils/tarsum.go
@@ -1,11 +1,11 @@ package utils
 
 import (
-	"archive/tar"
 	"bytes"
 	"compress/gzip"
 	"crypto/sha256"
 	"encoding/hex"
+	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 	"hash"
 	"io"
 	"sort"
diff --git a/utils/utils.go b/utils/utils.go
index 542ab49702..07b8f6a3d0 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -8,6 +8,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"github.com/dotcloud/docker/dockerversion"
 	"index/suffixarray"
 	"io"
 	"io/ioutil"
@@ -23,12 +24,6 @@ import (
 	"time"
 )
 
-var (
-	IAMSTATIC bool   // whether or not Docker itself was compiled statically via ./hack/make.sh binary
-	INITSHA1  string // sha1sum of separate static dockerinit, if Docker itself was compiled dynamically via ./hack/make.sh dynbinary
-	INITPATH  string // custom location to search for a valid dockerinit binary (available for packagers as a last resort escape hatch)
-)
-
 // A common interface to access the Fatal method of
 // both testing.B and testing.T.
 type Fataler interface {
@@ -38,7 +33,7 @@ type Fataler interface {
 // Go is a basic promise implementation: it wraps a function call in a goroutine,
 // and returns a channel which will later return the function's return value.
 func Go(f func() error) chan error {
-	ch := make(chan error)
+	ch := make(chan error, 1)
 	go func() {
 		ch <- f()
 	}()
@@ -201,7 +196,7 @@ func isValidDockerInitPath(target string, selfPath string) bool { // target and
 	if target == "" {
 		return false
 	}
-	if IAMSTATIC {
+	if dockerversion.IAMSTATIC {
 		if selfPath == "" {
 			return false
 		}
@@ -218,7 +213,7 @@ func isValidDockerInitPath(target string, selfPath string) bool { // target and
 	}
 	return os.SameFile(targetFileInfo, selfPathFileInfo)
 }
-	return INITSHA1 != "" && dockerInitSha1(target) == INITSHA1
+	return dockerversion.INITSHA1 != "" && dockerInitSha1(target) == dockerversion.INITSHA1
 }
 
 // Figure out the path of our dockerinit (which may be SelfPath())
@@ -230,7 +225,7 @@ func DockerInitPath(localCopy string) string {
 	}
 	var possibleInits = []string{
 		localCopy,
-		INITPATH,
+		dockerversion.INITPATH,
 		filepath.Join(filepath.Dir(selfPath), "dockerinit"),
 
 		// FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec." 
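The streamformatter change above terminates every JSON message with "\r\n", so each record in the API progress stream now sits on its own line. A minimal sketch of the consuming side (hypothetical client code, not part of this patch; the two sample messages are invented):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// Two messages as the daemon would now emit them: one JSON object
	// per "\r\n"-terminated line.
	stream := bytes.NewBufferString(
		"{\"status\":\"Pulling\",\"id\":\"d6e5f3a7\"}\r\n" +
			"{\"status\":\"Download complete\",\"id\":\"d6e5f3a7\"}\r\n")

	dec := json.NewDecoder(stream)
	for {
		var msg struct {
			Status string `json:"status"`
			ID     string `json:"id"`
		}
		// The decoder treats the trailing \r\n as insignificant
		// whitespace between values, so it frames records cleanly.
		if err := dec.Decode(&msg); err != nil {
			break // io.EOF once the stream is drained
		}
		fmt.Println(msg.Status, msg.ID)
	}
}
```

The explicit newline also keeps the stream friendly to line-oriented tools; previously, consecutive JSON objects were concatenated with no separator at all.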
@@ -611,16 +606,22 @@ func GetKernelVersion() (*KernelVersionInfo, error) { func ParseRelease(release string) (*KernelVersionInfo, error) { var ( kernel, major, minor, parsed int - flavor string + flavor, partial string ) // Ignore error from Sscanf to allow an empty flavor. Instead, just // make sure we got all the version numbers. - parsed, _ = fmt.Sscanf(release, "%d.%d.%d%s", &kernel, &major, &minor, &flavor) - if parsed < 3 { + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { return nil, errors.New("Can't parse kernel version " + release) } + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + return &KernelVersionInfo{ Kernel: kernel, Major: major, @@ -750,7 +751,7 @@ func GetNameserversAsCIDR(resolvConf []byte) []string { } // FIXME: Change this not to receive default value as parameter -func ParseHost(defaultHost string, defaultPort int, defaultUnix, addr string) (string, error) { +func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) { var ( proto string host string @@ -758,6 +759,8 @@ func ParseHost(defaultHost string, defaultPort int, defaultUnix, addr string) (s ) addr = strings.TrimSpace(addr) switch { + case addr == "tcp://": + return "", fmt.Errorf("Invalid bind address format: %s", addr) case strings.HasPrefix(addr, "unix://"): proto = "unix" addr = strings.TrimPrefix(addr, "unix://") @@ -793,12 +796,13 @@ func ParseHost(defaultHost string, defaultPort int, defaultUnix, addr string) (s if p, err := strconv.Atoi(hostParts[1]); err == nil && p != 0 { port = p } else { - port = defaultPort + return "", fmt.Errorf("Invalid bind address format: %s", addr) } + } else if proto == "tcp" && !strings.Contains(addr, ":") { + return "", fmt.Errorf("Invalid bind address format: %s", addr) } else { host = addr - port = defaultPort } if proto == "unix" { return fmt.Sprintf("%s://%s", proto, host), nil @@ -836,37 +840,6 @@ func ParseRepositoryTag(repos string) (string, string) { return repos, "" } -type User struct { - Uid string // user id - Gid string // primary group id - Username string - Name string - HomeDir string -} - -// UserLookup check if the given username or uid is present in /etc/passwd -// and returns the user struct. -// If the username is not found, an error is returned. -func UserLookup(uid string) (*User, error) { - file, err := ioutil.ReadFile("/etc/passwd") - if err != nil { - return nil, err - } - for _, line := range strings.Split(string(file), "\n") { - data := strings.Split(line, ":") - if len(data) > 5 && (data[0] == uid || data[2] == uid) { - return &User{ - Uid: data[2], - Gid: data[3], - Username: data[0], - Name: data[4], - HomeDir: data[5], - }, nil - } - } - return nil, fmt.Errorf("User not found in /etc/passwd") -} - // An StatusError reports an unsuccessful exit by a command. type StatusError struct { Status string @@ -912,16 +885,6 @@ func ShellQuoteArguments(args []string) string { return buf.String() } -func IsClosedError(err error) bool { - /* This comparison is ugly, but unfortunately, net.go doesn't export errClosing. 
- * See:
- * http://golang.org/src/pkg/net/net.go
- * https://code.google.com/p/go/issues/detail?id=4337
- * https://groups.google.com/forum/#!msg/golang-nuts/0_aaCvBmOcM/SptmDyX1XJMJ
- */
- return strings.HasSuffix(err.Error(), "use of closed network connection")
-}
-
 func PartParser(template, data string) (map[string]string, error) {
 // ip:public:private
 var (
@@ -1015,3 +978,43 @@ func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
 closer: closer,
 }
 }
+
+// ReplaceOrAppendEnvValues returns the defaults with the overrides either
+// replaced by env key or appended to the list
+func ReplaceOrAppendEnvValues(defaults, overrides []string) []string {
+ cache := make(map[string]int, len(defaults))
+ for i, e := range defaults {
+ parts := strings.SplitN(e, "=", 2)
+ cache[parts[0]] = i
+ }
+ for _, value := range overrides {
+ parts := strings.SplitN(value, "=", 2)
+ if i, exists := cache[parts[0]]; exists {
+ defaults[i] = value
+ } else {
+ defaults = append(defaults, value)
+ }
+ }
+ return defaults
+}
+
+// ReadSymlinkedDirectory returns the target directory of a symlink.
+// The target of the symbolic link must not be a file.
+func ReadSymlinkedDirectory(path string) (string, error) {
+ var realPath string
+ var err error
+ if realPath, err = filepath.Abs(path); err != nil {
+ return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+ }
+ if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+ return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+ }
+ realPathInfo, err := os.Stat(realPath)
+ if err != nil {
+ return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+ }
+ if !realPathInfo.Mode().IsDir() {
+ return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
+ }
+ return realPath, nil
+}
diff --git a/utils/utils_test.go b/utils/utils_test.go
index b0a5acb170..444d2a2428 100644
--- a/utils/utils_test.go
+++ b/utils/utils_test.go
@@ -5,6 +5,7 @@ import (
 "errors"
 "io"
 "io/ioutil"
+ "os"
 "strings"
 "testing"
 )
@@ -301,34 +302,36 @@ func assertRAMInBytes(t *testing.T, size string, expectError bool, expectedBytes
 func TestParseHost(t *testing.T) {
 var (
 defaultHttpHost = "127.0.0.1"
- defaultHttpPort = 4243
 defaultUnix = "/var/run/docker.sock"
 )
- if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "0.0.0.0"); err != nil || addr != "tcp://0.0.0.0:4243" {
- t.Errorf("0.0.0.0 -> expected tcp://0.0.0.0:4243, got %s", addr)
+ if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.0"); err == nil {
+ t.Errorf("tcp 0.0.0.0 address expected error return, but err == nil, got %s", addr)
 }
- if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "0.0.0.1:5555"); err != nil || addr != "tcp://0.0.0.1:5555" {
+ if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://"); err == nil {
+ t.Errorf("default tcp:// address expected error return, but err == nil, got %s", addr)
+ }
+ if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.1:5555"); err != nil || addr != "tcp://0.0.0.1:5555" {
 t.Errorf("0.0.0.1:5555 -> expected tcp://0.0.0.1:5555, got %s", addr)
 }
- if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" {
+ if addr, err := ParseHost(defaultHttpHost, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" {
 t.Errorf(":6666 -> expected tcp://127.0.0.1:6666, got %s", addr)
 }
- if addr, err := ParseHost(defaultHttpHost, defaultHttpPort,
defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" { t.Errorf("tcp://:7777 -> expected tcp://127.0.0.1:7777, got %s", addr) } - if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" { t.Errorf("empty argument -> expected unix:///var/run/docker.sock, got %s", addr) } - if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" { t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) } - if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "unix://"); err != nil || addr != "unix:///var/run/docker.sock" { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix://"); err != nil || addr != "unix:///var/run/docker.sock" { t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) } - if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "udp://127.0.0.1"); err == nil { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1"); err == nil { t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) } - if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "udp://127.0.0.1:4243"); err == nil { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1:4243"); err == nil { t.Errorf("udp protocol address expected error return, but err == nil. 
Got %s", addr) } } @@ -418,6 +421,7 @@ func TestParseRelease(t *testing.T) { assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) assertParseRelease(t, "3.12.8tag", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) + assertParseRelease(t, "3.12-1-amd64", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) } func TestParsePortMapping(t *testing.T) { @@ -477,3 +481,96 @@ func StrSlicesEqual(a, b []string) bool { return true } + +func TestReplaceAndAppendEnvVars(t *testing.T) { + var ( + d = []string{"HOME=/"} + o = []string{"HOME=/root", "TERM=xterm"} + ) + + env := ReplaceOrAppendEnvValues(d, o) + if len(env) != 2 { + t.Fatalf("expected len of 2 got %d", len(env)) + } + if env[0] != "HOME=/root" { + t.Fatalf("expected HOME=/root got '%s'", env[0]) + } + if env[1] != "TERM=xterm" { + t.Fatalf("expected TERM=xterm got '%s'", env[1]) + } +} + +// Reading a symlink to a directory must return the directory +func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { + var err error + if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { + t.Errorf("failed to create directory: %s", err) + } + + if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { + t.Fatalf("failed to read symlink to directory: %s", err) + } + + if path != "/tmp/testReadSymlinkToExistingDirectory" { + t.Fatalf("symlink returned unexpected directory: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { + t.Errorf("failed to remove temporary directory: %s", err) + } + + if err = os.Remove("/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +// Reading a non-existing symlink must fail +func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { + var path string + var err error + if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { + t.Fatalf("error expected for non-existing symlink") + } + + if path != "" { + t.Fatalf("expected empty path, but '%s' was returned", path) + } +} + +// Reading a symlink to a file must fail +func TestReadSymlinkedDirectoryToFile(t *testing.T) { + var err error + var file *os.File + + if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + file.Close() + + if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { + t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") + } + + if path != "" { + t.Fatalf("path should've been empty: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { + t.Errorf("failed to remove file: %s", err) + } + + if err = os.Remove("/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} diff --git a/utils_test.go b/utils_test.go index 4b8cfba39f..31fa12b6ad 100644 --- a/utils_test.go +++ b/utils_test.go @@ -1,8 +1,8 @@ package docker import ( - "archive/tar" "bytes" + 
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" ) diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go new file mode 100644 index 0000000000..e8b973c1fa --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go @@ -0,0 +1,304 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tar implements access to tar archives. +// It aims to cover most of the variations, including those produced +// by GNU and BSD tars. +// +// References: +// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5 +// http://www.gnu.org/software/tar/manual/html_node/Standard.html +// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html +package tar + +import ( + "bytes" + "errors" + "fmt" + "os" + "path" + "time" +) + +const ( + blockSize = 512 + + // Types + TypeReg = '0' // regular file + TypeRegA = '\x00' // regular file + TypeLink = '1' // hard link + TypeSymlink = '2' // symbolic link + TypeChar = '3' // character device node + TypeBlock = '4' // block device node + TypeDir = '5' // directory + TypeFifo = '6' // fifo node + TypeCont = '7' // reserved + TypeXHeader = 'x' // extended header + TypeXGlobalHeader = 'g' // global extended header + TypeGNULongName = 'L' // Next file has a long name + TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name +) + +// A Header represents a single header in a tar archive. +// Some fields may not be populated. +type Header struct { + Name string // name of header file entry + Mode int64 // permission and mode bits + Uid int // user id of owner + Gid int // group id of owner + Size int64 // length in bytes + ModTime time.Time // modified time + Typeflag byte // type of header entry + Linkname string // target name of link + Uname string // user name of owner + Gname string // group name of owner + Devmajor int64 // major number of character or block device + Devminor int64 // minor number of character or block device + AccessTime time.Time // access time + ChangeTime time.Time // status change time + Xattrs map[string]string +} + +// File name constants from the tar spec. +const ( + fileNameSize = 100 // Maximum number of bytes in a standard tar name. + fileNamePrefixSize = 155 // Maximum number of ustar extension bytes. +) + +// FileInfo returns an os.FileInfo for the Header. +func (h *Header) FileInfo() os.FileInfo { + return headerFileInfo{h} +} + +// headerFileInfo implements os.FileInfo. +type headerFileInfo struct { + h *Header +} + +func (fi headerFileInfo) Size() int64 { return fi.h.Size } +func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } +func (fi headerFileInfo) Sys() interface{} { return fi.h } + +// Name returns the base name of the file. +func (fi headerFileInfo) Name() string { + if fi.IsDir() { + return path.Base(path.Clean(fi.h.Name)) + } + return path.Base(fi.h.Name) +} + +// Mode returns the permission and mode bits for the headerFileInfo. +func (fi headerFileInfo) Mode() (mode os.FileMode) { + // Set file permission bits. + mode = os.FileMode(fi.h.Mode).Perm() + + // Set setuid, setgid and sticky bits. 
+ if fi.h.Mode&c_ISUID != 0 { + // setuid + mode |= os.ModeSetuid + } + if fi.h.Mode&c_ISGID != 0 { + // setgid + mode |= os.ModeSetgid + } + if fi.h.Mode&c_ISVTX != 0 { + // sticky + mode |= os.ModeSticky + } + + // Set file mode bits. + // clear perm, setuid, setgid and sticky bits. + m := os.FileMode(fi.h.Mode) &^ 07777 + if m == c_ISDIR { + // directory + mode |= os.ModeDir + } + if m == c_ISFIFO { + // named pipe (FIFO) + mode |= os.ModeNamedPipe + } + if m == c_ISLNK { + // symbolic link + mode |= os.ModeSymlink + } + if m == c_ISBLK { + // device file + mode |= os.ModeDevice + } + if m == c_ISCHR { + // Unix character device + mode |= os.ModeDevice + mode |= os.ModeCharDevice + } + if m == c_ISSOCK { + // Unix domain socket + mode |= os.ModeSocket + } + + switch fi.h.Typeflag { + case TypeLink, TypeSymlink: + // hard link, symbolic link + mode |= os.ModeSymlink + case TypeChar: + // character device node + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case TypeBlock: + // block device node + mode |= os.ModeDevice + case TypeDir: + // directory + mode |= os.ModeDir + case TypeFifo: + // fifo node + mode |= os.ModeNamedPipe + } + + return mode +} + +// sysStat, if non-nil, populates h from system-dependent fields of fi. +var sysStat func(fi os.FileInfo, h *Header) error + +// Mode constants from the tar spec. +const ( + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +// Keywords for the PAX Extended Header +const ( + paxAtime = "atime" + paxCharset = "charset" + paxComment = "comment" + paxCtime = "ctime" // please note that ctime is not a valid pax header. + paxGid = "gid" + paxGname = "gname" + paxLinkpath = "linkpath" + paxMtime = "mtime" + paxPath = "path" + paxSize = "size" + paxUid = "uid" + paxUname = "uname" + paxXattr = "SCHILY.xattr." + paxNone = "" +) + +// FileInfoHeader creates a partially-populated Header from fi. +// If fi describes a symlink, FileInfoHeader records link as the link target. +// If fi describes a directory, a slash is appended to the name. +// Because os.FileInfo's Name method returns only the base name of +// the file it describes, it may be necessary to modify the Name field +// of the returned header to provide the full path name of the file. 
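+// For example (illustrative), a Header built from os.Stat("src/foo.go") gets
+// Name "foo.go", so an archiver would set hdr.Name = "src/foo.go" itself
+// before writing the header.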
+func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { + if fi == nil { + return nil, errors.New("tar: FileInfo is nil") + } + fm := fi.Mode() + h := &Header{ + Name: fi.Name(), + ModTime: fi.ModTime(), + Mode: int64(fm.Perm()), // or'd with c_IS* constants later + } + switch { + case fm.IsRegular(): + h.Mode |= c_ISREG + h.Typeflag = TypeReg + h.Size = fi.Size() + case fi.IsDir(): + h.Typeflag = TypeDir + h.Mode |= c_ISDIR + h.Name += "/" + case fm&os.ModeSymlink != 0: + h.Typeflag = TypeSymlink + h.Mode |= c_ISLNK + h.Linkname = link + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + h.Mode |= c_ISCHR + h.Typeflag = TypeChar + } else { + h.Mode |= c_ISBLK + h.Typeflag = TypeBlock + } + case fm&os.ModeNamedPipe != 0: + h.Typeflag = TypeFifo + h.Mode |= c_ISFIFO + case fm&os.ModeSocket != 0: + h.Mode |= c_ISSOCK + default: + return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) + } + if fm&os.ModeSetuid != 0 { + h.Mode |= c_ISUID + } + if fm&os.ModeSetgid != 0 { + h.Mode |= c_ISGID + } + if fm&os.ModeSticky != 0 { + h.Mode |= c_ISVTX + } + if sysStat != nil { + return h, sysStat(fi, h) + } + return h, nil +} + +var zeroBlock = make([]byte, blockSize) + +// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values. +// We compute and return both. +func checksum(header []byte) (unsigned int64, signed int64) { + for i := 0; i < len(header); i++ { + if i == 148 { + // The chksum field (header[148:156]) is special: it should be treated as space bytes. + unsigned += ' ' * 8 + signed += ' ' * 8 + i += 7 + continue + } + unsigned += int64(header[i]) + signed += int64(int8(header[i])) + } + return +} + +type slicer []byte + +func (sp *slicer) next(n int) (b []byte) { + s := *sp + b, *sp = s[0:n], s[n:] + return +} + +func isASCII(s string) bool { + for _, c := range s { + if c >= 0x80 { + return false + } + } + return true +} + +func toASCII(s string) string { + if isASCII(s) { + return s + } + var buf bytes.Buffer + for _, c := range s { + if c < 0x80 { + buf.WriteByte(byte(c)) + } + } + return buf.String() +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go new file mode 100644 index 0000000000..351eaa0e6c --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go @@ -0,0 +1,79 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar_test + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "log" + "os" +) + +func Example() { + // Create a buffer to write our archive to. + buf := new(bytes.Buffer) + + // Create a new tar archive. + tw := tar.NewWriter(buf) + + // Add some files to the archive. + var files = []struct { + Name, Body string + }{ + {"readme.txt", "This archive contains some text files."}, + {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"}, + {"todo.txt", "Get animal handling licence."}, + } + for _, file := range files { + hdr := &tar.Header{ + Name: file.Name, + Size: int64(len(file.Body)), + } + if err := tw.WriteHeader(hdr); err != nil { + log.Fatalln(err) + } + if _, err := tw.Write([]byte(file.Body)); err != nil { + log.Fatalln(err) + } + } + // Make sure to check the error on Close. + if err := tw.Close(); err != nil { + log.Fatalln(err) + } + + // Open the tar archive for reading. 
+ r := bytes.NewReader(buf.Bytes()) + tr := tar.NewReader(r) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + log.Fatalln(err) + } + fmt.Printf("Contents of %s:\n", hdr.Name) + if _, err := io.Copy(os.Stdout, tr); err != nil { + log.Fatalln(err) + } + fmt.Println() + } + + // Output: + // Contents of readme.txt: + // This archive contains some text files. + // Contents of gopher.txt: + // Gopher names: + // George + // Geoffrey + // Gonzo + // Contents of todo.txt: + // Get animal handling licence. +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go new file mode 100644 index 0000000000..7cb6e649c7 --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go @@ -0,0 +1,402 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - pax extensions + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "time" +) + +var ( + ErrHeader = errors.New("archive/tar: invalid tar header") +) + +const maxNanoSecondIntSize = 9 + +// A Reader provides sequential access to the contents of a tar archive. +// A tar archive consists of a sequence of files. +// The Next method advances to the next file in the archive (including the first), +// and then it can be treated as an io.Reader to access the file's data. +type Reader struct { + r io.Reader + err error + nb int64 // number of unread bytes for current file entry + pad int64 // amount of padding (ignored) after current file entry +} + +// NewReader creates a new Reader reading from r. +func NewReader(r io.Reader) *Reader { return &Reader{r: r} } + +// Next advances to the next entry in the tar archive. +func (tr *Reader) Next() (*Header, error) { + var hdr *Header + if tr.err == nil { + tr.skipUnread() + } + if tr.err != nil { + return hdr, tr.err + } + hdr = tr.readHeader() + if hdr == nil { + return hdr, tr.err + } + // Check for PAX/GNU header. + switch hdr.Typeflag { + case TypeXHeader: + // PAX extended header + headers, err := parsePAX(tr) + if err != nil { + return nil, err + } + // We actually read the whole file, + // but this skips alignment padding + tr.skipUnread() + hdr = tr.readHeader() + mergePAX(hdr, headers) + return hdr, nil + case TypeGNULongName: + // We have a GNU long name header. Its contents are the real file name. + realname, err := ioutil.ReadAll(tr) + if err != nil { + return nil, err + } + hdr, err := tr.Next() + hdr.Name = cString(realname) + return hdr, err + case TypeGNULongLink: + // We have a GNU long link header. + realname, err := ioutil.ReadAll(tr) + if err != nil { + return nil, err + } + hdr, err := tr.Next() + hdr.Linkname = cString(realname) + return hdr, err + } + return hdr, tr.err +} + +// mergePAX merges well known headers according to PAX standard. +// In general headers with the same name as those found +// in the header struct overwrite those found in the header +// struct with higher precision or longer values. Esp. useful +// for name and linkname fields. 
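+// For example, the records {"path": "a/b/c", "uid": "1000"} overwrite hdr.Name
+// and hdr.Uid parsed from the fixed-width ustar fields, as exercised by
+// TestMergePAX in reader_test.go.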
+func mergePAX(hdr *Header, headers map[string]string) error { + for k, v := range headers { + switch k { + case paxPath: + hdr.Name = v + case paxLinkpath: + hdr.Linkname = v + case paxGname: + hdr.Gname = v + case paxUname: + hdr.Uname = v + case paxUid: + uid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Uid = int(uid) + case paxGid: + gid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Gid = int(gid) + case paxAtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.AccessTime = t + case paxMtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ModTime = t + case paxCtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ChangeTime = t + case paxSize: + size, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Size = int64(size) + default: + if strings.HasPrefix(k, paxXattr) { + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + hdr.Xattrs[k[len(paxXattr):]] = v + } + } + } + return nil +} + +// parsePAXTime takes a string of the form %d.%d as described in +// the PAX specification. +func parsePAXTime(t string) (time.Time, error) { + buf := []byte(t) + pos := bytes.IndexByte(buf, '.') + var seconds, nanoseconds int64 + var err error + if pos == -1 { + seconds, err = strconv.ParseInt(t, 10, 0) + if err != nil { + return time.Time{}, err + } + } else { + seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0) + if err != nil { + return time.Time{}, err + } + nano_buf := string(buf[pos+1:]) + // Pad as needed before converting to a decimal. + // For example .030 -> .030000000 -> 30000000 nanoseconds + if len(nano_buf) < maxNanoSecondIntSize { + // Right pad + nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf)) + } else if len(nano_buf) > maxNanoSecondIntSize { + // Right truncate + nano_buf = nano_buf[:maxNanoSecondIntSize] + } + nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0) + if err != nil { + return time.Time{}, err + } + } + ts := time.Unix(seconds, nanoseconds) + return ts, nil +} + +// parsePAX parses PAX headers. +// If an extended header (type 'x') is invalid, ErrHeader is returned +func parsePAX(r io.Reader) (map[string]string, error) { + buf, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + headers := make(map[string]string) + // Each record is constructed as + // "%d %s=%s\n", length, keyword, value + for len(buf) > 0 { + // or the header was empty to start with. + var sp int + // The size field ends at the first space. + sp = bytes.IndexByte(buf, ' ') + if sp == -1 { + return nil, ErrHeader + } + // Parse the first token as a decimal integer. + n, err := strconv.ParseInt(string(buf[:sp]), 10, 0) + if err != nil { + return nil, ErrHeader + } + // Extract everything between the decimal and the n -1 on the + // beginning to to eat the ' ', -1 on the end to skip the newline. + var record []byte + record, buf = buf[sp+1:n-1], buf[n:] + // The first equals is guaranteed to mark the end of the key. + // Everything else is value. + eq := bytes.IndexByte(record, '=') + if eq == -1 { + return nil, ErrHeader + } + key, value := record[:eq], record[eq+1:] + headers[string(key)] = string(value) + } + return headers, nil +} + +// cString parses bytes as a NUL-terminated C-style string. +// If a NUL byte is not found then the whole slice is returned as a string. 
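+// For example, cString([]byte{'f', 'o', 'o', 0, 0}) returns "foo", while
+// cString([]byte("bar")) returns "bar" because no NUL terminator is present.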
+func cString(b []byte) string { + n := 0 + for n < len(b) && b[n] != 0 { + n++ + } + return string(b[0:n]) +} + +func (tr *Reader) octal(b []byte) int64 { + // Check for binary format first. + if len(b) > 0 && b[0]&0x80 != 0 { + var x int64 + for i, c := range b { + if i == 0 { + c &= 0x7f // ignore signal bit in first byte + } + x = x<<8 | int64(c) + } + return x + } + + // Because unused fields are filled with NULs, we need + // to skip leading NULs. Fields may also be padded with + // spaces or NULs. + // So we remove leading and trailing NULs and spaces to + // be sure. + b = bytes.Trim(b, " \x00") + + if len(b) == 0 { + return 0 + } + x, err := strconv.ParseUint(cString(b), 8, 64) + if err != nil { + tr.err = err + } + return int64(x) +} + +// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding. +func (tr *Reader) skipUnread() { + nr := tr.nb + tr.pad // number of bytes to skip + tr.nb, tr.pad = 0, 0 + if sr, ok := tr.r.(io.Seeker); ok { + if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil { + return + } + } + _, tr.err = io.CopyN(ioutil.Discard, tr.r, nr) +} + +func (tr *Reader) verifyChecksum(header []byte) bool { + if tr.err != nil { + return false + } + + given := tr.octal(header[148:156]) + unsigned, signed := checksum(header) + return given == unsigned || given == signed +} + +func (tr *Reader) readHeader() *Header { + header := make([]byte, blockSize) + if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { + return nil + } + + // Two blocks of zero bytes marks the end of the archive. + if bytes.Equal(header, zeroBlock[0:blockSize]) { + if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { + return nil + } + if bytes.Equal(header, zeroBlock[0:blockSize]) { + tr.err = io.EOF + } else { + tr.err = ErrHeader // zero block and then non-zero block + } + return nil + } + + if !tr.verifyChecksum(header) { + tr.err = ErrHeader + return nil + } + + // Unpack + hdr := new(Header) + s := slicer(header) + + hdr.Name = cString(s.next(100)) + hdr.Mode = tr.octal(s.next(8)) + hdr.Uid = int(tr.octal(s.next(8))) + hdr.Gid = int(tr.octal(s.next(8))) + hdr.Size = tr.octal(s.next(12)) + hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0) + s.next(8) // chksum + hdr.Typeflag = s.next(1)[0] + hdr.Linkname = cString(s.next(100)) + + // The remainder of the header depends on the value of magic. + // The original (v7) version of tar had no explicit magic field, + // so its magic bytes, like the rest of the block, are NULs. + magic := string(s.next(8)) // contains version field as well. 
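+ // For example, "ustar\x0000" marks a POSIX (or star) archive, "ustar \x00"
+ // marks an old GNU archive, and a v7 archive leaves these bytes as NULs.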
+ var format string + switch magic { + case "ustar\x0000": // POSIX tar (1003.1-1988) + if string(header[508:512]) == "tar\x00" { + format = "star" + } else { + format = "posix" + } + case "ustar \x00": // old GNU tar + format = "gnu" + } + + switch format { + case "posix", "gnu", "star": + hdr.Uname = cString(s.next(32)) + hdr.Gname = cString(s.next(32)) + devmajor := s.next(8) + devminor := s.next(8) + if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock { + hdr.Devmajor = tr.octal(devmajor) + hdr.Devminor = tr.octal(devminor) + } + var prefix string + switch format { + case "posix", "gnu": + prefix = cString(s.next(155)) + case "star": + prefix = cString(s.next(131)) + hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0) + hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0) + } + if len(prefix) > 0 { + hdr.Name = prefix + "/" + hdr.Name + } + } + + if tr.err != nil { + tr.err = ErrHeader + return nil + } + + // Maximum value of hdr.Size is 64 GB (12 octal digits), + // so there's no risk of int64 overflowing. + tr.nb = int64(hdr.Size) + tr.pad = -tr.nb & (blockSize - 1) // blockSize is a power of two + + return hdr +} + +// Read reads from the current entry in the tar archive. +// It returns 0, io.EOF when it reaches the end of that entry, +// until Next is called to advance to the next entry. +func (tr *Reader) Read(b []byte) (n int, err error) { + if tr.nb == 0 { + // file consumed + return 0, io.EOF + } + + if int64(len(b)) > tr.nb { + b = b[0:tr.nb] + } + n, err = tr.r.Read(b) + tr.nb -= int64(n) + + if err == io.EOF && tr.nb > 0 { + err = io.ErrUnexpectedEOF + } + tr.err = err + return +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go new file mode 100644 index 0000000000..f84dbebe98 --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go @@ -0,0 +1,425 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tar + +import ( + "bytes" + "crypto/md5" + "fmt" + "io" + "os" + "reflect" + "strings" + "testing" + "time" +) + +type untarTest struct { + file string + headers []*Header + cksums []string +} + +var gnuTarTest = &untarTest{ + file: "testdata/gnu.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1244428340, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + { + Name: "small2.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1244436044, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + }, + cksums: []string{ + "e38b27eaccb4391bdec553a7f3ae6b2f", + "c65bd2e50a56a2138bf1716f2fd56fe9", + }, +} + +var untarTests = []*untarTest{ + gnuTarTest, + { + file: "testdata/star.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1244592783, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + AccessTime: time.Unix(1244592783, 0), + ChangeTime: time.Unix(1244592783, 0), + }, + { + Name: "small2.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1244592783, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + AccessTime: time.Unix(1244592783, 0), + ChangeTime: time.Unix(1244592783, 0), + }, + }, + }, + { + file: "testdata/v7.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0444, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1244593104, 0), + Typeflag: '\x00', + }, + { + Name: "small2.txt", + Mode: 0444, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1244593104, 0), + Typeflag: '\x00', + }, + }, + }, + { + file: "testdata/pax.tar", + headers: []*Header{ + { + Name: "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", + Mode: 0664, + Uid: 1000, + Gid: 1000, + Uname: "shane", + Gname: "shane", + Size: 7, + ModTime: time.Unix(1350244992, 23960108), + ChangeTime: time.Unix(1350244992, 23960108), + AccessTime: time.Unix(1350244992, 23960108), + Typeflag: TypeReg, + }, + { + Name: "a/b", + Mode: 0777, + Uid: 1000, + Gid: 1000, + Uname: "shane", + Gname: "shane", + Size: 0, + ModTime: time.Unix(1350266320, 910238425), + ChangeTime: time.Unix(1350266320, 910238425), + AccessTime: time.Unix(1350266320, 910238425), + Typeflag: TypeSymlink, + Linkname: "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", + }, + }, + }, + { + file: "testdata/nil-uid.tar", // golang.org/issue/5290 + headers: []*Header{ + { + Name: "P1050238.JPG.log", + Mode: 0664, + Uid: 0, + Gid: 0, + Size: 14, + ModTime: time.Unix(1365454838, 0), + Typeflag: TypeReg, + Linkname: "", + Uname: "eyefi", + Gname: "eyefi", + Devmajor: 0, + Devminor: 0, + }, + }, + }, + { + file: "testdata/xattrs.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0644, + Uid: 1000, + Gid: 10, + Size: 5, + ModTime: time.Unix(1386065770, 448252320), + Typeflag: '0', + Uname: "alex", + Gname: "wheel", + AccessTime: time.Unix(1389782991, 419875220), + ChangeTime: time.Unix(1389782956, 794414986), + Xattrs: map[string]string{ + "user.key": "value", + "user.key2": "value2", + // Interestingly, selinux encodes the terminating null inside the xattr + "security.selinux": 
"unconfined_u:object_r:default_t:s0\x00", + }, + }, + { + Name: "small2.txt", + Mode: 0644, + Uid: 1000, + Gid: 10, + Size: 11, + ModTime: time.Unix(1386065770, 449252304), + Typeflag: '0', + Uname: "alex", + Gname: "wheel", + AccessTime: time.Unix(1389782991, 419875220), + ChangeTime: time.Unix(1386065770, 449252304), + Xattrs: map[string]string{ + "security.selinux": "unconfined_u:object_r:default_t:s0\x00", + }, + }, + }, + }, +} + +func TestReader(t *testing.T) { +testLoop: + for i, test := range untarTests { + f, err := os.Open(test.file) + if err != nil { + t.Errorf("test %d: Unexpected error: %v", i, err) + continue + } + defer f.Close() + tr := NewReader(f) + for j, header := range test.headers { + hdr, err := tr.Next() + if err != nil || hdr == nil { + t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err) + f.Close() + continue testLoop + } + if !reflect.DeepEqual(*hdr, *header) { + t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v", + i, j, *hdr, *header) + } + } + hdr, err := tr.Next() + if err == io.EOF { + continue testLoop + } + if hdr != nil || err != nil { + t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err) + } + } +} + +func TestPartialRead(t *testing.T) { + f, err := os.Open("testdata/gnu.tar") + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + tr := NewReader(f) + + // Read the first four bytes; Next() should skip the last byte. + hdr, err := tr.Next() + if err != nil || hdr == nil { + t.Fatalf("Didn't get first file: %v", err) + } + buf := make([]byte, 4) + if _, err := io.ReadFull(tr, buf); err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if expected := []byte("Kilt"); !bytes.Equal(buf, expected) { + t.Errorf("Contents = %v, want %v", buf, expected) + } + + // Second file + hdr, err = tr.Next() + if err != nil || hdr == nil { + t.Fatalf("Didn't get second file: %v", err) + } + buf = make([]byte, 6) + if _, err := io.ReadFull(tr, buf); err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if expected := []byte("Google"); !bytes.Equal(buf, expected) { + t.Errorf("Contents = %v, want %v", buf, expected) + } +} + +func TestIncrementalRead(t *testing.T) { + test := gnuTarTest + f, err := os.Open(test.file) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + tr := NewReader(f) + + headers := test.headers + cksums := test.cksums + nread := 0 + + // loop over all files + for ; ; nread++ { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + break + } + + // check the header + if !reflect.DeepEqual(*hdr, *headers[nread]) { + t.Errorf("Incorrect header:\nhave %+v\nwant %+v", + *hdr, headers[nread]) + } + + // read file contents in little chunks EOF, + // checksumming all the way + h := md5.New() + rdbuf := make([]uint8, 8) + for { + nr, err := tr.Read(rdbuf) + if err == io.EOF { + break + } + if err != nil { + t.Errorf("Read: unexpected error %v\n", err) + break + } + h.Write(rdbuf[0:nr]) + } + // verify checksum + have := fmt.Sprintf("%x", h.Sum(nil)) + want := cksums[nread] + if want != have { + t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want) + } + } + if nread != len(headers) { + t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread) + } +} + +func TestNonSeekable(t *testing.T) { + test := gnuTarTest + f, err := os.Open(test.file) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + type readerOnly struct { + io.Reader + } + tr := 
NewReader(readerOnly{f})
+ nread := 0
+
+ for ; ; nread++ {
+ _, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ }
+
+ if nread != len(test.headers) {
+ t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(test.headers), nread)
+ }
+}
+
+func TestParsePAXHeader(t *testing.T) {
+ paxTests := [][3]string{
+ {"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths
+ {"a", "a=name", "9 a=name\n"}, // Test case involving multiple acceptable lengths
+ {"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}}
+ for _, test := range paxTests {
+ key, expected, raw := test[0], test[1], test[2]
+ reader := bytes.NewReader([]byte(raw))
+ headers, err := parsePAX(reader)
+ if err != nil {
+ t.Errorf("Couldn't parse correctly formatted headers: %v", err)
+ continue
+ }
+ if strings.EqualFold(headers[key], expected) {
+ t.Errorf("mtime header incorrectly parsed: got %s, wanted %s", headers[key], expected)
+ continue
+ }
+ trailer := make([]byte, 100)
+ n, err := reader.Read(trailer)
+ if err != io.EOF || n != 0 {
+ t.Error("Buffer wasn't consumed")
+ }
+ }
+ badHeader := bytes.NewReader([]byte("3 somelongkey="))
+ if _, err := parsePAX(badHeader); err != ErrHeader {
+ t.Fatal("Unexpected success when parsing bad header")
+ }
+}
+
+func TestParsePAXTime(t *testing.T) {
+ // Some valid PAX time values
+ timestamps := map[string]time.Time{
+ "1350244992.023960108": time.Unix(1350244992, 23960108), // The common case
+ "1350244992.02396010": time.Unix(1350244992, 23960100), // Lower precision value
+ "1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value
+ "1350244992": time.Unix(1350244992, 0), // Low precision value
+ }
+ for input, expected := range timestamps {
+ ts, err := parsePAXTime(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !ts.Equal(expected) {
+ t.Fatalf("Time parsing failure %s %s", ts, expected)
+ }
+ }
+}
+
+func TestMergePAX(t *testing.T) {
+ hdr := new(Header)
+ // Test a string, integer, and time based value.
+ headers := map[string]string{
+ "path": "a/b/c",
+ "uid": "1000",
+ "mtime": "1350244992.023960108",
+ }
+ err := mergePAX(hdr, headers)
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := &Header{
+ Name: "a/b/c",
+ Uid: 1000,
+ ModTime: time.Unix(1350244992, 23960108),
+ }
+ if !reflect.DeepEqual(hdr, want) {
+ t.Errorf("incorrect merge: got %+v, want %+v", hdr, want)
+ }
+}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go
new file mode 100644
index 0000000000..cf9cc79c59
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +// +build linux dragonfly openbsd solaris + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atim.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctim.Unix()) +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go new file mode 100644 index 0000000000..6f17dbe307 --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd netbsd + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atimespec.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctimespec.Unix()) +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go new file mode 100644 index 0000000000..cb843db4cf --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "os" + "syscall" +) + +func init() { + sysStat = statUnix +} + +func statUnix(fi os.FileInfo, h *Header) error { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + h.Uid = int(sys.Uid) + h.Gid = int(sys.Gid) + // TODO(bradfitz): populate username & group. os/user + // doesn't cache LookupId lookups, and lacks group + // lookup functions. + h.AccessTime = statAtime(sys) + h.ChangeTime = statCtime(sys) + // TODO(bradfitz): major/minor device numbers? + return nil +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go new file mode 100644 index 0000000000..ed333f3ea4 --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go @@ -0,0 +1,284 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tar + +import ( + "bytes" + "io/ioutil" + "os" + "path" + "reflect" + "strings" + "testing" + "time" +) + +func TestFileInfoHeader(t *testing.T) { + fi, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + h, err := FileInfoHeader(fi, "") + if err != nil { + t.Fatalf("FileInfoHeader: %v", err) + } + if g, e := h.Name, "small.txt"; g != e { + t.Errorf("Name = %q; want %q", g, e) + } + if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e { + t.Errorf("Mode = %#o; want %#o", g, e) + } + if g, e := h.Size, int64(5); g != e { + t.Errorf("Size = %v; want %v", g, e) + } + if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) { + t.Errorf("ModTime = %v; want %v", g, e) + } + // FileInfoHeader should error when passing nil FileInfo + if _, err := FileInfoHeader(nil, ""); err == nil { + t.Fatalf("Expected error when passing nil to FileInfoHeader") + } +} + +func TestFileInfoHeaderDir(t *testing.T) { + fi, err := os.Stat("testdata") + if err != nil { + t.Fatal(err) + } + h, err := FileInfoHeader(fi, "") + if err != nil { + t.Fatalf("FileInfoHeader: %v", err) + } + if g, e := h.Name, "testdata/"; g != e { + t.Errorf("Name = %q; want %q", g, e) + } + // Ignoring c_ISGID for golang.org/issue/4867 + if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e { + t.Errorf("Mode = %#o; want %#o", g, e) + } + if g, e := h.Size, int64(0); g != e { + t.Errorf("Size = %v; want %v", g, e) + } + if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) { + t.Errorf("ModTime = %v; want %v", g, e) + } +} + +func TestFileInfoHeaderSymlink(t *testing.T) { + h, err := FileInfoHeader(symlink{}, "some-target") + if err != nil { + t.Fatal(err) + } + if g, e := h.Name, "some-symlink"; g != e { + t.Errorf("Name = %q; want %q", g, e) + } + if g, e := h.Linkname, "some-target"; g != e { + t.Errorf("Linkname = %q; want %q", g, e) + } +} + +type symlink struct{} + +func (symlink) Name() string { return "some-symlink" } +func (symlink) Size() int64 { return 0 } +func (symlink) Mode() os.FileMode { return os.ModeSymlink } +func (symlink) ModTime() time.Time { return time.Time{} } +func (symlink) IsDir() bool { return false } +func (symlink) Sys() interface{} { return nil } + +func TestRoundTrip(t *testing.T) { + data := []byte("some file contents") + + var b bytes.Buffer + tw := NewWriter(&b) + hdr := &Header{ + Name: "file.txt", + Uid: 1 << 21, // too big for 8 octal digits + Size: int64(len(data)), + ModTime: time.Now(), + } + // tar only supports second precision. + hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond) + if err := tw.WriteHeader(hdr); err != nil { + t.Fatalf("tw.WriteHeader: %v", err) + } + if _, err := tw.Write(data); err != nil { + t.Fatalf("tw.Write: %v", err) + } + if err := tw.Close(); err != nil { + t.Fatalf("tw.Close: %v", err) + } + + // Read it back. + tr := NewReader(&b) + rHdr, err := tr.Next() + if err != nil { + t.Fatalf("tr.Next: %v", err) + } + if !reflect.DeepEqual(rHdr, hdr) { + t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr) + } + rData, err := ioutil.ReadAll(tr) + if err != nil { + t.Fatalf("Read: %v", err) + } + if !bytes.Equal(rData, data) { + t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data) + } +} + +type headerRoundTripTest struct { + h *Header + fm os.FileMode +} + +func TestHeaderRoundTrip(t *testing.T) { + golden := []headerRoundTripTest{ + // regular file. 
+ { + h: &Header{ + Name: "test.txt", + Mode: 0644 | c_ISREG, + Size: 12, + ModTime: time.Unix(1360600916, 0), + Typeflag: TypeReg, + }, + fm: 0644, + }, + // hard link. + { + h: &Header{ + Name: "hard.txt", + Mode: 0644 | c_ISLNK, + Size: 0, + ModTime: time.Unix(1360600916, 0), + Typeflag: TypeLink, + }, + fm: 0644 | os.ModeSymlink, + }, + // symbolic link. + { + h: &Header{ + Name: "link.txt", + Mode: 0777 | c_ISLNK, + Size: 0, + ModTime: time.Unix(1360600852, 0), + Typeflag: TypeSymlink, + }, + fm: 0777 | os.ModeSymlink, + }, + // character device node. + { + h: &Header{ + Name: "dev/null", + Mode: 0666 | c_ISCHR, + Size: 0, + ModTime: time.Unix(1360578951, 0), + Typeflag: TypeChar, + }, + fm: 0666 | os.ModeDevice | os.ModeCharDevice, + }, + // block device node. + { + h: &Header{ + Name: "dev/sda", + Mode: 0660 | c_ISBLK, + Size: 0, + ModTime: time.Unix(1360578954, 0), + Typeflag: TypeBlock, + }, + fm: 0660 | os.ModeDevice, + }, + // directory. + { + h: &Header{ + Name: "dir/", + Mode: 0755 | c_ISDIR, + Size: 0, + ModTime: time.Unix(1360601116, 0), + Typeflag: TypeDir, + }, + fm: 0755 | os.ModeDir, + }, + // fifo node. + { + h: &Header{ + Name: "dev/initctl", + Mode: 0600 | c_ISFIFO, + Size: 0, + ModTime: time.Unix(1360578949, 0), + Typeflag: TypeFifo, + }, + fm: 0600 | os.ModeNamedPipe, + }, + // setuid. + { + h: &Header{ + Name: "bin/su", + Mode: 0755 | c_ISREG | c_ISUID, + Size: 23232, + ModTime: time.Unix(1355405093, 0), + Typeflag: TypeReg, + }, + fm: 0755 | os.ModeSetuid, + }, + // setguid. + { + h: &Header{ + Name: "group.txt", + Mode: 0750 | c_ISREG | c_ISGID, + Size: 0, + ModTime: time.Unix(1360602346, 0), + Typeflag: TypeReg, + }, + fm: 0750 | os.ModeSetgid, + }, + // sticky. + { + h: &Header{ + Name: "sticky.txt", + Mode: 0600 | c_ISREG | c_ISVTX, + Size: 7, + ModTime: time.Unix(1360602540, 0), + Typeflag: TypeReg, + }, + fm: 0600 | os.ModeSticky, + }, + } + + for i, g := range golden { + fi := g.h.FileInfo() + h2, err := FileInfoHeader(fi, "") + if err != nil { + t.Error(err) + continue + } + if strings.Contains(fi.Name(), "/") { + t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name()) + } + name := path.Base(g.h.Name) + if fi.IsDir() { + name += "/" + } + if got, want := h2.Name, name; got != want { + t.Errorf("i=%d: Name: got %v, want %v", i, got, want) + } + if got, want := h2.Size, g.h.Size; got != want { + t.Errorf("i=%d: Size: got %v, want %v", i, got, want) + } + if got, want := h2.Mode, g.h.Mode; got != want { + t.Errorf("i=%d: Mode: got %o, want %o", i, got, want) + } + if got, want := fi.Mode(), g.fm; got != want { + t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want) + } + if got, want := h2.ModTime, g.h.ModTime; got != want { + t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want) + } + if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h { + t.Errorf("i=%d: Sys didn't return original *Header", i) + } + } +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar new file mode 100644 index 0000000000..fc899dc8dc Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar new file mode 100644 index 0000000000..cc9cfaa33c Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar differ diff --git 
a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar new file mode 100644 index 0000000000..9bc24b6587 Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt new file mode 100644 index 0000000000..b249bfc518 --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt @@ -0,0 +1 @@ +Kilts \ No newline at end of file diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt new file mode 100644 index 0000000000..394ee3ecd0 --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt @@ -0,0 +1 @@ +Google.com diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar new file mode 100644 index 0000000000..59e2d4e604 Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar new file mode 100644 index 0000000000..29679d9a30 Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar new file mode 100644 index 0000000000..eb65fc9410 Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar new file mode 100644 index 0000000000..753e883ceb Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar new file mode 100644 index 0000000000..e6d816ad07 Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar new file mode 100644 index 0000000000..9701950edd Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go new file mode 100644 index 0000000000..9ee9499297 --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go @@ -0,0 +1,383 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - catch more errors (no first header, etc.) 
+ +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "path" + "strconv" + "strings" + "time" +) + +var ( + ErrWriteTooLong = errors.New("archive/tar: write too long") + ErrFieldTooLong = errors.New("archive/tar: header field too long") + ErrWriteAfterClose = errors.New("archive/tar: write after close") + errNameTooLong = errors.New("archive/tar: name too long") + errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values") +) + +// A Writer provides sequential writing of a tar archive in POSIX.1 format. +// A tar archive consists of a sequence of files. +// Call WriteHeader to begin a new file, and then call Write to supply that file's data, +// writing at most hdr.Size bytes in total. +type Writer struct { + w io.Writer + err error + nb int64 // number of unwritten bytes for current file entry + pad int64 // amount of padding to write after current file entry + closed bool + usedBinary bool // whether the binary numeric field extension was used + preferPax bool // use pax header instead of binary numeric header +} + +// NewWriter creates a new Writer writing to w. +func NewWriter(w io.Writer) *Writer { return &Writer{w: w} } + +// Flush finishes writing the current file (optional). +func (tw *Writer) Flush() error { + if tw.nb > 0 { + tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb) + return tw.err + } + + n := tw.nb + tw.pad + for n > 0 && tw.err == nil { + nr := n + if nr > blockSize { + nr = blockSize + } + var nw int + nw, tw.err = tw.w.Write(zeroBlock[0:nr]) + n -= int64(nw) + } + tw.nb = 0 + tw.pad = 0 + return tw.err +} + +// Write s into b, terminating it with a NUL if there is room. +// If the value is too long for the field and allowPax is true add a paxheader record instead +func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) { + needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s) + if needsPaxHeader { + paxHeaders[paxKeyword] = s + return + } + if len(s) > len(b) { + if tw.err == nil { + tw.err = ErrFieldTooLong + } + return + } + ascii := toASCII(s) + copy(b, ascii) + if len(ascii) < len(b) { + b[len(ascii)] = 0 + } +} + +// Encode x as an octal ASCII string and write it into b with leading zeros. +func (tw *Writer) octal(b []byte, x int64) { + s := strconv.FormatInt(x, 8) + // leading zeros, but leave room for a NUL. + for len(s)+1 < len(b) { + s = "0" + s + } + tw.cString(b, s, false, paxNone, nil) +} + +// Write x into b, either as octal or as binary (GNUtar/star extension). +// If the value is too long for the field and writingPax is enabled both for the field and the add a paxheader record instead +func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) { + // Try octal first. + s := strconv.FormatInt(x, 8) + if len(s) < len(b) { + tw.octal(b, x) + return + } + + // If it is too long for octal, and pax is preferred, use a pax header + if allowPax && tw.preferPax { + tw.octal(b, 0) + s := strconv.FormatInt(x, 10) + paxHeaders[paxKeyword] = s + return + } + + // Too big: use binary (big-endian). + tw.usedBinary = true + for i := len(b) - 1; x > 0 && i >= 0; i-- { + b[i] = byte(x) + x >>= 8 + } + b[0] |= 0x80 // highest bit indicates binary format +} + +var ( + minTime = time.Unix(0, 0) + // There is room for 11 octal digits (33 bits) of mtime. + maxTime = minTime.Add((1<<33 - 1) * time.Second) +) + +// WriteHeader writes hdr and prepares to accept the file's contents. 
+ +// WriteHeader writes hdr and prepares to accept the file's contents. +// WriteHeader calls Flush if it is not the first header. +// Calling after a Close will return ErrWriteAfterClose. +func (tw *Writer) WriteHeader(hdr *Header) error { + return tw.writeHeader(hdr, true) +} + +// writeHeader writes hdr and prepares to accept the file's contents. +// The allowPax parameter exists because this method is called internally +// by writePAXHeader with allowPax set to false, which suppresses the +// recursive pax header that would otherwise be written for the pax +// pseudo-file entry itself. +func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error { + if tw.closed { + return ErrWriteAfterClose + } + if tw.err == nil { + tw.Flush() + } + if tw.err != nil { + return tw.err + } + + // A map to hold pax header records, if any are needed. + paxHeaders := make(map[string]string) + + // TODO(shanemhansen): we might want to use PAX headers for + // subsecond time resolution, but for now let's just capture + // overlong fields or non-ASCII characters + + header := make([]byte, blockSize) + s := slicer(header) + + // Keep a reference to the filename so we can overwrite it later if we detect that ustar long names can be used instead of pax. + pathHeaderBytes := s.next(fileNameSize) + + tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders) + + // Handle out-of-range ModTime carefully. + var modTime int64 + if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) { + modTime = hdr.ModTime.Unix() + } + + tw.octal(s.next(8), hdr.Mode) // 100:108 + tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116 + tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124 + tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders) // 124:136 + tw.numeric(s.next(12), modTime, false, paxNone, nil) // 136:148 --- consider using pax for finer granularity + s.next(8) // chksum (148:156) + s.next(1)[0] = hdr.Typeflag // 156:157 + + tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders) + + copy(s.next(8), []byte("ustar\x0000")) // 257:265 + tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297 + tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329 + tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil) // 329:337 + tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil) // 337:345 + + // Keep a reference to the prefix so we can overwrite it later if we detect that ustar long names can be used instead of pax. + prefixHeaderBytes := s.next(155) + tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix + + // Use the GNU magic instead of POSIX magic if we used any GNU extensions. + if tw.usedBinary { + copy(header[257:265], []byte("ustar \x00")) + } + + _, paxPathUsed := paxHeaders[paxPath] + // Try to use a ustar header when only the name is too long. + if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { + suffix := hdr.Name + prefix := "" + if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) { + var err error + prefix, suffix, err = tw.splitUSTARLongName(hdr.Name) + if err == nil { + // OK, we can use a ustar long name instead of pax; now correct the fields. + + // Remove the path record from the pax headers. This suppresses the pax header entirely. + delete(paxHeaders, paxPath) + + // Update the path fields. + tw.cString(pathHeaderBytes, suffix, false, paxNone, nil) + tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil) + + // Use the ustar magic if we used ustar long names. + if len(prefix) > 0 { + copy(header[257:265], []byte("ustar\000")) + } + } + } + } + + // The chksum field is terminated by a NUL and a space. + // This is different from the other octal fields. + chksum, _ := checksum(header) + tw.octal(header[148:155], chksum) + header[155] = ' ' + + if tw.err != nil { + // Problem with header; probably an integer too big for a field. + return tw.err + } + + if allowPax { + for k, v := range hdr.Xattrs { + paxHeaders[paxXattr+k] = v + } + } + + if len(paxHeaders) > 0 { + if !allowPax { + return errInvalidHeader + } + if err := tw.writePAXHeader(hdr, paxHeaders); err != nil { + return err + } + } + tw.nb = int64(hdr.Size) + tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize + + _, tw.err = tw.w.Write(header) + return tw.err +}
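The function that follows implements the prefix/suffix split used above. As a concrete illustration (standalone sketch with a hypothetical helper name and trimmed edge cases; the real logic is splitUSTARLongName below), a 143-byte path can dodge the pax fallback entirely by splitting at a '/' so that the prefix fits the 155-byte prefix field and the remainder fits the 100-byte name field:

```go
package main

import (
	"fmt"
	"strings"
)

// Simplified version of the split heuristic: take the last '/' that
// leaves a prefix of at most 155 bytes, and require the remainder to
// fit in the 100-byte name field.
func splitForUSTAR(name string) (prefix, suffix string, ok bool) {
	length := len(name)
	if length > 155+1 {
		length = 155 + 1 // a '/' at index 155 still yields a 155-byte prefix
	}
	i := strings.LastIndex(name[:length], "/")
	nlen := len(name) - i - 1 // bytes left for the name field
	if i <= 0 || nlen == 0 || nlen > 100 || i > 155 {
		return "", "", false
	}
	return name[:i], name[i+1:], true
}

func main() {
	name := strings.Repeat("longname/", 15) + "file.txt" // 143 bytes, too long for the name field
	prefix, suffix, ok := splitForUSTAR(name)
	fmt.Println(ok, len(prefix), len(suffix), suffix) // true 134 8 file.txt
}
```

The ustar.tar fixture added earlier in this diff encodes exactly this fifteen-times-repeated "longname/" path.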
+ +// splitUSTARLongName splits name into a USTAR prefix and suffix. +// name must be < 256 characters. errNameTooLong is returned +// if name can't be split. The splitting heuristic +// is compatible with GNU tar. +func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) { + length := len(name) + if length > fileNamePrefixSize+1 { + length = fileNamePrefixSize + 1 + } else if name[length-1] == '/' { + length-- + } + i := strings.LastIndex(name[:length], "/") + // nlen contains the resulting length in the name field. + // plen contains the resulting length in the prefix field. + nlen := len(name) - i - 1 + plen := i + if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize { + err = errNameTooLong + return + } + prefix, suffix = name[:i], name[i+1:] + return +} + +// writePAXHeader writes an extended pax header to the +// archive. +func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error { + // Prepare extended header + ext := new(Header) + ext.Typeflag = TypeXHeader + // Setting ModTime is required for reader parsing to + // succeed, and seems harmless enough. + ext.ModTime = hdr.ModTime + // The spec asks that we namespace our pseudo files + // with the current pid. + pid := os.Getpid() + dir, file := path.Split(hdr.Name) + fullName := path.Join(dir, + fmt.Sprintf("PaxHeaders.%d", pid), file) + + ascii := toASCII(fullName) + if len(ascii) > 100 { + ascii = ascii[:100] + } + ext.Name = ascii + // Construct the body + var buf bytes.Buffer + + for k, v := range paxHeaders { + fmt.Fprint(&buf, paxHeader(k+"="+v)) + } + + ext.Size = int64(len(buf.Bytes())) + if err := tw.writeHeader(ext, false); err != nil { + return err + } + if _, err := tw.Write(buf.Bytes()); err != nil { + return err + } + if err := tw.Flush(); err != nil { + return err + } + return nil +} + +// paxHeader formats a single pax record, prefixing it with the appropriate length. +func paxHeader(msg string) string { + const padding = 2 // Extra padding for space and newline + size := len(msg) + padding + size += len(strconv.Itoa(size)) + record := fmt.Sprintf("%d %s\n", size, msg) + if len(record) != size { + // Final adjustment if adding size increased + // the number of digits in size + size = len(record) + record = fmt.Sprintf("%d %s\n", size, msg) + } + return record +}
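Since the pax record format above is self-referential (the length prefix counts its own digits), a worked example helps. This standalone sketch (illustrative only) reproduces the computation for the first vector in TestPAXHeader later in this diff:

```go
// The pax record is "<size> <key>=<value>\n", where <size> counts the
// whole record, including the digits of <size> itself.
package main

import (
	"fmt"
	"strconv"
)

func main() {
	msg := "path=/etc/hosts"        // 15 bytes
	size := len(msg) + 2            // + space and newline = 17
	size += len(strconv.Itoa(size)) // + 2 digits for "17" = 19; "19" is still 2 digits
	record := fmt.Sprintf("%d %s\n", size, msg)
	fmt.Printf("%q len=%d\n", record, len(record)) // "19 path=/etc/hosts\n" len=19
}
```

The adjustment loop in paxHeader exists for carry cases such as "a=names": 7 bytes plus 2 of padding plus 1 digit gives 10, but writing "10 " makes the record 11 bytes, so the size must be bumped to 11, which the test vector "11 a=names\n" below confirms.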
+ +// Write writes to the current entry in the tar archive. +// Write returns the error ErrWriteTooLong if more than +// hdr.Size bytes are written after WriteHeader, and +// ErrWriteAfterClose if it is called after Close. +func (tw *Writer) Write(b []byte) (n int, err error) { + if tw.closed { + err = ErrWriteAfterClose + return + } + overwrite := false + if int64(len(b)) > tw.nb { + b = b[0:tw.nb] + overwrite = true + } + n, err = tw.w.Write(b) + tw.nb -= int64(n) + if err == nil && overwrite { + err = ErrWriteTooLong + return + } + tw.err = err + return +} + +// Close closes the tar archive, flushing any unwritten +// data to the underlying writer. +func (tw *Writer) Close() error { + if tw.err != nil || tw.closed { + return tw.err + } + tw.Flush() + tw.closed = true + if tw.err != nil { + return tw.err + } + + // trailer: two zero blocks + for i := 0; i < 2; i++ { + _, tw.err = tw.w.Write(zeroBlock) + if tw.err != nil { + break + } + } + return tw.err +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go new file mode 100644 index 0000000000..2b9ea658db --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go @@ -0,0 +1,433 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" + "testing/iotest" + "time" ) + +type writerTestEntry struct { + header *Header + contents string +} + +type writerTest struct { + file string // filename of expected output + entries []*writerTestEntry +} + +var writerTests = []*writerTest{ + // The writer test file was produced with these commands, using GNU tar 1.26: + // ln -s small.txt link.txt + // tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt + { + file: "testdata/writer.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: "small.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1246508266, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + contents: "Kilts", + }, + { + header: &Header{ + Name: "small2.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1245217492, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + contents: "Google.com\n", + }, + { + header: &Header{ + Name: "link.txt", + Mode: 0777, + Uid: 1000, + Gid: 1000, + Size: 0, + ModTime: time.Unix(1314603082, 0), + Typeflag: '2', + Linkname: "small.txt", + Uname: "strings", + Gname: "strings", + }, + // no contents + }, + }, + }, + // The truncated test file was produced using these commands: + // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt + // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar + { + file: "testdata/writer-big.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: "tmp/16gig.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 16 << 30, + ModTime: time.Unix(1254699560, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + // fake contents + contents: strings.Repeat("\x00", 4<<10), + }, + }, + }, + // This file was produced using GNU tar 1.17: + // gnutar -b 4 --format=ustar (longname/)*15 + file.txt + { + file: "testdata/ustar.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: strings.Repeat("longname/", 15) + "file.txt", + Mode: 0644, + Uid: 0765, + Gid: 024, + Size: 06, + ModTime: time.Unix(1360135598, 0), + Typeflag: '0', + Uname: "shane", + Gname: "staff", + }, + contents: "hello\n", + }, + }, + }, +} + +// Render byte array in a two-character hexadecimal
string, spaced for easy visual inspection. +func bytestr(offset int, b []byte) string { + const rowLen = 32 + s := fmt.Sprintf("%04x ", offset) + for _, ch := range b { + switch { + case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z': + s += fmt.Sprintf(" %c", ch) + default: + s += fmt.Sprintf(" %02x", ch) + } + } + return s +} + +// Render a pseudo-diff between two blocks of bytes. +func bytediff(a []byte, b []byte) string { + const rowLen = 32 + s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b)) + for offset := 0; len(a)+len(b) > 0; offset += rowLen { + na, nb := rowLen, rowLen + if na > len(a) { + na = len(a) + } + if nb > len(b) { + nb = len(b) + } + sa := bytestr(offset, a[0:na]) + sb := bytestr(offset, b[0:nb]) + if sa != sb { + s += fmt.Sprintf("-%v\n+%v\n", sa, sb) + } + a = a[na:] + b = b[nb:] + } + return s +} + +func TestWriter(t *testing.T) { +testLoop: + for i, test := range writerTests { + expected, err := ioutil.ReadFile(test.file) + if err != nil { + t.Errorf("test %d: Unexpected error: %v", i, err) + continue + } + + buf := new(bytes.Buffer) + tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB + big := false + for j, entry := range test.entries { + big = big || entry.header.Size > 1<<10 + if err := tw.WriteHeader(entry.header); err != nil { + t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err) + continue testLoop + } + if _, err := io.WriteString(tw, entry.contents); err != nil { + t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err) + continue testLoop + } + } + // Only interested in Close failures for the small tests. + if err := tw.Close(); err != nil && !big { + t.Errorf("test %d: Failed closing archive: %v", i, err) + continue testLoop + } + + actual := buf.Bytes() + if !bytes.Equal(expected, actual) { + t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v", + i, bytediff(expected, actual)) + } + if testing.Short() { // The second test is expensive. + break + } + } +} + +func TestPax(t *testing.T) { + // Create an archive with a large name + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("os.Stat: %v", err) + } + // Force a PAX long name to be written + longName := strings.Repeat("ab", 100) + contents := strings.Repeat(" ", int(hdr.Size)) + hdr.Name = longName + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) { + t.Fatal("Expected at least one PAX header to be written.") + } + // Test that we can get a long name back out of the archive. 
+ reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Name != longName { + t.Fatal("Couldn't recover long file name") + } +} + +func TestPaxSymlink(t *testing.T) { + // Create an archive with a large linkname + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("FileInfoHeader: %v", err) + } + hdr.Typeflag = TypeSymlink + // Force a PAX long linkname to be written + longLinkname := strings.Repeat("1234567890/1234567890", 10) + hdr.Linkname = longLinkname + + hdr.Size = 0 + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) { + t.Fatal("Expected at least one PAX header to be written.") + } + // Test that we can get the long linkname back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Linkname != longLinkname { + t.Fatal("Couldn't recover long link name") + } +} + +func TestPaxNonAscii(t *testing.T) { + // Create an archive with non-ASCII names. These should trigger a pax header + // because pax headers have a defined UTF-8 encoding. + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("FileInfoHeader: %v", err) + } + + // some sample data + chineseFilename := "文件名" + chineseGroupname := "組" + chineseUsername := "用戶名" + + hdr.Name = chineseFilename + hdr.Gname = chineseGroupname + hdr.Uname = chineseUsername + + contents := strings.Repeat(" ", int(hdr.Size)) + + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) { + t.Fatal("Expected at least one PAX header to be written.") + } + // Test that we can get the unicode names back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Name != chineseFilename { + t.Fatal("Couldn't recover unicode name") + } + if hdr.Gname != chineseGroupname { + t.Fatal("Couldn't recover unicode group") + } + if hdr.Uname != chineseUsername { + t.Fatal("Couldn't recover unicode user") + } +} + +func TestPaxXattrs(t *testing.T) { + xattrs := map[string]string{ + "user.key": "value", + } + + // Create an archive with an xattr + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("FileInfoHeader: %v", err) + } + contents := "Kilts" + hdr.Xattrs = xattrs + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Test that we can get the xattrs back out of the archive.
+ reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(hdr.Xattrs, xattrs) { + t.Fatalf("xattrs did not survive round trip: got %+v, want %+v", + hdr.Xattrs, xattrs) + } +} + +func TestPAXHeader(t *testing.T) { + medName := strings.Repeat("CD", 50) + longName := strings.Repeat("AB", 100) + paxTests := [][2]string{ + {paxPath + "=/etc/hosts", "19 path=/etc/hosts\n"}, + {"a=b", "6 a=b\n"}, // Single digit length + {"a=names", "11 a=names\n"}, // Test case involving carries + {paxPath + "=" + longName, fmt.Sprintf("210 path=%s\n", longName)}, + {paxPath + "=" + medName, fmt.Sprintf("110 path=%s\n", medName)}} + + for _, test := range paxTests { + key, expected := test[0], test[1] + if result := paxHeader(key); result != expected { + t.Fatalf("paxHeader: got %s, expected %s", result, expected) + } + } +} + +func TestUSTARLongName(t *testing.T) { + // Create an archive with a path that failed to split with USTAR extension in previous versions. + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("FileInfoHeader: %v", err) + } + hdr.Typeflag = TypeDir + // Force a long name that must be split into prefix and suffix. The name was taken + // from a practical example that failed to split; characters were replaced with + // digits to anonymize the sample. + longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/" + hdr.Name = longName + + hdr.Size = 0 + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Test that we can get the long name back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Name != longName { + t.Fatal("Couldn't recover long name") + } +}
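Taken together, the PAX tests above all follow one write-then-read-back pattern. Here is a condensed standalone sketch of that round trip using the standard library's archive/tar (values illustrative; the "PaxHeaders." check mirrors the assertion the tests make):

```go
// Sketch of the property the PAX tests verify: a name longer than the
// 100-byte ustar name field survives a write/read round trip via a pax
// extended header.
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"strings"
)

func main() {
	longName := strings.Repeat("ab", 100) // 200 bytes, too long for ustar's name field
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(&tar.Header{Name: longName, Mode: 0640, Size: 0, Typeflag: tar.TypeReg}); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}
	raw := buf.Bytes() // snapshot before the reader consumes the buffer
	hdr, err := tar.NewReader(bytes.NewReader(raw)).Next()
	if err != nil {
		panic(err)
	}
	fmt.Println(hdr.Name == longName, bytes.Contains(raw, []byte("PaxHeaders."))) // true true
}
```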
diff --git a/version.go b/version.go index a4288245f7..d88def9619 100644 --- a/version.go +++ b/version.go @@ -1,16 +1,13 @@ package docker import ( + "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/utils" "runtime" ) -func init() { - engine.Register("version", jobVersion) -} - -func jobVersion(job *engine.Job) engine.Status { +func GetVersion(job *engine.Job) engine.Status { if _, err := dockerVersion().WriteTo(job.Stdout); err != nil { job.Errorf("%s", err) return engine.StatusErr @@ -22,8 +19,8 @@ func jobVersion(job *engine.Job) engine.Status { // environment. func dockerVersion() *engine.Env { v := &engine.Env{} - v.Set("Version", VERSION) - v.Set("GitCommit", GITCOMMIT) + v.Set("Version", dockerversion.VERSION) + v.Set("GitCommit", dockerversion.GITCOMMIT) v.Set("GoVersion", runtime.Version()) v.Set("Os", runtime.GOOS) v.Set("Arch", runtime.GOARCH) diff --git a/volumes.go b/volumes.go new file mode 100644 index 0000000000..9f76e3698b --- /dev/null +++ b/volumes.go @@ -0,0 +1,332 @@ +package docker + +import ( + "fmt" + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/pkg/mount" + "github.com/dotcloud/docker/utils" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "syscall" +) + +type BindMap struct { + SrcPath string + DstPath string + Mode string +} + +func prepareVolumesForContainer(container *Container) error { + if container.Volumes == nil || len(container.Volumes) == 0 { + container.Volumes = make(map[string]string) + container.VolumesRW = make(map[string]bool) + if err := applyVolumesFrom(container); err != nil { + return err + } + } + + if err := createVolumes(container); err != nil { + return err + } + return nil +} + +func mountVolumesForContainer(container *Container, envPath string) error { + // Set up the root fs as a bind mount of the base fs + var ( + root = container.RootfsPath() + runtime = container.runtime + ) + if err := os.MkdirAll(root, 0755); err != nil && !os.IsExist(err) { + return err + } + + // Create a bind mount of the base fs as a place where we can add mounts + // without affecting the ability to access the base fs + if err := mount.Mount(container.basefs, root, "none", "bind,rw"); err != nil { + return err + } + + // Make sure the root fs is private so the mounts here don't propagate to basefs + if err := mount.ForceMount(root, root, "none", "private"); err != nil { + return err + } + + // Mount docker-specific files into the container's root fs + if err := mount.Mount(runtime.sysInitPath, filepath.Join(root, "/.dockerinit"), "none", "bind,ro"); err != nil { + return err + } + if err := mount.Mount(envPath, filepath.Join(root, "/.dockerenv"), "none", "bind,ro"); err != nil { + return err + } + if err := mount.Mount(container.ResolvConfPath, filepath.Join(root, "/etc/resolv.conf"), "none", "bind,ro"); err != nil { + return err + } + + if container.HostnamePath != "" && container.HostsPath != "" { + if err := mount.Mount(container.HostnamePath, filepath.Join(root, "/etc/hostname"), "none", "bind,ro"); err != nil { + return err + } + if err := mount.Mount(container.HostsPath, filepath.Join(root, "/etc/hosts"), "none", "bind,ro"); err != nil { + return err + } + } + + // Mount user-specified volumes + for r, v := range container.Volumes { + mountAs := "ro" + if container.VolumesRW[r] { + mountAs = "rw" + } + + r = filepath.Join(root, r) + if p, err := utils.FollowSymlinkInScope(r, root); err != nil { + return err + } else { + r = p + } + + if err := mount.Mount(v, r, "none", fmt.Sprintf("bind,%s", mountAs)); err != nil { + return err + } + } + return nil +}
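The "bind,ro" option strings passed to mount.Mount above ultimately translate into Linux mount(2) flags (the parsing itself lives in pkg/mount). As a rough, Linux-only sketch of the equivalent raw syscalls (paths illustrative, not from the diff; note that making a bind mount read-only requires a second remount step):

```go
// Rough equivalent of mount.Mount(src, dst, "none", "bind,ro") using raw
// syscalls; error handling trimmed to the essentials.
package main

import "syscall"

func bindMountRO(src, dst string) error {
	// First create the bind mount...
	if err := syscall.Mount(src, dst, "none", syscall.MS_BIND, ""); err != nil {
		return err
	}
	// ...then remount it read-only: MS_RDONLY does not take effect when
	// combined with MS_BIND in the initial call on older kernels.
	return syscall.Mount(src, dst, "none", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY, "")
}

func main() {
	if err := bindMountRO("/var/lib/docker/volumes/v1", "/mnt/root/volume"); err != nil {
		panic(err)
	}
}
```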
+ +func unmountVolumesForContainer(container *Container) { + var ( + root = container.RootfsPath() + mounts = []string{ + root, + filepath.Join(root, "/.dockerinit"), + filepath.Join(root, "/.dockerenv"), + filepath.Join(root, "/etc/resolv.conf"), + } + ) + + if container.HostnamePath != "" && container.HostsPath != "" { + mounts = append(mounts, filepath.Join(root, "/etc/hostname"), filepath.Join(root, "/etc/hosts")) + } + + for r := range container.Volumes { + mounts = append(mounts, filepath.Join(root, r)) + } + + // Unmount in reverse order so nested mounts are released before their parents. + for i := len(mounts) - 1; i >= 0; i-- { + if lastError := mount.Unmount(mounts[i]); lastError != nil { + log.Printf("Failed to unmount %v: %v", mounts[i], lastError) + } + } +} + +func applyVolumesFrom(container *Container) error { + if container.Config.VolumesFrom != "" { + for _, containerSpec := range strings.Split(container.Config.VolumesFrom, ",") { + var ( + mountRW = true + specParts = strings.SplitN(containerSpec, ":", 2) + ) + + switch len(specParts) { + case 0: + return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom) + case 2: + switch specParts[1] { + case "ro": + mountRW = false + case "rw": // mountRW is already true + default: + return fmt.Errorf("Malformed volumes-from specification: %s", containerSpec) + } + } + + c := container.runtime.Get(specParts[0]) + if c == nil { + return fmt.Errorf("Container %s not found: cannot mount its volumes", specParts[0]) + } + + for volPath, id := range c.Volumes { + if _, exists := container.Volumes[volPath]; exists { + continue + } + if err := os.MkdirAll(filepath.Join(container.basefs, volPath), 0755); err != nil { + return err + } + container.Volumes[volPath] = id + if isRW, exists := c.VolumesRW[volPath]; exists { + container.VolumesRW[volPath] = isRW && mountRW + } + } + + } + } + return nil +} + +func getBindMap(container *Container) (map[string]BindMap, error) { + var ( + // Create the requested bind mounts + binds = make(map[string]BindMap) + // Define illegal container destinations + illegalDsts = []string{"/", "."} + ) + + for _, bind := range container.hostConfig.Binds { + // FIXME: factorize bind parsing in parseBind + var ( + src, dst, mode string + arr = strings.Split(bind, ":") + ) + + if len(arr) == 2 { + src = arr[0] + dst = arr[1] + mode = "rw" + } else if len(arr) == 3 { + src = arr[0] + dst = arr[1] + mode = arr[2] + } else { + return nil, fmt.Errorf("Invalid bind specification: %s", bind) + } + + // Bail if trying to mount to an illegal destination + for _, illegal := range illegalDsts { + if dst == illegal { + return nil, fmt.Errorf("Illegal bind destination: %s", dst) + } + } + + bindMap := BindMap{ + SrcPath: src, + DstPath: dst, + Mode: mode, + } + binds[filepath.Clean(dst)] = bindMap + } + return binds, nil +} + +func createVolumes(container *Container) error { + binds, err := getBindMap(container) + if err != nil { + return err + } + + volumesDriver := container.runtime.volumes.driver + // Create the requested volumes if they don't exist + for volPath := range container.Config.Volumes { + volPath = filepath.Clean(volPath) + volIsDir := true + // Skip existing volumes + if _, exists := container.Volumes[volPath]; exists { + continue + } + var srcPath string + var isBindMount bool + srcRW := false + // If an external bind is defined for this volume, use that as a source + if bindMap, exists := binds[volPath]; exists { + isBindMount = true + srcPath = bindMap.SrcPath + if strings.ToLower(bindMap.Mode) == "rw" { + srcRW = true + } + if stat, err := os.Stat(bindMap.SrcPath); err != nil { + return err + } else { + volIsDir = stat.IsDir() + } + // Otherwise, create a directory in $ROOT/volumes/ and use that + } else { + + // Do not pass a container as the parameter for the volume creation. + // The graph driver would use the container's information (its image) to + // create the parent.
+ c, err := container.runtime.volumes.Create(nil, nil, "", "", nil) + if err != nil { + return err + } + srcPath, err = volumesDriver.Get(c.ID) + if err != nil { + return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err) + } + srcRW = true // RW by default + } + + if p, err := filepath.EvalSymlinks(srcPath); err != nil { + return err + } else { + srcPath = p + } + + container.Volumes[volPath] = srcPath + container.VolumesRW[volPath] = srcRW + + // Create the mountpoint + volPath = filepath.Join(container.basefs, volPath) + rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.basefs) + if err != nil { + return err + } + + if _, err := os.Stat(rootVolPath); err != nil { + if os.IsNotExist(err) { + if volIsDir { + if err := os.MkdirAll(rootVolPath, 0755); err != nil { + return err + } + } else { + if err := os.MkdirAll(filepath.Dir(rootVolPath), 0755); err != nil { + return err + } + if f, err := os.OpenFile(rootVolPath, os.O_CREATE, 0755); err != nil { + return err + } else { + f.Close() + } + } + } + } + + // Do not copy or change permissions if we are mounting from the host + if srcRW && !isBindMount { + volList, err := ioutil.ReadDir(rootVolPath) + if err != nil { + return err + } + if len(volList) > 0 { + srcList, err := ioutil.ReadDir(srcPath) + if err != nil { + return err + } + if len(srcList) == 0 { + // If the source volume is empty copy files from the root into the volume + if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil { + return err + } + + var stat syscall.Stat_t + if err := syscall.Stat(rootVolPath, &stat); err != nil { + return err + } + var srcStat syscall.Stat_t + if err := syscall.Stat(srcPath, &srcStat); err != nil { + return err + } + // Change the source volume's ownership if it differs from the root + // files that were just copied + if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid { + if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + } + } + } + } + } + return nil +}
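Finally, a note on the bind specifications consumed by getBindMap above: each -v value is src:dst with an optional :mode suffix that defaults to rw. A standalone sketch of that parsing contract (helper and type names hypothetical; the real code lives in volumes.go):

```go
// Sketch of the "src:dst[:mode]" parsing performed by getBindMap.
package main

import (
	"fmt"
	"strings"
)

type bindSpec struct {
	src, dst, mode string
}

func parseBind(bind string) (bindSpec, error) {
	arr := strings.Split(bind, ":")
	switch len(arr) {
	case 2:
		return bindSpec{src: arr[0], dst: arr[1], mode: "rw"}, nil // mode defaults to rw
	case 3:
		return bindSpec{src: arr[0], dst: arr[1], mode: arr[2]}, nil
	default:
		return bindSpec{}, fmt.Errorf("Invalid bind specification: %s", bind)
	}
}

func main() {
	for _, b := range []string{"/host/data:/data", "/host/logs:/logs:ro", "broken"} {
		spec, err := parseBind(b)
		fmt.Println(spec, err)
	}
}
```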