Merge pull request #27332 from mstanleyjones/27297

Remove contents of docs/ except for reference, point to new location
Misty Stanley-Jones 2016-10-14 14:12:09 -07:00 committed by GitHub
commit fba04ed0ab
311 changed files with 17 additions and 30403 deletions


@@ -1,4 +1,4 @@
-.PHONY: all binary build cross deb docs help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration-cli test-unit tgz validate win
+.PHONY: all binary build cross deb help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration-cli test-unit tgz validate win
 # set the graph driver as the current graphdriver if not set
 DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
@@ -56,7 +56,6 @@ DOCKER_MOUNT := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_MOUNT) $(shell echo $
 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
 GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
 DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
-DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
 DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",)
 DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD)
@@ -93,8 +92,6 @@ cross: build ## cross build the binaries for darwin, freebsd and\nwindows
 deb: build ## build the deb packages
 	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-deb
-docs: ## build the docs
-	$(MAKE) -C docs docs
 help: ## this help
 	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
@@ -140,4 +137,3 @@ validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isol
 win: build ## cross build the binary for windows
 	$(DOCKER_RUN_DOCKER) hack/make.sh win


@@ -219,7 +219,7 @@ We are always open to suggestions on process improvements, and are always lookin
 IRC is a rich chat protocol but it can overwhelm new users. You can search
 <a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
 </p>
-Read our <a href="https://docs.docker.com/project/get-help/#irc-quickstart" target="_blank">IRC quickstart guide</a> for an easy way to get started.
+Read our <a href="https://docs.docker.com/opensource/get-help/#/irc-quickstart" target="_blank">IRC quickstart guide</a> for an easy way to get started.
 </td>
 </tr>
 <tr>

docs/.gitignore

@@ -1,2 +0,0 @@
# avoid committing the awsconfig file used for releases
awsconfig


@@ -1,6 +0,0 @@
FROM docs/base:oss
ENV PROJECT=engine
# To get the git info for this repo
COPY . /src
RUN rm -rf /docs/content/$PROJECT/
COPY . /docs/content/$PROJECT/


@@ -1,39 +0,0 @@
.PHONY: default docs docs-build docs-draft docs-shell test

# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs)
DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR))

# to allow `make DOCSPORT=9000 docs`
DOCSPORT := 8000

# Get the IP ADDRESS
DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''")
HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)")
HUGO_BIND_IP=0.0.0.0

GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))

DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE

# for some docs workarounds (see below in "docs-build" target)
GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)

default: docs

docs: docs-build
	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP)

docs-draft: docs-build
	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP)

docs-shell: docs-build
	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash

test: docs-build
	$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)"

docs-build:
	docker build -t "$(DOCKER_DOCS_IMAGE)" .
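For reference, local previews were typically driven through the targets above like this (a sketch; ports and variables as defined in the Makefile):

```bash
$ make docs                      # build the docs image and serve the site on port 8000
$ make DOCSPORT=9000 docs        # serve on an alternate port
$ make DOCSDIR=docs docs-shell   # bind-mount the local docs/ directory and open a shell
```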


@@ -1,288 +1,21 @@

New file contents:

# The non-reference docs have been moved!

The documentation for Docker Engine has been merged into
[the general documentation repo](https://github.com/docker/docker.github.io).

See the [README](https://github.com/docker/docker.github.io/blob/master/README.md)
for instructions on contributing to and building the documentation.

If you'd like to edit the current published version of the Engine docs,
do it in the master branch here:
https://github.com/docker/docker.github.io/tree/master/engine

If you need to document the functionality of an upcoming Engine release,
use the `vnext-engine` branch:
https://github.com/docker/docker.github.io/tree/vnext-engine/engine

The reference docs have been left in docker/docker (this repo), which remains
the place to edit them.

The docs in the general repo are open-source and we appreciate
your feedback and pull requests!

Removed file contents:

<!--[metadata]>
+++
draft = true
+++
<![end-metadata]-->

# Docker Documentation

The source for Docker documentation is in this directory. Our
documentation uses extended Markdown, as implemented by
[MkDocs](http://mkdocs.org). The current release of the Docker documentation
resides on [https://docs.docker.com](https://docs.docker.com).

## Understanding the documentation branches and processes

Docker has two primary branches for documentation:

| Branch   | Description                            | URL (published via commit-hook)                    |
|----------|----------------------------------------|----------------------------------------------------|
| `docs`   | Official release documentation         | [https://docs.docker.com](https://docs.docker.com) |
| `master` | Merged but unreleased development work |                                                    |

Additions and updates to upcoming releases are made in a feature branch off of
the `master` branch. The Docker maintainers also support a `docs` branch that
contains the last release of documentation.
After a release, documentation updates are continually merged into `master` as
they occur. This work includes new documentation for forthcoming features, bug
fixes, and other updates.
Periodically, the Docker maintainers update `docs.docker.com` between official
releases of Docker. They do this by cherry-picking commits from `master`,
merging them into `docs`, and then publishing the result.
In the rare case where a change is not forward-compatible, changes may be made
on other branches by special arrangement with the Docker maintainers.
### Quickstart for documentation contributors
If you are a new or beginner contributor, we encourage you to read through
[our detailed contributors guide](https://docs.docker.com/opensource/code/). The guide explains in
detail, with examples, how to contribute. If you are an experienced contributor,
this quickstart should be enough to get you started.
The following is the essential workflow for contributing to the documentation:
1. Fork the `docker/docker` repository.
2. Clone the repository to your local machine.
3. Select an issue from `docker/docker` to work on or submit a proposal of your
own.
4. Create a feature branch from `master` in which to work.
By basing from `master` your work is automatically included in the next
release. It also allows docs maintainers to easily cherry-pick your changes
into the `docs` release branch.
5. Modify existing or add new `.md` files to the `docs` directory.
6. As you work, build the documentation site locally to see your changes.
The `docker/docker` repository contains a `Dockerfile` and a `Makefile`.
Together, these create a development environment in which you can build and
run a container running the Docker documentation website. To build the
documentation site, enter `make docs` in the `docs` directory of your `docker/docker` fork:
        $ make docs
        .... (lots of output) ....
        docker run --rm -it -e AWS_S3_BUCKET -p 8000:8000 "docker-docs:master" mkdocs serve
        Running at: http://0.0.0.0:8000/
        Live reload enabled.
        Hold ctrl+c to quit.
The build creates an image containing all the required tools, adds the local
`docs/` directory and generates the HTML files. Then, it runs a Docker
container with this image.
The container exposes port 8000 on the localhost so that you can connect and
see your changes. If you use Docker Machine, the `docker-machine ip
<machine-name>` command gives you the address of your server.
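For example, with a Docker Machine VM (the machine name `default` is an assumption):

```bash
$ make docs                   # run from the docs/ directory of your fork
$ docker-machine ip default   # prints the VM's IP; browse to http://<that-ip>:8000/
```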
7. Check your writing for style and mechanical errors.
Use our [documentation style
guide](https://docs.docker.com/opensource/doc-style/) to check style. There are
several [good grammar and spelling online
checkers](http://www.hemingwayapp.com/) that can check your writing
mechanics.
8. Squash your commits on your branch.
9. Make a pull request from your fork back to Docker's `master` branch.
10. Work with the reviewers until your change is approved and merged.
### Debugging and testing
If you have any issues you need to debug, you can use `make docs-shell` and then
run `mkdocs serve`. You can use `make docs-test` to generate a report of missing
links that are referenced in the documentation&mdash;there should be none.
## Style guide
If you have questions about how to write for Docker's documentation, please see
the [style guide](https://docs.docker.com/opensource/doc-style/). The style guide provides
guidance about grammar, syntax, formatting, styling, language, or tone. If
something isn't clear in the guide, please submit an issue to let us know or
submit a pull request to help us improve it.
## Publishing documentation (for Docker maintainers)
To publish Docker's documentation you need to have Docker up and running on your
machine. You'll also need a `docs/awsconfig` file containing the settings you
need to access the AWS bucket you'll be deploying to.
The process for publishing is to build first to an AWS bucket, verify the build,
and then publish the final release.
1. Have Docker installed and running on your machine.
2. Ask the core maintainers for the `awsconfig` file.
3. Copy the `awsconfig` file to the `docs/` directory.
The `awsconfig` file contains the profiles of the S3 buckets for our
documentation sites. (If needed, the release script creates an S3 bucket and
pushes the files to it.) Each profile has this format:
        [profile dowideit-docs]
        aws_access_key_id = IHOIUAHSIDH234rwf....
        aws_secret_access_key = OIUYSADJHLKUHQWIUHE......
        region = ap-southeast-2
The `profile` name must be the same as the name of the bucket you are
deploying to.
4. Run `make` from the `docker` directory.

        $ make AWS_S3_BUCKET=dowideit-docs docs-release
This publishes _only_ to the `http://bucket-url/v1.2/` version of the
documentation.
5. If you're publishing the current release's documentation, you need to also
update the root docs pages by running:

        $ make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release
### Errors publishing using a Docker Machine VM
Sometimes, in a Windows or Mac environment, the publishing procedure returns this
error:

    Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2:
    dial unix /var/run/docker.sock: no such file or directory.

If this happens, set the Docker host. Run the following command to get the
variables in your shell:

    docker-machine env <machine-name>

Then, set your environment accordingly.
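A one-line way to do that, assuming a machine named `default`:

```bash
$ eval "$(docker-machine env default)"   # exports DOCKER_HOST and friends into this shell
```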
## Cherry-picking documentation changes to update an existing release.
Whenever the core team makes a release, they publish the documentation based on
the `release` branch. At that time, the `release` branch is copied into the
`docs` branch. The documentation team makes updates between Docker releases by
cherry-picking changes from `master` into any of the documentation branches.
Typically, we cherry-pick into the `docs` branch.
For example, to update the current release's docs, do the following:
1. Go to your `docker/docker` fork and get the latest from master.
        $ git fetch upstream
2. Checkout a new branch based on `upstream/docs`.
You should give your new branch a descriptive name.
        $ git checkout -b post-1.2.0-docs-update-1 upstream/docs
3. In a browser window, open <https://github.com/docker/docker/commits/master>.
4. Locate the merges you want to publish.
You should only cherry-pick individual commits; do not cherry-pick merge
commits. To minimize merge conflicts, start with the oldest commit and work
your way forward in time.
5. Copy the commit SHA from GitHub.
6. Cherry-pick the commit.
        $ git cherry-pick -x fe845c4
7. Repeat until you have cherry-picked everything you want to merge.
8. Push your changes to your fork.
        $ git push origin post-1.2.0-docs-update-1
9. Make a pull request to merge into the `docs` branch.
Do __NOT__ merge into `master`.
10. Have maintainers review your pull request.
11. Once the PR has the needed "LGTMs", merge it on GitHub.
12. Return to your local fork and make sure you are still on the `docs` branch.
        $ git checkout docs
13. Fetch your merged pull request from `docs`.
        $ git fetch upstream docs
14. Ensure your branch is clean and set to the latest.
        $ git reset --hard upstream/docs
15. Copy the `awsconfig` file into the `docs` directory.
16. Make the beta documentation:

        $ make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release
17. Open [the beta website](http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/)
and make sure what you published is correct.
18. When you're happy with your content, publish the docs to our live site:

        $ make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes DISTRIBUTION_ID=C2K6......FL2F docs-release
19. Test the uncached version of the live docs at <http://docs.docker.com.s3-website-us-east-1.amazonaws.com/>.
### Caching and the docs
New docs do not appear live on the site until the cache (a complex, distributed
CDN system) is flushed. The `make docs-release` command flushes the cache _if_
the `DISTRIBUTION_ID` is set to the Cloudfront distribution ID. The cache flush
can take at least 15 minutes to run and you can check its progress with the CDN
Cloudfront Purge Tool Chrome app.
## Removing files from the docs.docker.com site
Sometimes it becomes necessary to remove files from the historical published documentation.
The most reliable way to do this is to do it directly using `aws s3` commands running in a
docs container:
Start the docs container like `make docs-shell`, but bind mount in your `awsconfig`:
```
docker run --rm -it -v $(CURDIR)/docs/awsconfig:/docs/awsconfig docker-docs:master bash
```
The following example shows deleting two documents from S3, and then requesting that the
CloudFront cache invalidate them:
```
export BUCKET=docs.docker.com
export AWS_CONFIG_FILE=$(pwd)/awsconfig
aws s3 --profile $BUCKET ls s3://$BUCKET
aws s3 --profile $BUCKET rm s3://$BUCKET/v1.0/reference/api/docker_io_oauth_api/index.html
aws s3 --profile $BUCKET rm s3://$BUCKET/v1.1/reference/api/docker_io_oauth_api/index.html
aws configure set preview.cloudfront true
export DISTRIBUTION_ID=YUTIYUTIUTIUYTIUT
aws cloudfront create-invalidation --profile docs.docker.com --distribution-id $DISTRIBUTION_ID --invalidation-batch '{"Paths":{"Quantity":1, "Items":["/v1.0/reference/api/docker_io_oauth_api/"]},"CallerReference":"6Mar2015sventest1"}'
aws cloudfront create-invalidation --profile docs.docker.com --distribution-id $DISTRIBUTION_ID --invalidation-batch '{"Paths":{"Quantity":1, "Items":["/v1.1/reference/api/docker_io_oauth_api/"]},"CallerReference":"6Mar2015sventest1"}'
```
### Generate the man pages
For information on generating man pages (short for manual page), see the README.md
document in [the man page directory](https://github.com/docker/docker/tree/master/man)
in this project.


@@ -1,159 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/articles/ambassador_pattern_linking/"]
title = "Link via an ambassador container"
description = "Using the Ambassador pattern to abstract (network) services"
keywords = ["Examples, Usage, links, docker, documentation, examples, names, name, container naming"]
[menu.main]
parent = "engine_admin"
weight = 15
+++
<![end-metadata]-->
# Link via an ambassador container
Rather than hardcoding network links between a service consumer and a
provider, Docker encourages service portability. For example, instead of:

    (consumer) --> (redis)

which requires you to restart the `consumer` to attach it to a different
`redis` service, you can add ambassadors:

    (consumer) --> (redis-ambassador) --> (redis)

or

    (consumer) --> (redis-ambassador) ---network---> (redis-ambassador) --> (redis)
When you need to rewire your consumer to talk to a different Redis
server, you can just restart the `redis-ambassador` container that the
consumer is connected to.
This pattern also allows you to transparently move the Redis server to a
different docker host from the consumer.
Using the `svendowideit/ambassador` container, the link wiring is
controlled entirely from the `docker run` parameters.
## Two host example
Start the actual Redis server on one Docker host:

    big-server $ docker run -d --name redis crosbymichael/redis

Then add an ambassador linked to the Redis server, mapping a port to the
outside world:

    big-server $ docker run -d --link redis:redis --name redis_ambassador -p 6379:6379 svendowideit/ambassador

On the other host, you can set up another ambassador, setting environment
variables for each remote port we want to proxy to the `big-server`:

    client-server $ docker run -d --name redis_ambassador --expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador

Then on the `client-server` host, you can use a Redis client container
to talk to the remote Redis server, just by linking to the local Redis
ambassador:

    client-server $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
    redis 172.17.0.160:6379> ping
    PONG
PONG
## How it works
The following example shows what the `svendowideit/ambassador` container
does automatically (with a tiny amount of `sed`).
On the Docker host (192.168.1.52) that Redis will run on:
    # start actual redis server
    $ docker run -d --name redis crosbymichael/redis

    # get a redis-cli image for connection testing
    $ docker pull relateiq/redis-cli

    # test the redis server by talking to it directly
    $ docker run -t -i --rm --link redis:redis relateiq/redis-cli
    redis 172.17.0.136:6379> ping
    PONG
    ^D

    # add redis ambassador
    $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 alpine:3.2 sh
In the `redis_ambassador` container, you can see the linked Redis
container's `env`:
    / # env
    REDIS_PORT=tcp://172.17.0.136:6379
    REDIS_PORT_6379_TCP_ADDR=172.17.0.136
    REDIS_NAME=/redis_ambassador/redis
    HOSTNAME=19d7adf4705e
    SHLVL=1
    HOME=/root
    REDIS_PORT_6379_TCP_PORT=6379
    REDIS_PORT_6379_TCP_PROTO=tcp
    REDIS_PORT_6379_TCP=tcp://172.17.0.136:6379
    TERM=xterm
    PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
    PWD=/
    / # exit
This environment is used by the ambassador `socat` script to expose Redis
to the world (via the `-p 6379:6379` port mapping):
    $ docker rm redis_ambassador
    $ CMD="apk update && apk add socat && sh"
    $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 alpine:3.2 sh -c "$CMD"
    [...]
    / # socat -t 100000000 TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379
Now the Redis server can be pinged via the ambassador. Next, go to a different server:

    $ CMD="apk update && apk add socat && sh"
    $ docker run -t -i --expose 6379 --name redis_ambassador alpine:3.2 sh -c "$CMD"
    [...]
    / # socat -t 100000000 TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379

And get the `redis-cli` image so we can talk over the ambassador bridge:

    $ docker pull relateiq/redis-cli
    $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
    redis 172.17.0.160:6379> ping
    PONG
## The svendowideit/ambassador Dockerfile
The `svendowideit/ambassador` image is based on the `alpine:3.2` image with
`socat` installed. When you start the container, it uses a small `sed`
script to parse out the (possibly multiple) link environment variables
to set up the port forwarding. On the remote host, you need to set the
variable using the `-e` command line option.
    --expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379

This forwards the local port `1234` to the remote IP and port, in this
case `192.168.1.52:6379`.
#
# do
# docker build -t svendowideit/ambassador .
# then to run it (on the host that has the real backend on it)
# docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 svendowideit/ambassador
# on the remote host, you can set up another ambassador
# docker run -t -i -name redis_ambassador -expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador sh
# you can read more about this process at https://docs.docker.com/articles/ambassador_pattern_linking/
# use alpine because it's a minimal image with a package manager.
# pretty much all that is needed is a container that has a functioning env and socat (or equivalent)
FROM alpine:3.2
MAINTAINER SvenDowideit@home.org.au
RUN apk update && \
apk add socat && \
rm -r /var/cache/
CMD env | grep _TCP= | (sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' && echo wait) | sh
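Unpacked, that `CMD` pipeline works roughly as follows (an annotated sketch of the same one-liner):

```bash
# For every linked-container variable of the form
#   <NAME>_PORT_<port>_TCP=tcp://<ip>:<port>
# emit a backgrounded socat forwarder, then `wait` so the shell stays alive
# while the forwarders run:
env | grep _TCP= \
  | (sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' \
     && echo wait) \
  | sh
```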

(Seven binary image files deleted; not shown. Sizes: 27 KiB, 35 KiB, 30 KiB, 28 KiB, 76 KiB, 70 KiB, 9.4 KiB.)


@@ -1,165 +0,0 @@
<!--[metadata]>
+++
draft = "true"
title = "Resizing a Boot2Docker volume "
description = "Resizing a Boot2Docker volume in VirtualBox with GParted"
keywords = ["boot2docker, volume, virtualbox"]
[menu.main]
parent = "smn_win_osx"
+++
<![end-metadata]-->
# Getting "no space left on device" errors with Boot2Docker?
If you're using Boot2Docker with a large number of images, or the images you're
working with are very large, your pulls might start failing with "no space left
on device" errors when the Boot2Docker volume fills up. There are two solutions
you can try.
## Solution 1: Add the `DiskImage` property in boot2docker profile
The `boot2docker` command reads its configuration from the `$BOOT2DOCKER_PROFILE` if set, or `$BOOT2DOCKER_DIR/profile` or `$HOME/.boot2docker/profile` (on Windows this is `%USERPROFILE%/.boot2docker/profile`).
1. To view the existing configuration, use the `boot2docker config` command.
        $ boot2docker config
        # boot2docker profile filename: /Users/mary/.boot2docker/profile
        Init = false
        Verbose = false
        Driver = "virtualbox"
        Clobber = true
        ForceUpgradeDownload = false
        SSH = "ssh"
        SSHGen = "ssh-keygen"
        SSHKey = "/Users/mary/.ssh/id_boot2docker"
        VM = "boot2docker-vm"
        Dir = "/Users/mary/.boot2docker"
        ISOURL = "https://api.github.com/repos/boot2docker/boot2docker/releases"
        ISO = "/Users/mary/.boot2docker/boot2docker.iso"
        DiskSize = 20000
        Memory = 2048
        CPUs = 8
        SSHPort = 2022
        DockerPort = 0
        HostIP = "192.168.59.3"
        DHCPIP = "192.168.59.99"
        NetMask = [255, 255, 255, 0]
        LowerIP = "192.168.59.103"
        UpperIP = "192.168.59.254"
        DHCPEnabled = true
        Serial = false
        SerialFile = "/Users/mary/.boot2docker/boot2docker-vm.sock"
        Waittime = 300
        Retries = 75
The configuration shows you where `boot2docker` is looking for the `profile` file. It also outputs the settings that are in use.
2. Initialize a default file to customize, using the `boot2docker config > ~/.boot2docker/profile` command.

3. Add the following lines to `$HOME/.boot2docker/profile`:

        # Disk image size in MB
        DiskSize = 50000

4. Run the following sequence of commands to restart Boot2Docker with the new settings.

        $ boot2docker poweroff
        $ boot2docker destroy
        $ boot2docker init
        $ boot2docker up
## Solution 2: Increase the size of boot2docker volume
This solution increases the volume size by first cloning it, then resizing it
using a disk partitioning tool. We recommend
[GParted](https://sourceforge.net/projects/gparted/files/). The tool comes
as a bootable ISO, is a free download, and works well with VirtualBox.
1. Stop Boot2Docker
Issue the command to stop the Boot2Docker VM on the command line:
        $ boot2docker stop
2. Clone the VMDK image to a VDI image
Boot2Docker ships with a VMDK image, which can't be resized by VirtualBox's
native tools. We will instead create a VDI volume and clone the VMDK volume to
it.
3. Using the command line VirtualBox tools, clone the VMDK image to a VDI image:
        $ vboxmanage clonehd /full/path/to/boot2docker-hd.vmdk /full/path/to/<newVDIimage>.vdi --format VDI --variant Standard
4. Resize the VDI volume
Choose a size that will be appropriate for your needs. If you're spinning up a
lot of containers, or your containers are particularly large, larger will be
better:
        $ vboxmanage modifyhd /full/path/to/<newVDIimage>.vdi --resize <size in MB>
5. Download a disk partitioning tool ISO
To resize the volume, we'll use [GParted](https://sourceforge.net/projects/gparted/files/).
Once you've downloaded the tool, add the ISO to the Boot2Docker VM IDE bus.
You might need to create the bus before you can add the ISO.
> **Note:**
> It's important that you choose a partitioning tool that is available as an ISO so
> that the Boot2Docker VM can be booted with it.
<table>
<tr>
<td><img src="/articles/b2d_volume_images/add_new_controller.png"><br><br></td>
</tr>
<tr>
<td><img src="/articles/b2d_volume_images/add_cd.png"></td>
</tr>
</table>
6. Add the new VDI image
In the settings for the Boot2Docker image in VirtualBox, remove the VMDK image
from the SATA controller and add the VDI image.
<img src="/articles/b2d_volume_images/add_volume.png">
7. Verify the boot order
In the **System** settings for the Boot2Docker VM, make sure that **CD/DVD** is
at the top of the **Boot Order** list.
<img src="/articles/b2d_volume_images/boot_order.png">
8. Boot to the disk partitioning ISO
Manually start the Boot2Docker VM in VirtualBox, and the disk partitioning ISO
should start up. Using GParted, choose the **GParted Live (default settings)**
option. Choose the default keyboard, language, and XWindows settings, and the
GParted tool will start up and display the VDI volume you created. Right click
on the VDI and choose **Resize/Move**.
<img src="/articles/b2d_volume_images/gparted.png">
9. Drag the slider representing the volume to the maximum available size.
10. Click **Resize/Move** followed by **Apply**.
<img src="/articles/b2d_volume_images/gparted2.png">
11. Quit GParted and shut down the VM.
12. Remove the GParted ISO from the IDE controller for the Boot2Docker VM in
VirtualBox.
13. Start the Boot2Docker VM
Fire up the Boot2Docker VM manually in VirtualBox. The VM should log in
automatically, but if it doesn't, the credentials are `docker/tcuser`. Using
the `df -h` command, verify that your changes took effect.
<img src="/articles/b2d_volume_images/verify.png">
You're done!


@@ -1,76 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/articles/chef/"]
title = "Using Chef"
description = "Installation and using Docker via Chef"
keywords = ["chef, installation, usage, docker, documentation"]
[menu.main]
parent = "engine_admin"
weight="11"
+++
<![end-metadata]-->
# Using Chef
> **Note**:
> Please note this is a community contributed installation path.
## Requirements
To use this guide you'll need a working installation of
[Chef](https://www.chef.io/). This cookbook supports a variety of
operating systems.
## Installation
The cookbook is available on the [Chef Supermarket](https://supermarket.chef.io/cookbooks/docker) and can be
installed using your favorite cookbook dependency manager.
The source can be found on
[GitHub](https://github.com/someara/chef-docker).
## Usage

- Add `depends 'docker', '~> 2.0'` to your cookbook's `metadata.rb`.
- Use the resources shipped in the cookbook in a recipe, the same way you'd
  use core Chef resources (file, template, directory, package, etc.).
```ruby
docker_service 'default' do
action [:create, :start]
end
docker_image 'busybox' do
action :pull
end
docker_container 'an echo server' do
repo 'busybox'
port '1234:1234'
command "nc -ll -p 1234 -e /bin/cat"
end
```
## Getting Started
Here's a quick example of pulling the latest image and running a
container with exposed ports.
```ruby
# Pull latest image
docker_image 'nginx' do
tag 'latest'
action :pull
end
# Run container exposing ports
docker_container 'my_nginx' do
repo 'nginx'
tag 'latest'
port '80:80'
binds [ '/some/local/files/:/etc/nginx/conf.d' ]
host_name 'www'
domain_name 'computers.biz'
env 'FOO=bar'
subscribes :redeploy, 'docker_image[nginx]'
end
```


@@ -1,175 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/articles/dsc/"]
title = "PowerShell DSC Usage"
description = "Using DSC to configure a new Docker host"
keywords = ["powershell, dsc, installation, usage, docker, documentation"]
[menu.main]
parent = "engine_admin"
weight="10"
+++
<![end-metadata]-->
# Using PowerShell DSC
Windows PowerShell Desired State Configuration (DSC) is a configuration
management tool that extends the existing functionality of Windows PowerShell.
DSC uses a declarative syntax to define the state in which a target should be
configured. More information about PowerShell DSC can be found at
[http://technet.microsoft.com/en-us/library/dn249912.aspx](http://technet.microsoft.com/en-us/library/dn249912.aspx).
## Requirements
To use this guide you'll need a Windows host with PowerShell v4.0 or newer.
The included DSC configuration script also uses the official PPA so
only an Ubuntu target is supported. The Ubuntu target must already have the
required OMI Server and PowerShell DSC for Linux providers installed. More
information can be found at [https://github.com/MSFTOSSMgmt/WPSDSCLinux](https://github.com/MSFTOSSMgmt/WPSDSCLinux).
The source repository listed below also includes PowerShell DSC for Linux
installation and init scripts along with more detailed installation information.
## Installation
The DSC configuration example source is available in the following repository:
[https://github.com/anweiss/DockerClientDSC](https://github.com/anweiss/DockerClientDSC). It can be cloned with:
$ git clone https://github.com/anweiss/DockerClientDSC.git
## Usage
The DSC configuration utilizes a set of shell scripts to determine whether or
not the specified Docker components are configured on the target node(s). The
source repository also includes a script (`RunDockerClientConfig.ps1`) that can
be used to establish the required CIM session(s) and execute the
`Set-DscConfiguration` cmdlet.
More detailed usage information can be found at
[https://github.com/anweiss/DockerClientDSC](https://github.com/anweiss/DockerClientDSC).
### Install Docker
The Docker installation configuration is equivalent to running:
```
apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys\
36A1D7869245C8950F966E92D8576A8BA88D21E9
sh -c "echo deb https://apt.dockerproject.org/repo ubuntu-trusty main\
> /etc/apt/sources.list.d/docker.list"
apt-get update
apt-get install docker-engine
```
Ensure that your current working directory is set to the `DockerClientDSC`
source and load the DockerClient configuration into the current PowerShell
session
```powershell
. .\DockerClient.ps1
```
Generate the required DSC configuration .mof file for the targeted node
```powershell
DockerClient -Hostname "myhost"
```
A sample DSC configuration data file has also been included and can be modified
and used in conjunction with or in place of the `Hostname` parameter:
```powershell
DockerClient -ConfigurationData .\DockerConfigData.psd1
```
Start the configuration application process on the targeted node
```powershell
.\RunDockerClientConfig.ps1 -Hostname "myhost"
```
The `RunDockerClientConfig.ps1` script can also parse a DSC configuration data
file and execute configurations against multiple nodes as such:
```powershell
.\RunDockerClientConfig.ps1 -ConfigurationData .\DockerConfigData.psd1
```
### Images
Image configuration is equivalent to running: `docker pull [image]` or
`docker rmi -f [IMAGE]`.
Using the same steps defined above, execute `DockerClient` with the `Image`
parameter and apply the configuration:
```powershell
DockerClient -Hostname "myhost" -Image "node"
.\RunDockerClientConfig.ps1 -Hostname "myhost"
```
You can also configure the host to pull multiple images:
```powershell
DockerClient -Hostname "myhost" -Image "node","mongo"
.\RunDockerClientConfig.ps1 -Hostname "myhost"
```
To remove images, use a hashtable as follows:
```powershell
DockerClient -Hostname "myhost" -Image @{Name="node"; Remove=$true}
.\RunDockerClientConfig.ps1 -Hostname $hostname
```
### Containers
Container configuration is equivalent to running:
```
docker run -d --name="[containername]" -p '[port]' -e '[env]' --link '[link]'\
'[image]' '[command]'
```
or
```
docker rm -f [containername]
```
To create or remove containers, you can use the `Container` parameter with one
or more hashtables. The hashtable(s) passed to this parameter can have the
following properties:
- Name (required)
- Image (required unless Remove property is set to `$true`)
- Port
- Env
- Link
- Command
- Remove
For example, create a hashtable with the settings for your container:
```powershell
$webContainer = @{Name="web"; Image="anweiss/docker-platynem"; Port="80:80"}
```
Then, using the same steps defined above, execute
`DockerClient` with the `-Image` and `-Container` parameters:
```powershell
DockerClient -Hostname "myhost" -Image node -Container $webContainer
.\RunDockerClientConfig.ps1 -Hostname "myhost"
```
Existing containers can also be removed as follows:
```powershell
$containerToRemove = @{Name="web"; Remove=$true}
DockerClient -Hostname "myhost" -Container $containerToRemove
.\RunDockerClientConfig.ps1 -Hostname "myhost"
```
Here is a hashtable with all of the properties that can be used to create a
container:
```powershell
$containerProps = @{Name="web"; Image="node:latest"; Port="80:80"; `
Env="PORT=80"; Link="db:db"; Command="grunt"}
```


@@ -1,67 +0,0 @@
<!--[metadata]>
+++
title = "Format command and log output"
description = "CLI and log output formatting reference"
keywords = ["format, formatting, output, templates, log"]
[menu.main]
parent = "engine_admin"
weight=7
+++
<![end-metadata]-->
# Formatting reference
Docker uses [Go templates](https://golang.org/pkg/text/template/) to allow users to manipulate the output format
of certain commands and log drivers. Each command and driver provides a detailed
list of the elements it supports in its templates:
- [Docker Images formatting](../reference/commandline/images.md#formatting)
- [Docker Inspect formatting](../reference/commandline/inspect.md#examples)
- [Docker Log Tag formatting](logging/log_tags.md)
- [Docker Network Inspect formatting](../reference/commandline/network_inspect.md)
- [Docker PS formatting](../reference/commandline/ps.md#formatting)
- [Docker Stats formatting](../reference/commandline/stats.md#formatting)
- [Docker Volume Inspect formatting](../reference/commandline/volume_inspect.md)
- [Docker Version formatting](../reference/commandline/version.md#examples)
## Template functions

Docker provides a set of basic functions to manipulate template elements.
This is the complete list of the available functions with examples:

### Join

Join concatenates a list of strings to create a single string.
It puts a separator between each element in the list.

    $ docker ps --format '{{join .Names " or "}}'

### Json

Json encodes an element as a json string.

    $ docker inspect --format '{{json .Mounts}}' container

### Lower

Lower turns a string into its lower case representation.

    $ docker inspect --format "{{lower .Name}}" container

### Split

Split slices a string into a list of strings separated by a separator.

    $ docker inspect --format '{{split (join .Names "/") "/"}}' container

### Title

Title capitalizes a string.

    $ docker inspect --format "{{title .Name}}" container

### Upper

Upper turns a string into its upper case representation.

    $ docker inspect --format "{{upper .Name}}" container


@@ -1,103 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/articles/host_integration/"]
title = "Automatically start containers"
description = "How to generate scripts for upstart, systemd, etc."
keywords = ["systemd, upstart, supervisor, docker, documentation, host integration"]
[menu.main]
parent = "engine_admin"
weight="5"
+++
<![end-metadata]-->
# Automatically start containers
As of Docker 1.2,
[restart policies](../reference/run.md#restart-policies-restart) are the
built-in Docker mechanism for restarting containers when they exit. If set,
restart policies will be used when the Docker daemon starts up, as typically
happens after a system boot. Restart policies will ensure that linked containers
are started in the correct order.
If restart policies don't suit your needs (i.e., you have non-Docker processes
that depend on Docker containers), you can use a process manager like
[upstart](http://upstart.ubuntu.com/),
[systemd](http://freedesktop.org/wiki/Software/systemd/) or
[supervisor](http://supervisord.org/) instead.
## Using a process manager
Docker does not set any restart policies by default, but be aware that they will
conflict with most process managers. So don't set restart policies if you are
using a process manager.
When you have finished setting up your image and are happy with your
running container, you can then attach a process manager to manage it.
When you run `docker start -a`, Docker will automatically attach to the
running container, or start it if needed and forward all signals so that
the process manager can detect when a container stops and correctly
restart it.
Here are a few sample scripts for systemd and upstart to integrate with
Docker.
## Examples
The examples below show configuration files for two popular process managers,
upstart and systemd. In these examples, we'll assume that we have already
created a container to run Redis with `--name=redis_server`. These files define
a new service that will be started after the docker daemon service has started.
### upstart

    description "Redis container"
    author "Me"
    start on filesystem and started docker
    stop on runlevel [!2345]
    respawn
    script
      /usr/bin/docker start -a redis_server
    end script

### systemd

    [Unit]
    Description=Redis container
    Requires=docker.service
    After=docker.service

    [Service]
    Restart=always
    ExecStart=/usr/bin/docker start -a redis_server
    ExecStop=/usr/bin/docker stop -t 2 redis_server

    [Install]
    WantedBy=default.target
If you intend to use this as a system service, put the above contents in a file
in the `/etc/systemd/system` directory, e.g.
`/etc/systemd/system/docker-redis_server.service`.
If you need to pass options to the redis container (such as `--env`),
then you'll need to use `docker run` rather than `docker start`. This will
create a new container every time the service is started, which will be stopped
and removed when the service is stopped.
    [Service]
    ...
    ExecStart=/usr/bin/docker run --env foo=bar --name redis_server redis
    ExecStop=/usr/bin/docker stop -t 2 redis_server
    ExecStopPost=/usr/bin/docker rm -f redis_server
    ...
To start using the service, reload systemd and start the service:

    systemctl daemon-reload
    systemctl start docker-redis_server.service

To enable the service at system startup, execute:

    systemctl enable docker-redis_server.service
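A quick way to check the result (unit and container names as defined above):

```bash
$ systemctl status docker-redis_server.service   # should report the unit as active
$ docker ps --filter name=redis_server           # the container should be listed
```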


@@ -1,283 +0,0 @@
<!--[metadata]>
+++
aliases = [
"/engine/articles/configuring/",
"/engine/admin/configuring/"
]
title = "Configuring and running Docker"
description = "Configuring and running the Docker daemon on various distributions"
keywords = ["docker, daemon, configuration, running, process managers"]
[menu.main]
parent = "engine_admin"
weight = 0
+++
<![end-metadata]-->
# Configuring and running Docker on various distributions
After successfully installing Docker, the `docker` daemon runs with its default
configuration.
In a production environment, system administrators typically configure the
`docker` daemon to start and stop according to an organization's requirements. In most
cases, the system administrator configures a process manager such as `SysVinit`, `Upstart`,
or `systemd` to manage the `docker` daemon's start and stop.
### Running the docker daemon directly
The Docker daemon can be run directly using the `dockerd` command. By default it listens on
the Unix socket `unix:///var/run/docker.sock`:

    $ dockerd
    INFO[0000] +job init_networkdriver()
    INFO[0000] +job serveapi(unix:///var/run/docker.sock)
    INFO[0000] Listening for HTTP on unix (/var/run/docker.sock)
    ...
### Configuring the docker daemon directly
If you're running the Docker daemon directly by running `dockerd` instead
of using a process manager, you can append configuration options to the `dockerd`
command directly. Other options can be passed to the Docker daemon to configure it.
Some of the daemon's options are:
| Flag | Description |
|-----------------------|-----------------------------------------------------------|
| `-D`, `--debug=false` | Enable or disable debug mode. By default, this is false. |
| `-H`,`--host=[]` | Daemon socket(s) to connect to. |
| `--tls=false` | Enable or disable TLS. By default, this is false. |
Here is an example of running the Docker daemon with configuration options:
    $ dockerd -D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376
These options:
- Enable `-D` (debug) mode
- Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively
- Listen for connections on `tcp://192.168.59.3:2376`
The command line reference has the [complete list of daemon flags](../reference/commandline/dockerd.md)
with explanations.
### Daemon debugging
As noted above, setting the log level of the daemon to "debug" or enabling debug mode
with `-D` allows the administrator or operator to gain much more knowledge about the
runtime activity of the daemon. If faced with a non-responsive daemon, the administrator
can force a full stack trace of all threads to be added to the daemon log by sending the
`SIGUSR1` signal to the Docker daemon. A common way to send this signal is using the `kill`
command on Linux systems. For example, `kill -USR1 <daemon-pid>` sends the `SIGUSR1`
signal to the daemon process, causing the stack dump to be added to the daemon log.
> **Note:** The log level setting of the daemon must be at least "info" level and above for
> the stack trace to be saved to the logfile. By default the daemon's log level is set to
> "info".
The daemon will continue operating after handling the `SIGUSR1` signal and dumping the stack
traces to the log. The stack traces can be used to determine the state of all goroutines and
threads within the daemon.
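A minimal sketch of that procedure (the log location varies by init system; Upstart and systemd shown as possibilities):

```bash
$ sudo kill -USR1 $(pidof dockerd)               # request a stack dump from the daemon
$ sudo tail -n 100 /var/log/upstart/docker.log   # or: sudo journalctl -u docker
```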
## Ubuntu
As of `14.04`, Ubuntu uses Upstart as a process manager. By default, Upstart jobs
are located in `/etc/init` and the `docker` Upstart job can be found at `/etc/init/docker.conf`.
After successfully [installing Docker for Ubuntu](../installation/linux/ubuntulinux.md),
you can check the running status using Upstart in this way:
    $ sudo status docker
    docker start/running, process 989
### Running Docker
You can start/stop/restart the `docker` daemon using:

    $ sudo start docker
    $ sudo stop docker
    $ sudo restart docker
### Configuring Docker
The instructions below depict configuring Docker on a system that uses `upstart`
as the process manager. As of Ubuntu 15.04, Ubuntu uses `systemd` as its process
manager. For Ubuntu 15.04 and higher, refer to [control and configure Docker with systemd](systemd.md).
You configure the `docker` daemon in the `/etc/default/docker` file on your
system. You do this by specifying values in a `DOCKER_OPTS` variable.
To configure Docker options:
1. Log into your host as a user with `sudo` or `root` privileges.
2. If you don't have one, create the `/etc/default/docker` file on your host. Depending on how
you installed Docker, you may already have this file.
3. Open the file with your favorite editor.
```
$ sudo vi /etc/default/docker
```
4. Add a `DOCKER_OPTS` variable with the following options. These options are appended to the
`docker` daemon's run command.
```
DOCKER_OPTS="-D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376"
```
These options:
- Enable `-D` (debug) mode
- Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively
- Listen for connections on `tcp://192.168.59.3:2376`
The command line reference has the [complete list of daemon flags](../reference/commandline/dockerd.md)
with explanations.
5. Save and close the file.
6. Restart the `docker` daemon.
```
$ sudo restart docker
```
7. Verify that the `docker` daemon is running as specified with the `ps` command.
```
$ ps aux | grep docker | grep -v grep
```
### Logs
By default, logs for Upstart jobs are located in `/var/log/upstart`, and the log for the `docker` daemon
can be found at `/var/log/upstart/docker.log`:

    $ tail -f /var/log/upstart/docker.log
    INFO[0000] Loading containers: done.
    INFO[0000] Docker daemon commit=1b09a95-unsupported graphdriver=aufs version=1.11.0-dev
    INFO[0000] +job acceptconnections()
    INFO[0000] -job acceptconnections() = OK (0)
    INFO[0000] Daemon has completed initialization
## CentOS / Red Hat Enterprise Linux / Fedora
As of `7.x`, CentOS and RHEL use `systemd` as the process manager. As of `21`, Fedora uses
`systemd` as its process manager.
After successfully installing Docker for [CentOS](../installation/linux/centos.md)/[Red Hat Enterprise Linux](../installation/linux/rhel.md)/[Fedora](../installation/linux/fedora.md), you can check the running status in this way:
    $ sudo systemctl status docker
### Running Docker
You can start/stop/restart the `docker` daemon using:

    $ sudo systemctl start docker
    $ sudo systemctl stop docker
    $ sudo systemctl restart docker

If you want Docker to start at boot, also run:

    $ sudo systemctl enable docker
### Configuring Docker
For CentOS 7.x and RHEL 7.x you can [control and configure Docker with systemd](systemd.md).
Previously, for CentOS 6.x and RHEL 6.x you would configure the `docker` daemon in
the `/etc/sysconfig/docker` file on your system. You would do this by specifying
values in a `other_args` variable. For a short time in CentOS 7.x and RHEL 7.x you
would specify values in a `OPTIONS` variable. This is no longer recommended in favor
of using systemd directly.
For this section, we will use CentOS 7.x as an example to configure the `docker` daemon.
To configure Docker options:
1. Log into your host as a user with `sudo` or `root` privileges.
2. Create the `/etc/systemd/system/docker.service.d` directory.
```
$ sudo mkdir /etc/systemd/system/docker.service.d
```
3. Create a `/etc/systemd/system/docker.service.d/docker.conf` file.
4. Open the file with your favorite editor.
```
$ sudo vi /etc/systemd/system/docker.service.d/docker.conf
```
5. Override the `ExecStart` configuration from your `docker.service` file to customize
the `docker` daemon. To modify the `ExecStart` configuration you have to specify
an empty configuration followed by a new one as follows:
```
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd -H fd:// -D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376
```
These options:
- Enable `-D` (debug) mode
- Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively
- Listen for connections on `tcp://192.168.59.3:2376`
The command line reference has the [complete list of daemon flags](../reference/commandline/dockerd.md)
with explanations.
6. Save and close the file.
7. Flush changes.
```
$ sudo systemctl daemon-reload
```
8. Restart the `docker` daemon.
```
$ sudo systemctl restart docker
```
9. Verify that the `docker` daemon is running as specified with the `ps` command.
```
$ ps aux | grep docker | grep -v grep
```
### Logs
systemd has its own logging system called the journal. The logs for the `docker` daemon can
be viewed using `journalctl -u docker`:

    $ sudo journalctl -u docker
    May 06 00:22:05 localhost.localdomain systemd[1]: Starting Docker Application Container Engine...
    May 06 00:22:05 localhost.localdomain docker[2495]: time="2015-05-06T00:22:05Z" level="info" msg="+job serveapi(unix:///var/run/docker.sock)"
    May 06 00:22:05 localhost.localdomain docker[2495]: time="2015-05-06T00:22:05Z" level="info" msg="Listening for HTTP on unix (/var/run/docker.sock)"
    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job init_networkdriver()"
    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job init_networkdriver() = OK (0)"
    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: start."
    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: done."
    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Docker daemon commit=1b09a95-unsupported graphdriver=aufs version=1.11.0-dev"
    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job acceptconnections()"
    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job acceptconnections() = OK (0)"
_Note: Using and configuring the journal is an advanced topic and is beyond the scope of this article._


@@ -1,80 +0,0 @@
<!--[metadata]>
+++
title = "Keep containers alive during daemon downtime"
description = "How to keep containers running when the daemon isn't available."
keywords = ["docker, upgrade, daemon, dockerd, live-restore, daemonless container"]
[menu.main]
parent = "engine_admin"
weight="6"
+++
<![end-metadata]-->
# Keep containers alive during daemon downtime
By default, when the Docker daemon terminates, it shuts down running containers.
Starting with Docker Engine 1.12, you can configure the daemon so that containers remain
running if the daemon becomes unavailable. The live restore option helps reduce
container downtime due to daemon crashes, planned outages, or upgrades.
## Enable the live restore option
There are two ways to enable the live restore setting to keep containers alive
when the daemon becomes unavailable:
* If the daemon is already running and you don't want to stop it, you can add
the configuration to the daemon configuration file. For example, on a linux
system the default configuration file is `/etc/docker/daemon.json`.
Use your favorite editor to enable the `live-restore` option in the
`daemon.json`.
```bash
{
"live-restore": true
}
```
You have to send a `SIGHUP` signal to the daemon process for it to reload the
configuration (see the sketch after this list). For more information on how to
configure the Docker daemon using `daemon.json`, see
[daemon configuration file](../reference/commandline/dockerd.md#daemon-configuration-file).
* When you start the Docker daemon, pass the `--live-restore` flag:
```bash
$ sudo dockerd --live-restore
```
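A sketch of the reload step for the `daemon.json` route described above (assumes a single `dockerd` process):

```bash
$ sudo kill -SIGHUP $(pidof dockerd)   # reload daemon.json without stopping containers
```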
## Live restore during upgrades
The live restore feature supports restoring containers to the daemon for
upgrades from one minor release to the next. For example from Docker Engine
1.12.1 to 1.13.2.
If you skip releases during an upgrade, the daemon may not restore its connection
to the containers. If the daemon is unable to restore the connection, it ignores the
running containers and you must manage them manually. The daemon won't shut down
the disconnected containers.
## Live restore upon restart
Live restore works only if the daemon restarts with the same set of options it
had before it stopped. For example, live restore may not work if
the daemon restarts with a different bridge IP or a different graphdriver.
## Impact of live restore on running containers
A lengthy absence of the daemon can impact running containers. A container's
process writes to FIFO logs for daemon consumption. If the daemon is unavailable
to consume the output, the buffer fills up and blocks further writes to the
log. A full log buffer blocks the process until more space is available. The default
buffer size is typically 64K.

You must restart Docker to flush the buffers.
You can modify the kernel's buffer size by changing `/proc/sys/fs/pipe-max-size`.
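For example, to inspect and raise that ceiling (the new value is illustrative; tune kernel parameters with care):

```bash
$ cat /proc/sys/fs/pipe-max-size                      # current per-pipe ceiling in bytes
$ echo 1048576 | sudo tee /proc/sys/fs/pipe-max-size  # raise it to 1 MiB
```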
## Live restore and swarm mode
The live restore option is not compatible with Docker Engine swarm mode. When
the Docker Engine runs in swarm mode, the orchestration feature manages tasks
and keeps containers running according to a service specification.


@@ -1,90 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/reference/logging/awslogs/"]
title = "Amazon CloudWatch Logs logging driver"
description = "Describes how to use the Amazon CloudWatch Logs logging driver."
keywords = ["AWS, Amazon, CloudWatch, logging, driver"]
[menu.main]
parent = "smn_logging"
+++
<![end-metadata]-->
# Amazon CloudWatch Logs logging driver
The `awslogs` logging driver sends container logs to
[Amazon CloudWatch Logs](https://aws.amazon.com/cloudwatch/details/#log-monitoring).
Log entries can be retrieved through the [AWS Management
Console](https://console.aws.amazon.com/cloudwatch/home#logs:) or the [AWS SDKs
and Command Line Tools](http://docs.aws.amazon.com/cli/latest/reference/logs/index.html).
## Usage
You can configure the default logging driver by passing the `--log-driver`
option to the Docker daemon:

    dockerd --log-driver=awslogs

You can set the logging driver for a specific container by using the
`--log-driver` option to `docker run`:

    docker run --log-driver=awslogs ...
## Amazon CloudWatch Logs options
You can use the `--log-opt NAME=VALUE` flag to specify Amazon CloudWatch Logs logging driver options.
### awslogs-region
The `awslogs` logging driver sends your Docker logs to a specific region. Use
the `awslogs-region` log option or the `AWS_REGION` environment variable to set
the region. By default, if your Docker daemon is running on an EC2 instance
and no region is set, the driver uses the instance's region.
    docker run --log-driver=awslogs --log-opt awslogs-region=us-east-1 ...
### awslogs-group
You must specify a
[log group](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/WhatIsCloudWatchLogs.html)
for the `awslogs` logging driver. You can specify the log group with the
`awslogs-group` log option:
    docker run --log-driver=awslogs --log-opt awslogs-region=us-east-1 --log-opt awslogs-group=myLogGroup ...
### awslogs-stream
To configure which
[log stream](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/WhatIsCloudWatchLogs.html)
should be used, you can specify the `awslogs-stream` log option. If not
specified, the container ID is used as the log stream.
> **Note:**
> Log streams within a given log group should only be used by one container
> at a time. Using the same log stream for multiple containers concurrently
> can cause reduced logging performance.
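Putting these options together, a sketch of a fully specified run (the group and stream names are assumptions):

```bash
$ docker run --log-driver=awslogs \
    --log-opt awslogs-region=us-east-1 \
    --log-opt awslogs-group=myLogGroup \
    --log-opt awslogs-stream=myStream \
    alpine echo "hello cloudwatch"
```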
## Credentials
You must provide AWS credentials to the Docker daemon to use the `awslogs`
logging driver. You can provide these credentials with the `AWS_ACCESS_KEY_ID`,
`AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN` environment variables, the
default AWS shared credentials file (`~/.aws/credentials` of the root user), or
(if you are running the Docker daemon on an Amazon EC2 instance) the Amazon EC2
instance profile.
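For example, a minimal shared credentials file for the root user might look
like this (the key values are placeholders):

    [default]
    aws_access_key_id = <your-access-key-id>
    aws_secret_access_key = <your-secret-access-key>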
Credentials must have a policy applied that allows the `logs:CreateLogStream`
and `logs:PutLogEvents` actions, as shown in the following example.
    {
      "Version": "2012-10-17",
      "Statement": [
        {
          "Action": [
            "logs:CreateLogStream",
            "logs:PutLogEvents"
          ],
          "Effect": "Allow",
          "Resource": "*"
        }
      ]
    }
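Once logs are flowing, they can also be retrieved outside the console. For
example, a sketch using the AWS CLI (assuming the CLI is installed and
configured, and that the container ID is used as the stream name):

    aws logs get-log-events --log-group-name myLogGroup --log-stream-name <container-id>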

View file

@ -1,68 +0,0 @@
<!--[metadata]>
+++
title = "ETW logging driver"
description = "Describes how to use the etwlogs logging driver."
keywords = ["ETW, docker, logging, driver"]
[menu.main]
parent = "smn_logging"
+++
<![end-metadata]-->
# ETW logging driver
The ETW logging driver forwards container logs as ETW events.
ETW stands for Event Tracing for Windows, and is the common framework
for tracing applications on Windows. Each ETW event contains a message
with both the log and its context information. A client can then create
an ETW listener to listen to these events.
The ETW provider that this logging driver registers with Windows has the
GUID identifier `{a3693192-9ed6-46d2-a981-f8226c8363bd}`. A client creates an
ETW listener and registers to listen to events from the logging driver's provider.
The order in which the provider and listener are created does not matter:
a client can create its ETW listener and start listening for events from the
provider before the provider has been registered with the system.
## Usage
Here is an example of how to listen to these events using the logman utility program
included in most installations of Windows:
1. `logman start -ets DockerContainerLogs -p {a3693192-9ed6-46d2-a981-f8226c8363bd} 0 0 -o trace.etl`
2. Run your container(s) with the etwlogs driver, by adding `--log-driver=etwlogs`
to the Docker run command, and generate log messages.
3. `logman stop -ets DockerContainerLogs`
4. This will generate an etl file that contains the events. One way to convert this file into
human-readable form is to run: `tracerpt -y trace.etl`.
Each ETW event will contain a structured message string in this format:
    container_name: %s, image_name: %s, container_id: %s, image_id: %s, source: [stdout | stderr], log: %s
Details on each item in the message can be found below:
| Field | Description |
|------------------------|-------------------------------------------------|
| `container_name` | The container name at the time it was started. |
| `image_name` | The name of the container's image. |
| `container_id` | The full 64-character container ID. |
| `image_id` | The full ID of the container's image. |
| `source` | `stdout` or `stderr`. |
| `log` | The container log message. |
Here is an example event message:
    container_name: backstabbing_spence,
    image_name: windowsservercore,
    container_id: f14bb55aa862d7596b03a33251c1be7dbbec8056bbdead1da8ec5ecebbe29731,
    image_id: sha256:2f9e19bd998d3565b4f345ac9aaf6e3fc555406239a4fb1b1ba879673713824b,
    source: stdout,
    log: Hello world!
A client can parse this message string to get both the log message, as well as its
context information. Note that the time stamp is also available within the ETW event.
> **Note:** This ETW provider emits only a message string, not a specially
> structured ETW event. Therefore, it is not required to register a manifest
> file with the system to read and interpret its ETW events.

View file

@ -1,114 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/reference/logging/fluentd/"]
title = "Fluentd logging driver"
description = "Describes how to use the fluentd logging driver."
keywords = ["Fluentd, docker, logging, driver"]
[menu.main]
parent = "smn_logging"
+++
<![end-metadata]-->
# Fluentd logging driver
The `fluentd` logging driver sends container logs to the
[Fluentd](http://www.fluentd.org/) collector as structured log data. Then, users
can use any of the [various output plugins of
Fluentd](http://www.fluentd.org/plugins) to write these logs to various
destinations.
In addition to the log message itself, the `fluentd` log
driver sends the following metadata in the structured log message:
| Field | Description |
|-------------------|-------------------------------------|
| `container_id` | The full 64-character container ID. |
| `container_name` | The container name at the time it was started. If you use `docker rename` to rename a container, the new name is not reflected in the log messages. |
| `source` | `stdout` or `stderr` |
The `docker logs` command is not available for this logging driver.
## Usage
Some options are supported by specifying `--log-opt` as many times as needed:
- `fluentd-address`: specify `host:port` to connect to (default `localhost:24224`)
- `tag`: specify the tag for fluentd messages; the tag interprets some markup, e.g. `{{.ID}}`, `{{.FullID}}` or `{{.Name}}` (default `docker.{{.ID}}`)
Configure the default logging driver by passing the
`--log-driver` option to the Docker daemon:
dockerd --log-driver=fluentd
To set the logging driver for a specific container, pass the
`--log-driver` option to `docker run`:
docker run --log-driver=fluentd ...
Before using this logging driver, launch a Fluentd daemon. The logging driver
connects to this daemon through `localhost:24224` by default. Use the
`fluentd-address` option to connect to a different address.
docker run --log-driver=fluentd --log-opt fluentd-address=myhost.local:24224
If the container cannot connect to the Fluentd daemon, the container stops
immediately unless the `fluentd-async-connect` option is used.
## Options
Users can use the `--log-opt NAME=VALUE` flag to specify additional Fluentd logging driver options.
### fluentd-address
By default, the logging driver connects to `localhost:24224`. Supply the
`fluentd-address` option to connect to a different address.
docker run --log-driver=fluentd --log-opt fluentd-address=myhost.local:24224
### tag
By default, Docker uses the first 12 characters of the container ID to tag log messages.
Refer to the [log tag option documentation](log_tags.md) for customizing
the log tag format.
### labels and env
The `labels` and `env` options each take a comma-separated list of keys. If there is a collision between `label` and `env` keys, the value of the `env` key takes precedence. Both options add additional fields to the extra attributes of a logging message.
### fluentd-async-connect
Docker connects to Fluentd in the background. Messages are buffered until the connection is established.
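For example, a sketch of starting a container that buffers log messages until
the Fluentd daemon becomes reachable (the address is illustrative):

    docker run --log-driver=fluentd --log-opt fluentd-address=myhost.local:24224 --log-opt fluentd-async-connect=true your/application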
## Fluentd daemon management with Docker
About `Fluentd` itself, see [the project webpage](http://www.fluentd.org)
and [its documents](http://docs.fluentd.org/).
To use this logging driver, start the `fluentd` daemon on a host. We recommend
that you use [the Fluentd docker
image](https://hub.docker.com/r/fluent/fluentd/). This image is
especially useful if you want to aggregate multiple container logs on each
host then, later, transfer the logs to another Fluentd node to create an
aggregate store.
### Testing container loggers
1. Write a configuration file (`test.conf`) to dump input logs:

        <source>
          @type forward
        </source>
        <match docker.**>
          @type stdout
        </match>
2. Launch Fluentd container with this configuration file:
$ docker run -it -p 24224:24224 -v /path/to/conf/test.conf:/fluentd/etc/test.conf -e FLUENTD_CONF=test.conf fluent/fluentd:latest
3. Start one or more containers with the `fluentd` logging driver:
$ docker run --log-driver=fluentd your/application

View file

@ -1,85 +0,0 @@
<!--[metadata]>
+++
title = "Google Cloud Logging driver"
description = "Describes how to use the Google Cloud Logging driver."
keywords = ["gcplogs, google, docker, logging, driver"]
[menu.main]
parent = "smn_logging"
+++
<![end-metadata]-->
# Google Cloud Logging driver
The Google Cloud Logging driver sends container logs to <a href="https://cloud.google.com/logging/docs/" target="_blank">Google Cloud
Logging</a>.
## Usage
You can configure the default logging driver by passing the `--log-driver`
option to the Docker daemon:
dockerd --log-driver=gcplogs
You can set the logging driver for a specific container by using the
`--log-driver` option to `docker run`:
docker run --log-driver=gcplogs ...
This log driver does not implement a reader so it is incompatible with
`docker logs`.
If Docker detects that it is running in a Google Cloud Project, it discovers configuration
from the <a href="https://cloud.google.com/compute/docs/metadata" target="_blank">instance metadata service</a>.
Otherwise, the user must specify which project to log to using the `--gcp-project`
log option, and Docker attempts to obtain credentials from the
<a href="https://developers.google.com/identity/protocols/application-default-credentials" target="_blank">Google Application Default Credentials</a>.
The `--gcp-project` option takes precedence over information discovered from the metadata server,
so a Docker daemon running in a Google Cloud Project can be overridden to log to a different
Google Cloud Project using `--gcp-project`.
Docker fetches the values for zone, instance name, and instance ID from the Google
Cloud metadata server. Those values can be provided via options if the metadata
server is not available; they do not override values obtained from the metadata server.
## gcplogs options
You can use the `--log-opt NAME=VALUE` flag to specify these additional Google
Cloud Logging driver options:
| Option | Required | Description |
|-----------------------------|----------|---------------------------------------------------------------------------------------------------------------------------------------------|
| `gcp-project` | optional | Which GCP project to log to. Defaults to discovering this value from the GCE metadata service. |
| `gcp-log-cmd` | optional | Whether to log the command that the container was started with. Defaults to false. |
| `labels` | optional | Comma-separated list of keys of labels, which should be included in message, if these labels are specified for container. |
| `env` | optional | Comma-separated list of keys of environment variables, which should be included in message, if these variables are specified for container. |
| `gcp-meta-zone` | optional | Zone name for the instance. |
| `gcp-meta-name` | optional | Instance name. |
| `gcp-meta-id` | optional | Instance ID. |
If there is a collision between `label` and `env` keys, the value of the `env`
key takes precedence. Both options add additional fields to the attributes of a
logging message.
Below is an example of the logging options required to log to the default
logging destination which is discovered by querying the GCE metadata server.
    docker run --log-driver=gcplogs \
        --log-opt labels=location \
        --log-opt env=TEST \
        --log-opt gcp-log-cmd=true \
        --env "TEST=false" \
        --label location=west \
        your/application
This configuration also directs the driver to include in the payload the label
`location`, the environment variable `TEST`, and the command used to start the
container.
An example of the logging options for running outside of GCE (the daemon must be
configured with `GOOGLE_APPLICATION_CREDENTIALS`):

    docker run --log-driver=gcplogs \
        --log-opt gcp-project=test-project \
        --log-opt gcp-meta-zone=west1 \
        --log-opt gcp-meta-name=`hostname` \
        your/application

View file

@ -1,24 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/reference/logging/"]
title = "Logging"
description = "Logging and Logging Drivers"
keywords = [" docker, logging, driver"]
[menu.main]
parent = "engine_admin"
identifier = "smn_logging"
weight=9
+++
<![end-metadata]-->
# Logging Drivers
* [Configuring logging drivers](overview.md)
* [Configuring log tags](log_tags.md)
* [Fluentd logging driver](fluentd.md)
* [Journald logging driver](journald.md)
* [Amazon CloudWatch Logs logging driver](awslogs.md)
* [Splunk logging driver](splunk.md)
* [ETW logging driver](etwlogs.md)
* [Google Cloud Logging driver](gcplogs.md)

View file

@ -1,91 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/reference/logging/journald/"]
title = "Journald logging driver"
description = "Describes how to use the fluentd logging driver."
keywords = ["Journald, docker, logging, driver"]
[menu.main]
parent = "smn_logging"
+++
<![end-metadata]-->
# Journald logging driver
The `journald` logging driver sends container logs to the [systemd
journal](http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html). Log entries can be retrieved using the `journalctl`
command, through use of the journal API, or using the `docker logs` command.
In addition to the text of the log message itself, the `journald` log
driver stores the following metadata in the journal with each message:
| Field | Description |
|----------------------|-------------|
| `CONTAINER_ID` | The container ID truncated to 12 characters. |
| `CONTAINER_ID_FULL` | The full 64-character container ID. |
| `CONTAINER_NAME` | The container name at the time it was started. If you use `docker rename` to rename a container, the new name is not reflected in the journal entries. |
| `CONTAINER_TAG` | The container tag ([log tag option documentation](log_tags.md)). |
## Usage
You can configure the default logging driver by passing the
`--log-driver` option to the Docker daemon:
dockerd --log-driver=journald
You can set the logging driver for a specific container by using the
`--log-driver` option to `docker run`:
docker run --log-driver=journald ...
## Options
Users can use the `--log-opt NAME=VALUE` flag to specify additional
journald logging driver options.
### tag
Specify a template to set the `CONTAINER_TAG` value in journald logs. Refer to
the [log tag option documentation](log_tags.md) for customizing the log tag format.
### labels and env
The `labels` and `env` options each take a comma-separated list of keys. If there is a collision between `label` and `env` keys, the value of the `env` key takes precedence. Both options add additional metadata to the journal with each message.
## Note regarding container names
The value logged in the `CONTAINER_NAME` field is the container name
that was set at startup. If you use `docker rename` to rename a
container, the new name will not be reflected in the journal entries.
Journal entries will continue to use the original name.
## Retrieving log messages with journalctl
You can use the `journalctl` command to retrieve log messages. You
can apply filter expressions to limit the retrieved messages to a
specific container. For example, to retrieve all log messages from a
container referenced by name:
# journalctl CONTAINER_NAME=webserver
You can make use of additional filters to further limit the messages
retrieved. For example, to see just those messages generated since
the system last booted:
# journalctl -b CONTAINER_NAME=webserver
Or to retrieve log messages in JSON format with complete metadata:
# journalctl -o json CONTAINER_NAME=webserver
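Or to follow a container's log messages as they arrive, similar to
`docker logs -f` (a sketch using the standard `journalctl` follow flag):

    # journalctl -f CONTAINER_NAME=webserver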
## Retrieving log messages with the journal API
This example uses the `systemd` Python module to retrieve container
logs:

    import systemd.journal

    reader = systemd.journal.Reader()
    reader.add_match('CONTAINER_NAME=web')

    for msg in reader:
        print('{CONTAINER_ID_FULL}: {MESSAGE}'.format(**msg))

View file

@ -1,67 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/reference/logging/log_tags/"]
title = "Log tags for logging driver"
description = "Describes how to format tags for."
keywords = ["docker, logging, driver, syslog, Fluentd, gelf, journald"]
[menu.main]
parent = "smn_logging"
weight = -1
+++
<![end-metadata]-->
# Log Tags
The `tag` log option specifies how to format a tag that identifies the
container's log messages. By default, the system uses the first 12 characters of
the container id. To override this behavior, specify a `tag` option:
```
docker run --log-driver=fluentd --log-opt fluentd-address=myhost.local:24224 --log-opt tag="mailer"
```
Docker supports some special template markup you can use when specifying a tag's value:
| Markup | Description |
|--------------------|------------------------------------------------------|
| `{{.ID}}` | The first 12 characters of the container id. |
| `{{.FullID}}` | The full container id. |
| `{{.Name}}` | The container name. |
| `{{.ImageID}}` | The first 12 characters of the container's image id. |
| `{{.ImageFullID}}` | The container's full image identifier. |
| `{{.ImageName}}` | The name of the image used by the container. |
| `{{.DaemonName}}` | The name of the docker program (`docker`). |
For example, specifying a `--log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}"` value yields `syslog` log lines like:
```
Aug 7 18:33:19 HOSTNAME docker/hello-world/foobar/5790672ab6a0[9103]: Hello from Docker.
```
At startup time, the system sets the `container_name` field and `{{.Name}}` in
the tags. If you use `docker rename` to rename a container, the new name is not
reflected in the log messages. Instead, these messages continue to use the
original container name.
For advanced usage, generated tags use [go
templates](http://golang.org/pkg/text/template/) and the container's [logging
context](https://github.com/docker/docker/blob/master/daemon/logger/context.go).
As an example of what is possible with the syslog logger:
```
$ docker run -it --rm \
--log-driver syslog \
--log-opt tag="{{ (.ExtraAttributes nil).SOME_ENV_VAR }}" \
--log-opt env=SOME_ENV_VAR \
-e SOME_ENV_VAR=logtester.1234 \
flyinprogrammer/logtester
```
Results in logs like this:
```
Apr 1 15:22:17 ip-10-27-39-73 docker/logtester.1234[45499]: + exec app
Apr 1 15:22:17 ip-10-27-39-73 docker/logtester.1234[45499]: 2016-04-01 15:22:17.075416751 +0000 UTC stderr msg: 1
```

View file

@ -1,307 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/reference/logging/overview/"]
title = "Configuring Logging Drivers"
description = "Configure logging driver."
keywords = ["docker, logging, driver, Fluentd"]
[menu.main]
parent = "smn_logging"
weight=-99
+++
<![end-metadata]-->
# Configure logging drivers
The container can have a different logging driver than the Docker daemon. Use
the `--log-driver=VALUE` option with the `docker run` command to configure the
container's logging driver. If the `--log-driver` option is not set, Docker
uses the default (`json-file`) logging driver. The following options are
supported:
| Driver | Description |
|-------------|-------------------------------------------------------------------------------------------------------------------------------|
| `none` | Disables any logging for the container. `docker logs` won't be available with this driver. |
| `json-file` | Default logging driver for Docker. Writes JSON messages to file. |
| `syslog` | Syslog logging driver for Docker. Writes log messages to syslog. |
| `journald` | Journald logging driver for Docker. Writes log messages to `journald`. |
| `gelf` | Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint like Graylog or Logstash. |
| `fluentd` | Fluentd logging driver for Docker. Writes log messages to `fluentd` (forward input). |
| `awslogs` | Amazon CloudWatch Logs logging driver for Docker. Writes log messages to Amazon CloudWatch Logs. |
| `splunk` | Splunk logging driver for Docker. Writes log messages to `splunk` using HTTP Event Collector. |
| `etwlogs` | ETW logging driver for Docker on Windows. Writes log messages as ETW events. |
| `gcplogs` | Google Cloud Logging driver for Docker. Writes log messages to Google Cloud Logging. |
The `docker logs` command is available only for the `json-file` and `journald`
logging drivers.
The `labels` and `env` options add additional attributes for use with logging
drivers that accept them. Each option takes a comma-separated list of keys. If
there is a collision between `label` and `env` keys, the value of the `env` key
takes precedence.
To use attributes, specify them when you start the Docker daemon. For example,
to manually start the daemon with the `json-file` driver, and include additional
attributes in the output, run the following command:
```bash
$ dockerd \
--log-driver=json-file \
--log-opt labels=foo \
--log-opt env=foo,fizz
```
Then, run a container and specify values for the `labels` or `env`. For
example, you might use this:
```bash
$ docker run -dit --label foo=bar -e fizz=buzz alpine sh
```
This adds additional fields to the log depending on the driver, e.g. for
`json-file` that looks like:
```json
"attrs":{"fizz":"buzz","foo":"bar"}
```
## json-file options
The following logging options are supported for the `json-file` logging driver:
```bash
--log-opt max-size=[0-9]+[kmg]
--log-opt max-file=[0-9]+
--log-opt labels=label1,label2
--log-opt env=env1,env2
```
Logs that reach `max-size` are rolled over. You can set the size in
kilobytes (k), megabytes (m), or gigabytes (g), e.g. `--log-opt max-size=50m`. If
`max-size` is not set, logs are not rolled over.

`max-file` specifies the maximum number of files that a log is rolled over to
before the oldest file is discarded, e.g. `--log-opt max-file=100`. If `max-size`
is not set, then `max-file` is not honored.
If `max-size` and `max-file` are set, `docker logs` only returns the log lines
from the newest log file.
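For example, a sketch of running a container whose log is capped at three
files of 10 megabytes each (the image and values are illustrative):

```bash
$ docker run -dit \
    --log-driver=json-file \
    --log-opt max-size=10m \
    --log-opt max-file=3 \
    alpine sh
```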
## syslog options
The following logging options are supported for the `syslog` logging driver:
```bash
--log-opt syslog-address=[tcp|udp|tcp+tls]://host:port
--log-opt syslog-address=unix://path
--log-opt syslog-address=unixgram://path
--log-opt syslog-facility=daemon
--log-opt syslog-tls-ca-cert=/etc/ca-certificates/custom/ca.pem
--log-opt syslog-tls-cert=/etc/ca-certificates/custom/cert.pem
--log-opt syslog-tls-key=/etc/ca-certificates/custom/key.pem
--log-opt syslog-tls-skip-verify=true
--log-opt tag="mailer"
--log-opt syslog-format=[rfc5424|rfc5424micro|rfc3164]
--log-opt env=ENV1,ENV2,ENV3
--log-opt labels=label1,label2,label3
```
`syslog-address` specifies the remote syslog server address that the driver
connects to. If not specified, it defaults to the local unix socket of the
running system. If the transport is either `tcp` or `udp` and `port` is not
specified, it defaults to `514`. The following example shows how to have the
`syslog` driver connect to a `syslog` remote server at `192.168.0.42` on port
`123`:
```bash
$ docker run --log-driver=syslog --log-opt syslog-address=tcp://192.168.0.42:123
```
The `syslog-facility` option configures the syslog facility. By default, the
system uses the `daemon` value. To override this behavior, you can provide an
integer from 0 to 23 or any of the following named facilities (see the example
after this list):
* `kern`
* `user`
* `mail`
* `daemon`
* `auth`
* `syslog`
* `lpr`
* `news`
* `uucp`
* `cron`
* `authpriv`
* `ftp`
* `local0`
* `local1`
* `local2`
* `local3`
* `local4`
* `local5`
* `local6`
* `local7`
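For example, a sketch of running a container that logs to the `local0`
facility (the facility choice is illustrative):

```bash
$ docker run -dit \
    --log-driver=syslog \
    --log-opt syslog-facility=local0 \
    alpine sh
```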
`syslog-tls-ca-cert` specifies the absolute path to the trust certificates
signed by the CA. This option is ignored if the address protocol is not
`tcp+tls`.
`syslog-tls-cert` specifies the absolute path to the TLS certificate file. This
option is ignored if the address protocol is not `tcp+tls`.
`syslog-tls-key` specifies the absolute path to the TLS key file. This option
is ignored if the address protocol is not `tcp+tls`.
`syslog-tls-skip-verify` configures TLS verification. Verification is enabled
by default, but it can be skipped by setting this option to `true`. This
option is ignored if the address protocol is not `tcp+tls`.
`tag` configures a string that is appended to the APP-NAME in the syslog
message. By default, Docker uses the first 12 characters of the container ID to
tag log messages. Refer to the [log tag option documentation](log_tags.md) for
customizing the log tag format.
`syslog-format` specifies the syslog message format to use when logging. If
not specified, it defaults to the local unix syslog format without hostname
specification. Specify `rfc3164` to log in RFC-3164 compatible format,
`rfc5424` to log in RFC-5424 compatible format, or `rfc5424micro` to log in
RFC-5424 compatible format with microsecond timestamp resolution.
`env` is a comma-separated list of keys of environment variables. Used for
advanced [log tag options](log_tags.md).
`labels` is a comma-separated list of keys of labels. Used for advanced [log
tag options](log_tags.md).
## journald options
The `journald` logging driver stores the container id in the journal's
`CONTAINER_ID` field. For detailed information on working with this logging
driver, see [the journald logging driver](journald.md) reference documentation.
## GELF options
The GELF logging driver supports the following options:
```bash
--log-opt gelf-address=udp://host:port
--log-opt tag="database"
--log-opt labels=label1,label2
--log-opt env=env1,env2
--log-opt gelf-compression-type=gzip
--log-opt gelf-compression-level=1
```
The `gelf-address` option specifies the remote GELF server address that the
driver connects to. Currently, only `udp` is supported as the transport and you
must specify a `port` value. The following example shows how to connect the
`gelf` driver to a GELF remote server at `192.168.0.42` on port `12201`:
```bash
$ docker run -dit \
--log-driver=gelf \
--log-opt gelf-address=udp://192.168.0.42:12201 \
alpine sh
```
By default, Docker uses the first 12 characters of the container ID to tag log
messages. Refer to the [log tag option documentation](log_tags.md) for
customizing the log tag format.
The `labels` and `env` options are supported by the `gelf` logging
driver. They add additional keys to the `extra` fields, prefixed by an
underscore (`_`):

    // […]
    "_foo": "bar",
    "_fizz": "buzz",
    // […]
The `gelf-compression-type` option can be used to change how the GELF driver
compresses each log message. The accepted values are `gzip`, `zlib` and `none`;
`gzip` is the default.

The `gelf-compression-level` option can be used to change the level of
compression when `gzip` or `zlib` is selected as `gelf-compression-type`.
The accepted values are from -1 to 9 (BestCompression). Higher levels
typically run slower but compress more. The default value is 1 (BestSpeed).
## Fluentd options
You can use the `--log-opt NAME=VALUE` flag to specify these additional Fluentd
logging driver options.
- `fluentd-address`: specify `host:port` to connect to [localhost:24224]
- `tag`: specify the tag for `fluentd` messages
- `fluentd-buffer-limit`: specify the maximum size of the fluentd log buffer [8MB]
- `fluentd-retry-wait`: initial delay before a connection retry, after which it increases exponentially [1000ms]
- `fluentd-max-retries`: maximum number of connection retries before Docker gives up [1073741824]
- `fluentd-async-connect`: whether to block on the initial connection [false]
For example, to specify both additional options:
```bash
$ docker run -dit \
--log-driver=fluentd \
--log-opt fluentd-address=localhost:24224 \
--log-opt tag="docker.{{.Name}}" \
alpine sh
```
If the container cannot connect to the Fluentd daemon on the specified address and
`fluentd-async-connect` is not enabled, the container stops immediately.
For detailed information on working with this logging driver,
see [the fluentd logging driver](fluentd.md) reference documentation.
## Amazon CloudWatch Logs options
The Amazon CloudWatch Logs logging driver supports the following options:
```bash
--log-opt awslogs-region=<aws_region>
--log-opt awslogs-group=<log_group_name>
--log-opt awslogs-stream=<log_stream_name>
```
For detailed information on working with this logging driver, see [the awslogs
logging driver](awslogs.md) reference documentation.
## Splunk options
The Splunk logging driver requires the following options:
```bash
--log-opt splunk-token=<splunk_http_event_collector_token>
--log-opt splunk-url=https://your_splunk_instance:8088
```
For detailed information about working with this logging driver, see the
[Splunk logging driver](splunk.md) reference documentation.
## ETW logging driver options
The etwlogs logging driver does not require any options to be specified. This
logging driver forwards each log message as an ETW event. An ETW listener
can then be created to listen for these events.
The ETW logging driver is only available on Windows. For detailed information
on working with this logging driver, see [the ETW logging driver](etwlogs.md)
reference documentation.
## Google Cloud Logging options
The Google Cloud Logging driver supports the following options:
```bash
--log-opt gcp-project=<gcp_project>
--log-opt labels=<label1>,<label2>
--log-opt env=<envvar1>,<envvar2>
--log-opt gcp-log-cmd=true
```
For detailed information about working with this logging driver, see the
[Google Cloud Logging driver](gcplogs.md) reference documentation.

View file

@ -1,147 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/reference/logging/splunk/"]
title = "Splunk logging driver"
description = "Describes how to use the Splunk logging driver."
keywords = ["splunk, docker, logging, driver"]
[menu.main]
parent = "smn_logging"
+++
<![end-metadata]-->
# Splunk logging driver
The `splunk` logging driver sends container logs to
[HTTP Event Collector](http://dev.splunk.com/view/event-collector/SP-CAAAE6M)
in Splunk Enterprise and Splunk Cloud.
## Usage
You can configure the default logging driver by passing the `--log-driver`
option to the Docker daemon:
dockerd --log-driver=splunk
You can set the logging driver for a specific container by using the
`--log-driver` option to `docker run`:
docker run --log-driver=splunk ...
## Splunk options
You can use the `--log-opt NAME=VALUE` flag to specify these additional Splunk
logging driver options:
| Option | Required | Description |
|-----------------------------|----------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `splunk-token` | required | Splunk HTTP Event Collector token. |
| `splunk-url` | required | Path to your Splunk Enterprise or Splunk Cloud instance (including port and scheme used by HTTP Event Collector) `https://your_splunk_instance:8088`. |
| `splunk-source` | optional | Event source. |
| `splunk-sourcetype` | optional | Event source type. |
| `splunk-index` | optional | Event index. |
| `splunk-capath` | optional | Path to root certificate. |
| `splunk-caname` | optional | Name to use for validating server certificate; by default the hostname of the `splunk-url` will be used. |
| `splunk-insecureskipverify` | optional | Ignore server certificate validation. |
| `splunk-format` | optional | Message format. Can be `inline`, `json` or `raw`. Defaults to `inline`. |
| `splunk-verify-connection` | optional | Verify on start that Docker can connect to the Splunk server. Defaults to true. |
| `splunk-gzip` | optional | Enable/disable gzip compression to send events to Splunk Enterprise or Splunk Cloud instance. Defaults to false. |
| `splunk-gzip-level` | optional | Set compression level for gzip. Valid values are -1 (default), 0 (no compression), 1 (best speed) ... 9 (best compression). Defaults to [DefaultCompression](https://golang.org/pkg/compress/gzip/#DefaultCompression). |
| `tag` | optional | Specify tag for message, which interpret some markup. Default value is `{{.ID}}` (12 characters of the container ID). Refer to the [log tag option documentation](log_tags.md) for customizing the log tag format. |
| `labels` | optional | Comma-separated list of keys of labels, which should be included in message, if these labels are specified for container. |
| `env` | optional | Comma-separated list of keys of environment variables, which should be included in message, if these variables are specified for container. |
If there is a collision between `label` and `env` keys, the value of the `env` key takes precedence.
Both options add additional fields to the attributes of a logging message.
Below is an example of the logging options specified for a Splunk Enterprise
instance. The instance is installed locally on the same machine on which the
Docker daemon is running. The path to the root certificate and the Common Name
are specified using an HTTPS scheme and are used for verification.
The `SplunkServerDefaultCert` certificate is automatically generated by Splunk.
    docker run --log-driver=splunk \
        --log-opt splunk-token=176FCEBF-4CF5-4EDF-91BC-703796522D20 \
        --log-opt splunk-url=https://splunkhost:8088 \
        --log-opt splunk-capath=/path/to/cert/cacert.pem \
        --log-opt splunk-caname=SplunkServerDefaultCert \
        --log-opt tag="{{.Name}}/{{.FullID}}" \
        --log-opt labels=location \
        --log-opt env=TEST \
        --env "TEST=false" \
        --label location=west \
        your/application
### Message formats
By default, the logging driver sends messages in the `inline` format, where
each message is embedded as a string, for example:
```
{
  "attrs": {
    "env1": "val1",
    "label1": "label1"
  },
  "tag": "MyImage/MyContainer",
  "source": "stdout",
  "line": "my message"
}
{
  "attrs": {
    "env1": "val1",
    "label1": "label1"
  },
  "tag": "MyImage/MyContainer",
  "source": "stdout",
  "line": "{\"foo\": \"bar\"}"
}
```
If your messages are JSON objects, you may want to embed them in the message
sent to Splunk. If you specify `--log-opt splunk-format=json`, the driver
tries to parse every line as a JSON object and sends it as an embedded object.
If a line cannot be parsed, the message is sent as `inline`. For example:
```
{
  "attrs": {
    "env1": "val1",
    "label1": "label1"
  },
  "tag": "MyImage/MyContainer",
  "source": "stdout",
  "line": "my message"
}
{
  "attrs": {
    "env1": "val1",
    "label1": "label1"
  },
  "tag": "MyImage/MyContainer",
  "source": "stdout",
  "line": {
    "foo": "bar"
  }
}
```
The third format is the `raw` message. You can specify it by using
`--log-opt splunk-format=raw`. Attributes (environment variables and labels) and
the tag are prefixed to the message. For example:
```
MyImage/MyContainer env1=val1 label1=label1 my message
MyImage/MyContainer env1=val1 label1=label1 {"foo": "bar"}
```
## Advanced options
The Splunk logging driver allows you to configure a few advanced options by specifying the following environment variables for the Docker daemon.
| Environment variable name | Default value | Description |
|--------------------------------------------------|---------------|------------------------------------------------------------------------------------------------------------------------------------|
| `SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY` | `5s` | How often the driver posts messages if there is nothing to batch; think of this as the maximum time to wait for more messages to batch. |
| `SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE` | `1000` | How many messages the driver waits for before sending them in one batch. |
| `SPLUNK_LOGGING_DRIVER_BUFFER_MAX` | `10 * 1000` | The maximum number of messages held in the buffer for retries if the driver cannot connect to the remote server. |
| `SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE` | `4 * 1000` | How many pending messages can be in the channel used to pass messages to the background logger worker, which batches them. |
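For example, a sketch of starting the daemon manually with larger batches
(the values are illustrative; with systemd you would set these variables in
the unit's environment instead):

```bash
$ SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY=10s \
  SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE=2000 \
  dockerd --log-driver=splunk ...
```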

View file

@ -1,27 +0,0 @@
<!-- [metadata]>
+++
title = "Admin Guide"
description = "Administer Docker"
keywords = ["Administer"]
type="menu"
[menu.main]
parent="engine_use"
identifier="engine_admin"
weight="-70"
+++
<![end-metadata]-->
# Admin Topics
* [Configuring and running Docker](index.md)
* [Automatically start containers](host_integration.md)
* [Keep containers alive during daemon downtime](live-restore.md)
* [Control and configure Docker with systemd](systemd.md)
* [Format command and log output](formatting.md)
* [Run a local registry mirror](registry_mirror.md)
* [PowerShell DSC Usage](dsc.md)
* [Using Chef](chef.md)
* [Using Puppet](puppet.md)
* [Using Supervisor with Docker](using_supervisord.md)
* [Runtime metrics](runmetrics.md)
* [Link via an ambassador container](ambassador_pattern_linking.md)

View file

@ -1,101 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/articles/puppet/"]
title = "Using Puppet"
description = "Installing and using Puppet"
keywords = ["puppet, installation, usage, docker, documentation"]
[menu.main]
parent = "engine_admin"
weight="12"
+++
<![end-metadata]-->
# Using Puppet
> *Note:* Please note this is a community contributed installation path. The
> only `official` installation is using the
> [*Ubuntu*](../installation/linux/ubuntulinux.md) installation
> path. This version may sometimes be out of date.
## Requirements
To use this guide you'll need a working installation of Puppet from
[Puppet Labs](https://puppetlabs.com).
The module also currently uses the official PPA, so it only works with
Ubuntu.
## Installation
The module is available on the [Puppet
Forge](https://forge.puppetlabs.com/garethr/docker/) and can be
installed using the built-in module tool.
$ puppet module install garethr/docker
It can also be found on
[GitHub](https://github.com/garethr/garethr-docker) if you would rather
download the source.
## Usage
The module provides a puppet class for installing Docker and two defined
types for managing images and containers.
### Installation
include 'docker'
### Images
The next step is probably to install a Docker image. For this, we have a
defined type which can be used like so:
docker::image { 'ubuntu': }
This is equivalent to running:
$ docker pull ubuntu
Note that the image will only be downloaded if an image of that name does not
already exist. Because this downloads a large binary, the first run can
take a while. For that reason, this define turns off the default 5 minute
timeout for the exec type. Note that you can also remove images you no
longer need with:
    docker::image { 'ubuntu':
      ensure => 'absent',
    }
### Containers
Now you have an image where you can run commands within a container
managed by Docker.
    docker::run { 'helloworld':
      image   => 'ubuntu',
      command => '/bin/sh -c "while true; do echo hello world; sleep 1; done"',
    }
This is equivalent to running the following command, but under upstart:
$ docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done"
`docker::run` also accepts a number of optional parameters:
    docker::run { 'helloworld':
      image        => 'ubuntu',
      command      => '/bin/sh -c "while true; do echo hello world; sleep 1; done"',
      ports        => ['4444', '4555'],
      volumes      => ['/var/lib/couchdb', '/var/log'],
      volumes_from => '6446ea52fbc9',
      memory_limit => 10485760, # bytes
      username     => 'example',
      hostname     => 'example.com',
      env          => ['FOO=BAR', 'FOO2=BAR2'],
      dns          => ['8.8.8.8', '8.8.4.4'],
    }
> *Note:*
> The `ports`, `env`, `dns` and `volumes` attributes can be set with either a single
> string or as above with an array of values.

View file

@ -1,19 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/articles/registry_mirror/"]
title = "Run a local registry mirror"
description = "How to set up and run a local registry mirror"
keywords = ["docker, registry, mirror, examples"]
[menu.main]
parent = "engine_admin"
weight = 8
+++
<![end-metadata]-->
# Run a local registry mirror
The original content was deprecated. [An archived
version](https://docs.docker.com/v1.6/articles/registry_mirror) is available in
the 1.7 documentation. For information about configuring mirrors with the latest
Docker Registry version, please file a support request with [the Distribution
project](https://github.com/docker/distribution/issues).

View file

@ -1,391 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/articles/run_metrics"]
title = "Runtime metrics"
description = "Measure the behavior of running containers"
keywords = ["docker, metrics, CPU, memory, disk, IO, run, runtime, stats"]
[menu.main]
parent = "engine_admin"
weight = 14
+++
<![end-metadata]-->
# Runtime metrics
## Docker stats
You can use the `docker stats` command to live stream a container's
runtime metrics. The command supports CPU, memory usage, memory limit,
and network IO metrics.
The following is a sample output from the `docker stats` command
```bash
$ docker stats redis1 redis2
CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O
redis1 0.07% 796 KB / 64 MB 1.21% 788 B / 648 B 3.568 MB / 512 KB
redis2 0.07% 2.746 MB / 64 MB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B
```
The [docker stats](../reference/commandline/stats.md) reference page has
more details about the `docker stats` command.
## Control groups
Linux Containers rely on [control groups](
https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt)
which not only track groups of processes, but also expose metrics about
CPU, memory, and block I/O usage. You can access those metrics and
obtain network usage metrics as well. This is relevant for "pure" LXC
containers, as well as for Docker containers.
Control groups are exposed through a pseudo-filesystem. In recent
distros, you should find this filesystem under `/sys/fs/cgroup`. Under
that directory, you will see multiple sub-directories, called devices,
freezer, blkio, etc.; each sub-directory actually corresponds to a different
cgroup hierarchy.
On older systems, the control groups might be mounted on `/cgroup`, without
distinct hierarchies. In that case, instead of seeing the sub-directories,
you will see a bunch of files in that directory, and possibly some directories
corresponding to existing containers.
To figure out where your control groups are mounted, you can run:
```bash
$ grep cgroup /proc/mounts
```
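The output will look something like this (abridged and illustrative):

```
cgroup /sys/fs/cgroup/cpuset cgroup rw,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/memory cgroup rw,relatime,memory 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
```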
## Enumerating cgroups
You can look into `/proc/cgroups` to see the different control group subsystems
known to the system, the hierarchy they belong to, and how many groups they contain.
You can also look at `/proc/<pid>/cgroup` to see which control groups a process
belongs to. The control group will be shown as a path relative to the root of
the hierarchy mountpoint; e.g., `/` means "this process has not been assigned into
a particular group", while `/lxc/pumpkin` means that the process is likely to be
a member of a container named `pumpkin`.
## Finding the cgroup for a given container
For each container, one cgroup will be created in each hierarchy. On
older systems with older versions of the LXC userland tools, the name of
the cgroup will be the name of the container. With more recent versions
of the LXC tools, the cgroup will be `lxc/<container_name>`.
For Docker containers using cgroups, the container name will be the full
ID or long ID of the container. If a container shows up as `ae836c95b4c3`
in `docker ps`, its long ID might be something like
`ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79`. You can
look it up with `docker inspect` or `docker ps --no-trunc`.
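For example, a quick way to recover the long ID from the short one (the
output shown is illustrative):

```bash
$ docker inspect --format '{{.Id}}' ae836c95b4c3
ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79
```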
Putting everything together to look at the memory metrics for a Docker
container, take a look at `/sys/fs/cgroup/memory/docker/<longid>/`.
## Metrics from cgroups: memory, CPU, block I/O
For each subsystem (memory, CPU, and block I/O), you will find one or
more pseudo-files containing statistics.
### Memory metrics: `memory.stat`
Memory metrics are found in the "memory" cgroup. Note that the memory
control group adds a little overhead, because it does very fine-grained
accounting of the memory usage on your host. Therefore, many distros
chose to not enable it by default. Generally, to enable it, all you have
to do is to add some kernel command-line parameters:
`cgroup_enable=memory swapaccount=1`.
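For example, a sketch of adding those parameters on a Debian/Ubuntu-style
system that boots with GRUB (the paths and commands are distribution-specific
assumptions):

```bash
# Append the parameters to the default kernel command line
$ sudo sed -i 's/^GRUB_CMDLINE_LINUX="/&cgroup_enable=memory swapaccount=1 /' /etc/default/grub
$ sudo update-grub
# Reboot for the change to take effect
```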
The metrics are in the pseudo-file `memory.stat`.
Here is what it will look like:
    cache 11492564992
    rss 1930993664
    mapped_file 306728960
    pgpgin 406632648
    pgpgout 403355412
    swap 0
    pgfault 728281223
    pgmajfault 1724
    inactive_anon 46608384
    active_anon 1884520448
    inactive_file 7003344896
    active_file 4489052160
    unevictable 32768
    hierarchical_memory_limit 9223372036854775807
    hierarchical_memsw_limit 9223372036854775807
    total_cache 11492564992
    total_rss 1930993664
    total_mapped_file 306728960
    total_pgpgin 406632648
    total_pgpgout 403355412
    total_swap 0
    total_pgfault 728281223
    total_pgmajfault 1724
    total_inactive_anon 46608384
    total_active_anon 1884520448
    total_inactive_file 7003344896
    total_active_file 4489052160
    total_unevictable 32768
The first half (without the `total_` prefix) contains statistics relevant
to the processes within the cgroup, excluding sub-cgroups. The second half
(with the `total_` prefix) includes sub-cgroups as well.
Some metrics are "gauges", i.e., values that can increase or decrease
(e.g., swap, the amount of swap space used by the members of the cgroup).
Some others are "counters", i.e., values that can only go up, because
they represent occurrences of a specific event (e.g., pgfault, which
indicates the number of page faults which happened since the creation of
the cgroup; this number can never decrease).
<style>table tr > td:first-child { white-space: nowrap;}</style>
Metric | Description
--------------------------------------|-----------------------------------------------------------
**cache** | The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device. When you read from and write to files on disk, this amount will increase. This will be the case if you use "conventional" I/O (`open`, `read`, `write` syscalls) as well as mapped files (with `mmap`). It also accounts for the memory used by `tmpfs` mounts, though the reasons are unclear.
**rss** | The amount of memory that *doesn't* correspond to anything on disk: stacks, heaps, and anonymous memory maps.
**mapped_file** | Indicates the amount of memory mapped by the processes in the control group. It doesn't give you information about *how much* memory is used; it rather tells you *how* it is used.
**pgfault**, **pgmajfault** | Indicate the number of times that a process of the cgroup triggered a "page fault" and a "major fault", respectively. A page fault happens when a process accesses a part of its virtual memory space which is nonexistent or protected. The former can happen if the process is buggy and tries to access an invalid address (it will then be sent a `SIGSEGV` signal, typically killing it with the famous `Segmentation fault` message). The latter can happen when the process reads from a memory zone which has been swapped out, or which corresponds to a mapped file: in that case, the kernel will load the page from disk, and let the CPU complete the memory access. It can also happen when the process writes to a copy-on-write memory zone: likewise, the kernel will preempt the process, duplicate the memory page, and resume the write operation on the process's own copy of the page. "Major" faults happen when the kernel actually has to read the data from disk. When it just has to duplicate an existing page, or allocate an empty page, it's a regular (or "minor") fault.
**swap** | The amount of swap currently used by the processes in this cgroup.
**active_anon**, **inactive_anon** | The amount of *anonymous* memory that has been identified as respectively *active* and *inactive* by the kernel. "Anonymous" memory is the memory that is *not* linked to disk pages. In other words, that's the equivalent of the rss counter described above. In fact, the very definition of the rss counter is **active_anon** + **inactive_anon** - **tmpfs** (where tmpfs is the amount of memory used up by `tmpfs` filesystems mounted by this control group). Now, what's the difference between "active" and "inactive"? Pages are initially "active"; and at regular intervals, the kernel sweeps over the memory, and tags some pages as "inactive". Whenever they are accessed again, they are immediately retagged "active". When the kernel is almost out of memory, and time comes to swap out to disk, the kernel will swap "inactive" pages.
**active_file**, **inactive_file** | Cache memory, with *active* and *inactive* similar to the *anon* memory above. The exact formula is **cache** = **active_file** + **inactive_file** + **tmpfs**. The exact rules used by the kernel to move memory pages between active and inactive sets are different from the ones used for anonymous memory, but the general principle is the same. Note that when the kernel needs to reclaim memory, it is cheaper to reclaim a clean (=non modified) page from this pool, since it can be reclaimed immediately (while anonymous pages and dirty/modified pages have to be written to disk first).
**unevictable** | The amount of memory that cannot be reclaimed; generally, it will account for memory that has been "locked" with `mlock`. It is often used by crypto frameworks to make sure that secret keys and other sensitive material never gets swapped out to disk.
**memory_limit**, **memsw_limit** | These are not really metrics, but a reminder of the limits applied to this cgroup. The first one indicates the maximum amount of physical memory that can be used by the processes of this control group; the second one indicates the maximum amount of RAM+swap.
Accounting for memory in the page cache is very complex. If two
processes in different control groups both read the same file
(ultimately relying on the same blocks on disk), the corresponding
memory charge will be split between the control groups. It's nice, but
it also means that when a cgroup is terminated, it could increase the
memory usage of another cgroup, because they are not splitting the cost
anymore for those memory pages.
### CPU metrics: `cpuacct.stat`
Now that we've covered memory metrics, everything else will look very
simple in comparison. CPU metrics will be found in the
`cpuacct` controller.
For each container, you will find a pseudo-file `cpuacct.stat`,
containing the CPU usage accumulated by the processes of the container,
broken down between `user` and `system` time. If you're not familiar
with the distinction, `user` is the time during which the processes were
in direct control of the CPU (i.e., executing process code), and `system`
is the time during which the CPU was executing system calls on behalf of
those processes.
Those times are expressed in ticks of 1/100th of a second. Actually,
they are expressed in "user jiffies". There are `USER_HZ`
*"jiffies"* per second, and on x86 systems,
`USER_HZ` is 100. This used to map exactly to the
number of scheduler "ticks" per second; but with the advent of higher
frequency scheduling, as well as [tickless kernels](
http://lwn.net/Articles/549580/), the number of kernel ticks
wasn't relevant anymore. It stuck around anyway, mainly for legacy and
compatibility reasons.
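For example, reading the accumulated CPU time of a Docker container
(substitute the container's long ID; the values shown are illustrative and
expressed in USER_HZ ticks):

```bash
$ cat /sys/fs/cgroup/cpuacct/docker/<longid>/cpuacct.stat
user 2430
system 822
```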
### Block I/O metrics
Block I/O is accounted in the `blkio` controller.
Different metrics are scattered across different files. While you can
find in-depth details in the [blkio-controller](
https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)
file in the kernel documentation, here is a short list of the most
relevant ones:
Metric | Description
----------------------------|-----------------------------------------------------------
**blkio.sectors** | contains the number of 512-bytes sectors read and written by the processes member of the cgroup, device by device. Reads and writes are merged in a single counter.
**blkio.io_service_bytes** | indicates the number of bytes read and written by the cgroup. It has 4 counters per device, because for each device, it differentiates between synchronous vs. asynchronous I/O, and reads vs. writes.
**blkio.io_serviced** | the number of I/O operations performed, regardless of their size. It also has 4 counters per device.
**blkio.io_queued** | indicates the number of I/O operations currently queued for this cgroup. In other words, if the cgroup isn't doing any I/O, this will be zero. Note that the opposite is not true. In other words, if there is no I/O queued, it does not mean that the cgroup is idle (I/O-wise). It could be doing purely synchronous reads on an otherwise quiescent device, which is therefore able to handle them immediately, without queuing. Also, while it is helpful to figure out which cgroup is putting stress on the I/O subsystem, keep in mind that it is a relative quantity. Even if a process group does not perform more I/O, its queue size can increase just because the device load increases because of other devices.
## Network metrics
Network metrics are not exposed directly by control groups. There is a
good explanation for that: network interfaces exist within the context
of *network namespaces*. The kernel could probably accumulate metrics
about packets and bytes sent and received by a group of processes, but
those metrics wouldn't be very useful. You want per-interface metrics
(because traffic happening on the local `lo`
interface doesn't really count). But since processes in a single cgroup
can belong to multiple network namespaces, those metrics would be harder
to interpret: multiple network namespaces means multiple `lo`
interfaces, potentially multiple `eth0`
interfaces, etc.; so this is why there is no easy way to gather network
metrics with control groups.
Instead we can gather network metrics from other sources:
### IPtables
IPtables (or rather, the netfilter framework for which iptables is just
an interface) can do some serious accounting.
For instance, you can setup a rule to account for the outbound HTTP
traffic on a web server:
```bash
$ iptables -I OUTPUT -p tcp --sport 80
```
There is no `-j` or `-g` flag,
so the rule will just count matched packets and go to the following
rule.
Later, you can check the values of the counters, with:
```bash
$ iptables -nxvL OUTPUT
```
Technically, `-n` is not required, but it will
prevent iptables from doing DNS reverse lookups, which are probably
useless in this scenario.
Counters include packets and bytes. If you want to set up metrics for
container traffic like this, you could execute a `for`
loop to add two `iptables` rules per
container IP address (one in each direction), in the `FORWARD`
chain. This will only meter traffic going through the NAT
layer; you will also have to add rules for traffic going through the userland
proxy.
Then, you will need to check those counters on a regular basis. If you
happen to use `collectd`, there is a [nice plugin](https://collectd.org/wiki/index.php/Table_of_Plugins)
to automate iptables counters collection.
### Interface-level counters
Since each container has a virtual Ethernet interface, you might want to
check directly the TX and RX counters of this interface. You will notice
that each container is associated to a virtual Ethernet interface in
your host, with a name like `vethKk8Zqi`. Figuring
out which interface corresponds to which container is, unfortunately,
difficult.
But for now, the best way is to check the metrics *from within the
containers*. To accomplish this, you can run an executable from the host
environment within the network namespace of a container using **ip-netns
magic**.
The `ip-netns exec` command will let you execute any
program (present in the host system) within any network namespace
visible to the current process. This means that your host will be able
to enter the network namespace of your containers, but your containers
won't be able to access the host, nor their sibling containers.
Containers will be able to "see" and affect their sub-containers,
though.
The exact format of the command is:
```bash
$ ip netns exec <nsname> <command...>
```
For example:
```bash
$ ip netns exec mycontainer netstat -i
```
`ip netns` finds the "mycontainer" container by
using namespaces pseudo-files. Each process belongs to one network
namespace, one PID namespace, one `mnt` namespace,
etc., and those namespaces are materialized under
`/proc/<pid>/ns/`. For example, the network
namespace of PID 42 is materialized by the pseudo-file
`/proc/42/ns/net`.
When you run `ip netns exec mycontainer ...`, it
expects `/var/run/netns/mycontainer` to be one of
those pseudo-files. (Symlinks are accepted.)
In other words, to execute a command within the network namespace of a
container, we need to:
- Find out the PID of any process within the container that we want to investigate;
- Create a symlink from `/var/run/netns/<somename>` to `/proc/<thepid>/ns/net`
- Execute `ip netns exec <somename> ....`
Please review [Enumerating Cgroups](#enumerating-cgroups) to learn how to find
the cgroup of a process running in the container of which you want to
measure network usage. From there, you can examine the pseudo-file named
`tasks`, which contains the PIDs that are in the
control group (i.e., in the container). Pick any one of them.
Putting everything together, if the "short ID" of a container is held in
the environment variable `$CID`, then you can do this:
```bash
$ TASKS=/sys/fs/cgroup/devices/docker/$CID*/tasks
$ PID=$(head -n 1 $TASKS)
$ mkdir -p /var/run/netns
$ ln -sf /proc/$PID/ns/net /var/run/netns/$CID
$ ip netns exec $CID netstat -i
```
## Tips for high-performance metric collection
Note that running a new process each time you want to update metrics is
(relatively) expensive. If you want to collect metrics at high
resolutions, and/or over a large number of containers (think 1000
containers on a single host), you do not want to fork a new process each
time.
Here is how to collect metrics from a single process. You will have to
write your metric collector in C (or any language that lets you do
low-level system calls). You need to use a special system call,
`setns()`, which lets the current process enter any
arbitrary namespace. It requires, however, an open file descriptor to
the namespace pseudo-file (remember: that's the pseudo-file in
`/proc/<pid>/ns/net`).
However, there is a catch: you must not keep this file descriptor open.
If you do, when the last process of the control group exits, the
namespace will not be destroyed, and its network resources (like the
virtual interface of the container) will stay around forever (or until
you close that file descriptor).
The right approach would be to keep track of the first PID of each
container, and re-open the namespace pseudo-file each time.
## Collecting metrics when a container exits
Sometimes, you do not care about real time metric collection, but when a
container exits, you want to know how much CPU, memory, etc. it has
used.
Docker makes this difficult because it relies on `lxc-start`, which
carefully cleans up after itself, but it is still possible. It is
usually easier to collect metrics at regular intervals (e.g., every
minute, with the collectd LXC plugin) and rely on that instead.
But, if you'd still like to gather the stats when a container stops,
here is how:
For each container, start a collection process, and move it to the
control groups that you want to monitor by writing its PID to the tasks
file of the cgroup. The collection process should periodically re-read
the tasks file to check if it's the last process of the control group.
(If you also want to collect network statistics as explained in the
previous section, you should also move the process to the appropriate
network namespace.)
When the container exits, `lxc-start` will try to
delete the control groups. It will fail, since the control group is
still in use; but that's fine. Your process should now detect that it is
the only one remaining in the group. Now is the right time to collect
all the metrics you need!
Finally, your process should move itself back to the root control group,
and remove the container control group. To remove a control group, just
`rmdir` its directory. It's counter-intuitive to
`rmdir` a directory while it still contains files; but
remember that this is a pseudo-filesystem, so the usual rules don't apply.
After the cleanup is done, the collection process can exit safely.
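Here is a minimal sketch of such a collector as a shell script, assuming the cgroup v1 layout used elsewhere on this page, with the memory controller as an example and the container's short ID in `$CID`:

```bash
#!/bin/bash
# Minimal sketch: collect a container's final memory stats when it exits.

# Resolve the container's cgroup directory (memory controller as example).
CG=$(echo /sys/fs/cgroup/memory/docker/$CID*)

# Join the control group so it cannot be removed while we watch it.
echo $$ > "$CG/tasks"

# Wait until we are the only process left in the group. mapfile is a
# bash builtin, so reading the file does not fork an extra process into
# the cgroup and skew the count.
while mapfile -t PIDS < "$CG/tasks"; [ "${#PIDS[@]}" -gt 1 ]; do
    sleep 1
done

# We are the last process in the group: collect the final counters.
cat "$CG/memory.stat"

# Move ourselves back to the root control group, then remove the empty one.
echo $$ > /sys/fs/cgroup/memory/tasks
rmdir "$CG"
```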

View file

@ -1,177 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/articles/systemd/"]
title = "Control and configure Docker with systemd"
description = "Controlling and configuring Docker using systemd"
keywords = ["docker, daemon, systemd, configuration"]
[menu.main]
parent = "engine_admin"
weight="7"
+++
<![end-metadata]-->
# Control and configure Docker with systemd
Many Linux distributions use systemd to start the Docker daemon. This document
shows a few examples of how to customize Docker's settings.
## Starting the Docker daemon
Once Docker is installed, you will need to start the Docker daemon.
$ sudo systemctl start docker
# or on older distributions, you may need to use
$ sudo service docker start
If you want Docker to start at boot, you should also:
$ sudo systemctl enable docker
# or on older distributions, you may need to use
$ sudo chkconfig docker on
## Custom Docker daemon options
There are a number of ways to configure the daemon flags and environment variables
for your Docker daemon.
The recommended way is to use a systemd drop-in file (as described in
the <a target="_blank"
href="https://www.freedesktop.org/software/systemd/man/systemd.unit.html">systemd.unit</a>
documentation). These are local files named `<something>.conf` in the
`/etc/systemd/system/docker.service.d` directory. This could also be
`/etc/systemd/system/docker.service`, which also works for overriding
the defaults from `/lib/systemd/system/docker.service`.
However, if you had previously used a package which had an
`EnvironmentFile` (often pointing to `/etc/sysconfig/docker`) then, for
backwards compatibility, drop a file with a `.conf` extension into
the `/etc/systemd/system/docker.service.d` directory containing the
following:
[Service]
EnvironmentFile=-/etc/sysconfig/docker
EnvironmentFile=-/etc/sysconfig/docker-storage
EnvironmentFile=-/etc/sysconfig/docker-network
ExecStart=
ExecStart=/usr/bin/dockerd $OPTIONS \
$DOCKER_STORAGE_OPTIONS \
$DOCKER_NETWORK_OPTIONS \
$BLOCK_REGISTRY \
$INSECURE_REGISTRY
To check if the `docker.service` uses an `EnvironmentFile`:
$ systemctl show docker | grep EnvironmentFile
EnvironmentFile=-/etc/sysconfig/docker (ignore_errors=yes)
Alternatively, find out where the service file is located:
$ systemctl show --property=FragmentPath docker
FragmentPath=/usr/lib/systemd/system/docker.service
$ grep EnvironmentFile /usr/lib/systemd/system/docker.service
EnvironmentFile=-/etc/sysconfig/docker
You can customize the Docker daemon options using override files as explained in the
[HTTP Proxy example](#http-proxy) below. The files located in `/usr/lib/systemd/system`
or `/lib/systemd/system` contain the default options and should not be edited.
### Runtime directory and storage driver
You may want to control the disk space used for Docker images, containers
and volumes by moving it to a separate partition.
In this example, we'll assume that your `docker.service` file looks something like:
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
This will allow us to add extra flags via a drop-in file (mentioned above) by
placing a file containing the following in the `/etc/systemd/system/docker.service.d`
directory:
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd --graph="/mnt/docker-data" --storage-driver=overlay
You can also set other environment variables in this file, for example, the
`HTTP_PROXY` environment variables described below.
To modify the ExecStart configuration, specify an empty configuration followed
by a new configuration as follows:
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd --bip=172.17.42.1/16
If you fail to specify an empty configuration, Docker reports an error such as:
docker.service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. Refusing.
### HTTP proxy
This example overrides the default `docker.service` file.
If you are behind an HTTP proxy server, for example in corporate settings,
you will need to add this configuration in the Docker systemd service file.
First, create a systemd drop-in directory for the docker service:
mkdir /etc/systemd/system/docker.service.d
Now create a file called `/etc/systemd/system/docker.service.d/http-proxy.conf`
that adds the `HTTP_PROXY` environment variable:
[Service]
Environment="HTTP_PROXY=http://proxy.example.com:80/"
If you have internal Docker registries that you need to contact without
proxying, you can specify them via the `NO_PROXY` environment variable:
Environment="HTTP_PROXY=http://proxy.example.com:80/" "NO_PROXY=localhost,127.0.0.1,docker-registry.somecorporation.com"
Flush changes:
$ sudo systemctl daemon-reload
Verify that the configuration has been loaded:
$ systemctl show --property=Environment docker
Environment=HTTP_PROXY=http://proxy.example.com:80/
Restart Docker:
$ sudo systemctl restart docker
## Manually creating the systemd unit files
When installing the binary without a package, you may want
to integrate Docker with systemd. For this, simply install the two unit files
(service and socket) from [the github
repository](https://github.com/docker/docker/tree/master/contrib/init/systemd)
to `/etc/systemd/system`.
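For example (file names and URLs assumed from the repository layout above; verify against the current tree before copying):

```bash
$ sudo curl -o /etc/systemd/system/docker.service \
    https://raw.githubusercontent.com/docker/docker/master/contrib/init/systemd/docker.service
$ sudo curl -o /etc/systemd/system/docker.socket \
    https://raw.githubusercontent.com/docker/docker/master/contrib/init/systemd/docker.socket
$ sudo systemctl daemon-reload
$ sudo systemctl enable docker
```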

View file

@ -1,156 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/articles/using_supervisord/"]
title = "Using Supervisor with Docker"
description = "How to use Supervisor process management with Docker"
keywords = ["docker, supervisor, process management"]
[menu.main]
parent = "engine_admin"
weight="13"
+++
<![end-metadata]-->
# Using Supervisor with Docker
> **Note**:
> - **If you don't like sudo** then see [*Giving non-root
> access*](../installation/binaries.md#giving-non-root-access)
Traditionally a Docker container runs a single process when it is launched, for
example an Apache daemon or an SSH server daemon. Often though you want to run
more than one process in a container. There are a number of ways you can
achieve this ranging from using a simple Bash script as the value of your
container's `CMD` instruction to installing a process management tool.
In this example you're going to make use of the process management tool,
[Supervisor](http://supervisord.org/), to manage multiple processes in a
container. Using Supervisor allows you to better control, manage, and restart
the processes inside the container. To demonstrate this we're going to install
and manage both an SSH daemon and an Apache daemon.
## Creating a Dockerfile
Let's start by creating a basic `Dockerfile` for our new image.
```Dockerfile
FROM ubuntu:16.04
MAINTAINER examples@docker.com
```
## Installing Supervisor
You can now install the SSH and Apache daemons as well as Supervisor in the
container.
```Dockerfile
RUN apt-get update && apt-get install -y openssh-server apache2 supervisor
RUN mkdir -p /var/lock/apache2 /var/run/apache2 /var/run/sshd /var/log/supervisor
```
The first `RUN` instruction installs the `openssh-server`, `apache2` and
`supervisor` (which provides the Supervisor daemon) packages. The next `RUN`
instruction creates four new directories that are needed to run the SSH daemon
and Supervisor.
## Adding Supervisor's configuration file
Now let's add a configuration file for Supervisor. The default file is called
`supervisord.conf` and is located in `/etc/supervisor/conf.d/`.
```Dockerfile
COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
```
Let's see what is inside the `supervisord.conf` file.
```ini
[supervisord]
nodaemon=true
[program:sshd]
command=/usr/sbin/sshd -D
[program:apache2]
command=/bin/bash -c "source /etc/apache2/envvars && exec /usr/sbin/apache2 -DFOREGROUND"
```
The `supervisord.conf` configuration file contains directives that configure
Supervisor and the processes it manages. The first block `[supervisord]`
provides configuration for Supervisor itself. The `nodaemon` directive is used,
which tells Supervisor to run interactively rather than daemonize.
The next two blocks manage the services we wish to control. Each block controls
a separate process. The blocks contain a single directive, `command`, which
specifies what command to run to start each process.
## Exposing ports and running Supervisor
Now let's finish the `Dockerfile` by exposing some required ports and
specifying the `CMD` instruction to start Supervisor when our container
launches.
```Dockerfile
EXPOSE 22 80
CMD ["/usr/bin/supervisord"]
```
These instructions tell Docker that ports 22 and 80 are exposed by the
container and that the `/usr/bin/supervisord` binary should be executed when
the container launches.
## Building our image
Your completed Dockerfile now looks like this:
```Dockerfile
FROM ubuntu:16.04
MAINTAINER examples@docker.com
RUN apt-get update && apt-get install -y openssh-server apache2 supervisor
RUN mkdir -p /var/lock/apache2 /var/run/apache2 /var/run/sshd /var/log/supervisor
COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
EXPOSE 22 80
CMD ["/usr/bin/supervisord"]
```
And your `supervisord.conf` file looks like this:
```ini
[supervisord]
nodaemon=true
[program:sshd]
command=/usr/sbin/sshd -D
[program:apache2]
command=/bin/bash -c "source /etc/apache2/envvars && exec /usr/sbin/apache2 -DFOREGROUND"
```
You can now build the image using this command:
```bash
$ docker build -t mysupervisord .
```
## Running your Supervisor container
Once you have built your image you can launch a container from it.
```bash
$ docker run -p 22 -p 80 -t -i mysupervisord
2013-11-25 18:53:22,312 CRIT Supervisor running as root (no user in config file)
2013-11-25 18:53:22,312 WARN Included extra file "/etc/supervisor/conf.d/supervisord.conf" during parsing
2013-11-25 18:53:22,342 INFO supervisord started with pid 1
2013-11-25 18:53:23,346 INFO spawned: 'sshd' with pid 6
2013-11-25 18:53:23,349 INFO spawned: 'apache2' with pid 7
...
```
You launched a new container interactively using the `docker run` command.
The container is running Supervisor, which in turn has launched the SSH and
Apache daemons. We specified the `-p` flag to expose ports 22 and 80. From
here you can identify the exposed ports and connect to one or both of the
SSH and Apache daemons, as shown below.
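For example, a quick way to find the mapped ports and connect (the port numbers below are illustrative; yours will differ):

```bash
# Ask Docker which host ports were mapped to the container's ports.
$ docker port $(docker ps -lq) 22
0.0.0.0:32768
$ docker port $(docker ps -lq) 80
0.0.0.0:32769

# SSH into the container (assuming a login is configured in the image),
# or fetch the Apache default page; replace <docker-host> with your
# Docker host's address, e.g. localhost.
$ ssh root@<docker-host> -p 32768
$ curl http://<docker-host>:32769/
```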

File diff suppressed because it is too large


Binary file not shown.


View file

@ -1,59 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/misc/breaking/"]
title = "Breaking changes"
description = "Breaking changes"
keywords = ["docker, documentation, about, technology, breaking",
"incompatibilities"]
[menu.main]
parent = "engine_use"
weight=80
+++
<![end-metadata]-->
# Breaking changes and incompatibilities
Every Engine release strives to be backward compatible with its predecessors.
In all cases, the policy is that feature removal is communicated two releases
in advance and documented as part of the [deprecated features](deprecated.md)
page.
Unfortunately, Docker is a fast moving project, and newly introduced features
may sometime introduce breaking changes and/or incompatibilities. This page
documents these by Engine version.
# Engine 1.12
Docker clients <= 1.9.2 used an invalid Host header when making requests to the
daemon. Docker 1.12 is built using golang 1.6, which now checks the validity
of the Host header; as a result, clients <= 1.9.2 can no longer talk to the daemon.
[An environment variable was added to overcome this issue.](reference/commandline/dockerd.md#miscellaneous-options)
# Engine 1.10
There were two breaking changes in the 1.10 release.
## Registry
Registry 2.3 includes improvements to the image manifest that have caused a
breaking change. Images pushed by Engine 1.10 to a Registry 2.3 cannot be
pulled by digest by older Engine versions. A `docker pull` that encounters this
situation returns the following error:
```
Error response from daemon: unsupported schema version 2 for tag TAGNAME
```
Docker Content Trust heavily relies on pull by digest. As a result, images
pushed from the Engine 1.10 CLI to a 2.3 Registry cannot be pulled by older
Engine CLIs (< 1.10) with Docker Content Trust enabled.
If you are using an older Registry version (< 2.3), this problem does not occur
with any version of the Engine CLI; push, pull, with and without content trust
work as you would expect.
## Docker Content Trust
Engines older than 1.10 cannot pull images from repositories that
have enabled key delegation. Key delegation is a feature which requires a
manual action to enable.

View file

@ -1,15 +0,0 @@
#
# Build: docker build -t apt-cacher .
# Run: docker run -d -p 3142:3142 --name apt-cacher-run apt-cacher
#
# and then you can run containers with:
# docker run -t -i --rm -e http_proxy=http://dockerhost:3142/ debian bash
#
FROM ubuntu
MAINTAINER SvenDowideit@docker.com
VOLUME ["/var/cache/apt-cacher-ng"]
RUN apt-get update && apt-get install -y apt-cacher-ng
EXPOSE 3142
CMD chmod 777 /var/cache/apt-cacher-ng && /etc/init.d/apt-cacher-ng start && tail -f /var/log/apt-cacher-ng/*

View file

@ -1,129 +0,0 @@
<!--[metadata]>
+++
title = "Dockerizing an apt-cacher-ng service"
description = "Installing and running an apt-cacher-ng service"
keywords = ["docker, example, package installation, networking, debian, ubuntu"]
[menu.main]
parent = "engine_dockerize"
+++
<![end-metadata]-->
# Dockerizing an apt-cacher-ng service
> **Note**:
> - **If you don't like sudo** then see [*Giving non-root
> access*](../installation/binaries.md#giving-non-root-access).
> - **If you're using OS X or docker via TCP** then you shouldn't use
> sudo.
When you have multiple Docker servers, or build unrelated Docker
containers which can't make use of the Docker build cache, it can be
useful to have a caching proxy for your packages. This container makes
the second download of any package almost instant.
Use the following Dockerfile:
#
# Build: docker build -t apt-cacher .
# Run: docker run -d -p 3142:3142 --name apt-cacher-run apt-cacher
#
# and then you can run containers with:
# docker run -t -i --rm -e http_proxy=http://dockerhost:3142/ debian bash
#
# Here, `dockerhost` is the IP address or FQDN of a host running the Docker daemon
# which acts as an APT proxy server.
FROM ubuntu
MAINTAINER SvenDowideit@docker.com
VOLUME ["/var/cache/apt-cacher-ng"]
RUN apt-get update && apt-get install -y apt-cacher-ng
EXPOSE 3142
CMD chmod 777 /var/cache/apt-cacher-ng && /etc/init.d/apt-cacher-ng start && tail -f /var/log/apt-cacher-ng/*
Build the image using:
$ docker build -t eg_apt_cacher_ng .
Then run it, mapping the exposed port to one on the host:
$ docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng
To see the logfiles that are `tailed` in the default command, you can
use:
$ docker logs -f test_apt_cacher_ng
To get your Debian-based containers to use the proxy, you have the
following options. Note that you must replace `dockerhost` with the
IP address or FQDN of the host running the `test_apt_cacher_ng`
container.
1. Add an apt Proxy setting
`echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy`
2. Set an environment variable:
`http_proxy=http://dockerhost:3142/`
3. Change your `sources.list` entries to start with
`http://dockerhost:3142/`
4. Link Debian-based containers to the APT proxy container using `--link`
5. Create a custom network of an APT proxy container with Debian-based containers.
**Option 1** injects the settings safely into your apt configuration in
a local version of a common base:
FROM ubuntu
RUN echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy
RUN apt-get update && apt-get install -y vim git
# docker build -t my_ubuntu .
**Option 2** is good for testing, but will break other HTTP clients
which obey `http_proxy`, such as `curl`, `wget` and others:
$ docker run --rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash
**Option 3** is the least portable, but there will be times when you
might need to do it, and you can do it from your `Dockerfile`
too; a sketch follows.
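A hypothetical one-liner implementing this, either inside a running container or as a `RUN` line in your `Dockerfile` (it assumes all your sources use plain HTTP):

```bash
# Prefix every HTTP source with the proxy URL, which is the form
# apt-cacher-ng expects (http://dockerhost:3142/<original-host>/...).
$ sed -i 's|http://|http://dockerhost:3142/|g' /etc/apt/sources.list
```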
**Option 4** links Debian-based containers to the proxy server using the following command:
$ docker run -i -t --link test_apt_cacher_ng:apt_proxy -e http_proxy=http://apt_proxy:3142/ debian bash
**Option 5** creates a custom network of APT proxy server and Debian-based containers:
$ docker network create mynetwork
$ docker run -d -p 3142:3142 --network=mynetwork --name test_apt_cacher_ng eg_apt_cacher_ng
$ docker run --rm -it --network=mynetwork -e http_proxy=http://test_apt_cacher_ng:3142/ debian bash
Apt-cacher-ng has some tools that allow you to manage the repository,
and they can be used by leveraging the `VOLUME`
instruction, and the image we built to run the service:
$ docker run --rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash
root@f38c87f2a42d:/# /usr/lib/apt-cacher-ng/distkill.pl
Scanning /var/cache/apt-cacher-ng, please wait...
Found distributions:
bla, taggedcount: 0
1. precise-security (36 index files)
2. wheezy (25 index files)
3. precise-updates (36 index files)
4. precise (36 index files)
5. wheezy-updates (18 index files)
Found architectures:
6. amd64 (36 index files)
7. i386 (24 index files)
WARNING: The removal action may wipe out whole directories containing
index files. Select d to see detailed list.
(Number nn: tag distribution or architecture nn; 0: exit; d: show details; r: remove tagged; q: quit): q
Finally, clean up after your test by stopping and removing the
container, and then removing the image.
$ docker stop test_apt_cacher_ng
$ docker rm test_apt_cacher_ng
$ docker rmi eg_apt_cacher_ng

View file

@ -1,235 +0,0 @@
<!--[metadata]>
+++
title = "Dockerizing a Couchbase service"
description = "Dockerizing a Couchbase service"
keywords = ["docker, example, package installation, networking, couchbase"]
[menu.main]
parent = "engine_dockerize"
+++
<![end-metadata]-->
# Dockerizing a Couchbase service
This example shows how to start a [Couchbase](http://couchbase.com) server using Docker Compose, configure it using its [REST API](http://developer.couchbase.com/documentation/server/4.0/rest-api/rest-endpoints-all.html), and query it.
Couchbase is an open source, document-oriented NoSQL database for modern web, mobile, and IoT applications. It is designed for ease of development and Internet-scale performance.
## Start Couchbase server
Couchbase Docker images are published at [Docker Hub](https://hub.docker.com/_/couchbase/).
Start Couchbase server as:
```
docker run -d --name db -p 8091-8093:8091-8093 -p 11210:11210 couchbase
```
The purpose of each port exposed is explained at [Couchbase Developer Portal - Network Configuration](http://developer.couchbase.com/documentation/server/4.1/install/install-ports.html).
The logs can be viewed with:
```
docker logs db
Starting Couchbase Server -- Web UI available at http://<ip>:8091
```
> **Note**: The examples on this page assume that the Docker Host
> is reachable on `192.168.99.100`. Substitute `192.168.99.100` with
> the actual IP address of your Docker Host. If you're running
> Docker using Docker machine, you can obtain the IP address
> of the Docker host using `docker-machine ip <MACHINE-NAME>`.
The logs show that the Couchbase console can be accessed at `http://192.168.99.100:8091`. The default username is `Administrator` and the password is `password`.
## Configure Couchbase Docker container
By default, Couchbase server needs to be configured using the console before it can be used. This can be simplified by configuring it using the REST API.
### Configure memory for Data and Index service
Data, Query and Index are three different services that can be configured on a Couchbase instance. Each service has different operating needs. For example, Query is a CPU-intensive operation and so requires a faster processor; Index is disk-heavy and so benefits from a fast solid-state drive; Data needs fast reads and writes and so requires more memory.
Memory needs to be configured for the Data and Index services only.
```
curl -v -X POST http://192.168.99.100:8091/pools/default -d memoryQuota=300 -d indexMemoryQuota=300
* Hostname was NOT found in DNS cache
* Trying 192.168.99.100...
* Connected to 192.168.99.100 (192.168.99.100) port 8091 (#0)
> POST /pools/default HTTP/1.1
> User-Agent: curl/7.37.1
> Host: 192.168.99.100:8091
> Accept: */*
> Content-Length: 36
> Content-Type: application/x-www-form-urlencoded
>
* upload completely sent off: 36 out of 36 bytes
< HTTP/1.1 401 Unauthorized
< WWW-Authenticate: Basic realm="Couchbase Server Admin / REST"
* Server Couchbase Server is not blacklisted
< Server: Couchbase Server
< Pragma: no-cache
< Date: Wed, 25 Nov 2015 22:48:16 GMT
< Content-Length: 0
< Cache-Control: no-cache
<
* Connection #0 to host 192.168.99.100 left intact
```
The command shows an HTTP POST request to the REST endpoint `/pools/default`. The host is the IP address of the Docker machine. The port is the exposed port of Couchbase server. The memory and index quota for the server are passed in the request.
### Configure Data, Query, and Index services
All three services, or only one of them, can be configured on each instance. This allows different Couchbase instances to use hardware affinities and set up services accordingly. For example, if the Docker host is a machine with a solid-state drive, then only the Data service can be started.
```
curl -v http://192.168.99.100:8091/node/controller/setupServices -d 'services=kv%2Cn1ql%2Cindex'
* Hostname was NOT found in DNS cache
* Trying 192.168.99.100...
* Connected to 192.168.99.100 (192.168.99.100) port 8091 (#0)
> POST /node/controller/setupServices HTTP/1.1
> User-Agent: curl/7.37.1
> Host: 192.168.99.100:8091
> Accept: */*
> Content-Length: 26
> Content-Type: application/x-www-form-urlencoded
>
* upload completely sent off: 26 out of 26 bytes
< HTTP/1.1 200 OK
* Server Couchbase Server is not blacklisted
< Server: Couchbase Server
< Pragma: no-cache
< Date: Wed, 25 Nov 2015 22:49:51 GMT
< Content-Length: 0
< Cache-Control: no-cache
<
* Connection #0 to host 192.168.99.100 left intact
```
The command shows an HTTP POST request to the REST endpoint `/node/controller/setupServices`. All three services are configured for the Couchbase server. The Data service is identified by `kv`, the Query service by `n1ql`, and the Index service by `index`.
### Setup credentials for the Couchbase server
Set the username and password credentials that will subsequently be used for managing the Couchbase server.
```
curl -v -X POST http://192.168.99.100:8091/settings/web -d port=8091 -d username=Administrator -d password=password
* Hostname was NOT found in DNS cache
* Trying 192.168.99.100...
* Connected to 192.168.99.100 (192.168.99.100) port 8091 (#0)
> POST /settings/web HTTP/1.1
> User-Agent: curl/7.37.1
> Host: 192.168.99.100:8091
> Accept: */*
> Content-Length: 50
> Content-Type: application/x-www-form-urlencoded
>
* upload completely sent off: 50 out of 50 bytes
< HTTP/1.1 200 OK
* Server Couchbase Server is not blacklisted
< Server: Couchbase Server
< Pragma: no-cache
< Date: Wed, 25 Nov 2015 22:50:43 GMT
< Content-Type: application/json
< Content-Length: 44
< Cache-Control: no-cache
<
* Connection #0 to host 192.168.99.100 left intact
{"newBaseUri":"http://192.168.99.100:8091/"}
```
The command shows an HTTP POST request to the REST endpoint `/settings/web`. The user name and password credentials are passed in the request.
### Install sample data
The Couchbase server can easily load sample data into the instance.
```
curl -v -u Administrator:password -X POST http://192.168.99.100:8091/sampleBuckets/install -d '["travel-sample"]'
* Hostname was NOT found in DNS cache
* Trying 192.168.99.100...
* Connected to 192.168.99.100 (192.168.99.100) port 8091 (#0)
* Server auth using Basic with user 'Administrator'
> POST /sampleBuckets/install HTTP/1.1
> Authorization: Basic QWRtaW5pc3RyYXRvcjpwYXNzd29yZA==
> User-Agent: curl/7.37.1
> Host: 192.168.99.100:8091
> Accept: */*
> Content-Length: 17
> Content-Type: application/x-www-form-urlencoded
>
* upload completely sent off: 17 out of 17 bytes
< HTTP/1.1 202 Accepted
* Server Couchbase Server is not blacklisted
< Server: Couchbase Server
< Pragma: no-cache
< Date: Wed, 25 Nov 2015 22:51:51 GMT
< Content-Type: application/json
< Content-Length: 2
< Cache-Control: no-cache
<
* Connection #0 to host 192.168.99.100 left intact
[]
```
The command shows an HTTP POST request to the REST endpoint `/sampleBuckets/install`. The name of the sample bucket is passed in the request.
Congratulations, you are now running a Couchbase container, fully configured using the REST API.
## Query Couchbase using CBQ
[CBQ](http://developer.couchbase.com/documentation/server/4.1/cli/cbq-tool.html), short for Couchbase Query, is a CLI tool that allows you to create, read, update, and delete JSON documents on a Couchbase server. This tool is installed as part of the Couchbase Docker image.
Run CBQ tool:
```
docker run -it --link db:db couchbase cbq --engine http://db:8093
Couchbase query shell connected to http://db:8093/ . Type Ctrl-D to exit.
cbq>
```
The `--engine` parameter to CBQ allows you to specify the host and port of the Couchbase server running on the Docker host. The host is typically the host name or IP address of the machine where the Couchbase server is running; in this case, the container name used when starting the container, `db`, can be used. Port `8093` listens for all incoming queries.
Couchbase allows you to query JSON documents using [N1QL](http://developer.couchbase.com/documentation/server/4.1/n1ql/n1ql-language-reference/index.html). N1QL is a comprehensive, declarative query language that brings SQL-like query capabilities to JSON documents.
Query the database by running a N1QL query:
```
cbq> select * from `travel-sample` limit 1;
{
"requestID": "97816771-3c25-4a1d-9ea8-eb6ad8a51919",
"signature": {
"*": "*"
},
"results": [
{
"travel-sample": {
"callsign": "MILE-AIR",
"country": "United States",
"iata": "Q5",
"icao": "MLA",
"id": 10,
"name": "40-Mile Air",
"type": "airline"
}
}
],
"status": "success",
"metrics": {
"elapsedTime": "60.872423ms",
"executionTime": "60.792258ms",
"resultCount": 1,
"resultSize": 300
}
}
```
## Couchbase Web Console
[Couchbase Web Console](http://developer.couchbase.com/documentation/server/4.1/admin/ui-intro.html) is a console that allows you to manage a Couchbase instance. It is available at:
`http://192.168.99.100:8091/`
Make sure to replace the IP address with the IP address of your Docker Machine or `localhost` if Docker is running locally.
![Couchbase Web Console](couchbase/web-console.png)

Binary file not shown.


View file

@ -1,49 +0,0 @@
<!--[metadata]>
+++
title = "Dockerizing a CouchDB service"
description = "Sharing data between 2 couchdb databases"
keywords = ["docker, example, package installation, networking, couchdb, data volumes"]
[menu.main]
parent = "engine_dockerize"
+++
<![end-metadata]-->
# Dockerizing a CouchDB service
> **Note**:
> - **If you don't like sudo** then see [*Giving non-root
> access*](../installation/binaries.md#giving-non-root-access)
Here's an example of using data volumes to share the same data between
two CouchDB containers. This could be used for hot upgrades, testing
different versions of CouchDB on the same data, etc.
## Create first database
Note that we're marking `/var/lib/couchdb` as a data volume.
$ COUCH1=$(docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03)
## Add data to the first database
We're assuming your Docker host is reachable at `localhost`. If not,
replace `localhost` with the public IP of your Docker host.
$ HOST=localhost
$ URL="http://$HOST:$(docker port $COUCH1 5984 | grep -o '[1-9][0-9]*$')/_utils/"
$ echo "Navigate to $URL in your browser, and use the couch interface to add data"
## Create second database
This time, we're requesting shared access to `$COUCH1`'s volumes.
$ COUCH2=$(docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03)
## Browse data on the second database
$ HOST=localhost
$ URL="http://$HOST:$(docker port $COUCH2 5984 | grep -o '[1-9][0-9]*$')/_utils/"
$ echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!'
Congratulations, you are now running two CouchDB containers, completely
isolated from each other *except* for their data.

View file

@ -1,22 +0,0 @@
<!--[metadata]>
+++
title = "Dockerize an application"
description = "Provides examples for using Docker"
keywords = ["dockerize, dockerizing apps, dockerizing applications, container, containers"]
[menu.main]
identifier = "engine_dockerize"
parent="engine_use"
weight = 8
+++
<![end-metadata]-->
# Dockerize an application
This section contains the following:
* [Dockerizing MongoDB](mongodb.md)
* [Dockerizing PostgreSQL](postgresql_service.md)
* [Dockerizing a CouchDB service](couchdb_data_volumes.md)
* [Dockerizing a Redis service](running_redis_service.md)
* [Dockerizing an apt-cacher-ng service](apt-cacher-ng.md)
* [Dockerizing applications: A 'Hello world'](../tutorials/dockerizing.md)

View file

@ -1,177 +0,0 @@
<!--[metadata]>
+++
title = "Dockerizing MongoDB"
description = "Creating a Docker image with MongoDB pre-installed using a Dockerfile and sharing the image on Docker Hub"
keywords = ["docker, dockerize, dockerizing, article, example, docker.io, platform, package, installation, networking, mongodb, containers, images, image, sharing, dockerfile, build, auto-building, framework"]
[menu.main]
parent = "engine_dockerize"
+++
<![end-metadata]-->
# Dockerizing MongoDB
## Introduction
In this example, we are going to learn how to build a Docker image with
MongoDB pre-installed. We'll also see how to `push` that image to the
[Docker Hub registry](https://hub.docker.com) and share it with others!
> **Note:** This guide will show the mechanics of building a MongoDB container, but
> you will probably want to use the official image on [Docker Hub]( https://hub.docker.com/_/mongo/)
Using Docker and containers for deploying [MongoDB](https://www.mongodb.org/)
instances will bring several benefits, such as:
- Easy to maintain, highly configurable MongoDB instances;
- Ready to run and start working within milliseconds;
- Based on globally accessible and shareable images.
> **Note:**
>
> If you do **_not_** like `sudo`, you might want to check out:
> [*Giving non-root access*](../installation/binaries.md#giving-non-root-access).
## Creating a Dockerfile for MongoDB
Let's create our `Dockerfile` and start building it:
$ nano Dockerfile
Although optional, it is handy to have comments at the beginning of a
`Dockerfile` explaining its purpose:
# Dockerizing MongoDB: Dockerfile for building MongoDB images
# Based on ubuntu:latest, installs MongoDB following the instructions from:
# http://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/
> **Tip:** `Dockerfile`s are flexible. However, they need to follow a certain
> format. The first item to be defined is the name of an image, which becomes
> the *parent* of your *Dockerized MongoDB* image.
We will build our image using the latest version of Ubuntu from the
[Docker Hub Ubuntu](https://hub.docker.com/_/ubuntu/) repository.
# Format: FROM repository[:version]
FROM ubuntu:latest
Continuing, we will declare the `MAINTAINER` of the `Dockerfile`:
# Format: MAINTAINER Name <email@addr.ess>
MAINTAINER M.Y. Name <myname@addr.ess>
> **Note:** Although Ubuntu systems have MongoDB packages, they are likely to
> be outdated. Therefore in this example, we will use the official MongoDB
> packages.
We will begin by importing the MongoDB public GPG key. We will also create
a MongoDB repository file for the package manager.
# Installation:
# Import MongoDB public GPG key AND create a MongoDB list file
RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
RUN echo "deb http://repo.mongodb.org/apt/ubuntu "$(lsb_release -sc)"/mongodb-org/3.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.0.list
After this initial preparation we can update our packages and install MongoDB.
# Update apt-get sources AND install MongoDB
RUN apt-get update && apt-get install -y mongodb-org
> **Tip:** You can install a specific version of MongoDB by using a list
> of required packages with versions, e.g.:
>
> RUN apt-get update && apt-get install -y mongodb-org=3.0.1 mongodb-org-server=3.0.1 mongodb-org-shell=3.0.1 mongodb-org-mongos=3.0.1 mongodb-org-tools=3.0.1
MongoDB requires a data directory. Let's create it as the final step of our
installation instructions.
# Create the MongoDB data directory
RUN mkdir -p /data/db
Lastly, we set the `ENTRYPOINT`, which tells Docker to run `mongod` inside
the containers launched from our MongoDB image. For ports, we use
the `EXPOSE` instruction.
# Expose port 27017 from the container to the host
EXPOSE 27017
# Set usr/bin/mongod as the dockerized entry-point application
ENTRYPOINT ["/usr/bin/mongod"]
Now save the file and let's build our image.
> **Note:**
>
> The full version of this `Dockerfile` can be found [here](https://github.com/docker/docker/blob/master/docs/examples/mongodb/Dockerfile).
## Building the MongoDB Docker image
With our `Dockerfile`, we can now build the MongoDB image using Docker. Unless
you are experimenting, it is always good practice to tag Docker images by passing the
`--tag` option to the `docker build` command.
# Format: docker build --tag/-t <user-name>/<repository> .
# Example:
$ docker build --tag my/repo .
Once this command is issued, Docker will go through the `Dockerfile` and build
the image. The final image will be tagged `my/repo`.
## Pushing the MongoDB image to Docker Hub
All Docker image repositories can be hosted and shared on
[Docker Hub](https://hub.docker.com) with the `docker push` command. For this,
you need to be logged in.
# Log-in
$ docker login
Username:
..
# Push the image
# Format: docker push <user-name>/<repository>
$ docker push my/repo
The push refers to a repository [my/repo] (len: 1)
Sending image list
Pushing repository my/repo (1 tags)
..
## Using the MongoDB image
Using the MongoDB image we created, we can run one or more MongoDB instances
as daemon process(es).
# Basic way
# Usage: docker run --name <name for container> -d <user-name>/<repository>
$ docker run -p 27017:27017 --name mongo_instance_001 -d my/repo
# Dockerized MongoDB, lean and mean!
# Usage: docker run --name <name for container> -d <user-name>/<repository> --noprealloc --smallfiles
$ docker run -p 27017:27017 --name mongo_instance_001 -d my/repo --smallfiles
# Checking out the logs of a MongoDB container
# Usage: docker logs <name for container>
$ docker logs mongo_instance_001
# Playing with MongoDB
# Usage: mongo --port <port you get from `docker ps`>
$ mongo --port 27017
# If using docker-machine
# Usage: mongo --port <port you get from `docker ps`> --host <ip address from `docker-machine ip VM_NAME`>
$ mongo --port 27017 --host 192.168.59.103
> **Tip:**
If you want to run two containers on the same engine, then you will need to map
the exposed port to two different ports on the host
# Start two containers and map the ports
$ docker run -p 28001:27017 --name mongo_instance_001 -d my/repo
$ docker run -p 28002:27017 --name mongo_instance_002 -d my/repo
# Now you can connect to each MongoDB instance on the two ports
$ mongo --port 28001
$ mongo --port 28002
- [Linking containers](../userguide/networking/default_network/dockerlinks.md)
- [Cross-host linking containers](../admin/ambassador_pattern_linking.md)
- [Creating an Automated Build](https://docs.docker.com/docker-hub/builds/)

View file

@ -1,23 +0,0 @@
# Dockerizing MongoDB: Dockerfile for building MongoDB images
# Based on ubuntu:16.04, installs MongoDB following the instructions from:
# http://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/
FROM ubuntu:16.04
MAINTAINER Docker
# Installation:
# Import MongoDB public GPG key AND create a MongoDB list file
RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927
RUN echo "deb http://repo.mongodb.org/apt/ubuntu $(cat /etc/lsb-release | grep DISTRIB_CODENAME | cut -d= -f2)/mongodb-org/3.2 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.2.list
# Update apt-get sources AND install MongoDB
RUN apt-get update && apt-get install -y mongodb-org
# Create the MongoDB data directory
RUN mkdir -p /data/db
# Expose port 27017 from the container to the host
EXPOSE 27017
# Set /usr/bin/mongod as the dockerized entry-point application
ENTRYPOINT ["/usr/bin/mongod"]

View file

@ -1,49 +0,0 @@
#
# example Dockerfile for https://docs.docker.com/examples/postgresql_service/
#
FROM ubuntu
MAINTAINER SvenDowideit@docker.com
# Add the PostgreSQL PGP key to verify their Debian packages.
# It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc
RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8
# Add PostgreSQL's repository. It contains the most recent stable release
# of PostgreSQL, ``9.3``.
RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list
# Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3
# There are some warnings (in red) that show up during the build. You can hide
# them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3
# Note: The official Debian and Ubuntu images automatically ``apt-get clean``
# after each ``apt-get``
# Run the rest of the commands as the ``postgres`` user created by the ``postgresql-9.3`` package when it was ``apt-get installed``
USER postgres
# Create a PostgreSQL role named ``docker`` with ``docker`` as the password and
# then create a database `docker` owned by the ``docker`` role.
# Note: here we use ``&&\`` to run commands one after the other - the ``\``
# allows the RUN command to span multiple lines.
RUN /etc/init.d/postgresql start &&\
psql --command "CREATE USER docker WITH SUPERUSER PASSWORD 'docker';" &&\
createdb -O docker docker
# Adjust PostgreSQL configuration so that remote connections to the
# database are possible.
RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf
# And add ``listen_addresses`` to ``/etc/postgresql/9.3/main/postgresql.conf``
RUN echo "listen_addresses='*'" >> /etc/postgresql/9.3/main/postgresql.conf
# Expose the PostgreSQL port
EXPOSE 5432
# Add VOLUMEs to allow backup of config, logs and databases
VOLUME ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"]
# Set the default command to run when starting the container
CMD ["/usr/lib/postgresql/9.3/bin/postgres", "-D", "/var/lib/postgresql/9.3/main", "-c", "config_file=/etc/postgresql/9.3/main/postgresql.conf"]

View file

@ -1,153 +0,0 @@
<!--[metadata]>
+++
title = "Dockerizing PostgreSQL"
description = "Running and installing a PostgreSQL service"
keywords = ["docker, example, package installation, postgresql"]
[menu.main]
parent = "engine_dockerize"
+++
<![end-metadata]-->
# Dockerizing PostgreSQL
> **Note**:
> - **If you don't like sudo** then see [*Giving non-root
> access*](../installation/binaries.md#giving-non-root-access)
## Installing PostgreSQL on Docker
Assuming there is no Docker image that suits your needs on the [Docker
Hub](http://hub.docker.com), you can create one yourself.
Start by creating a new `Dockerfile`:
> **Note**:
> This PostgreSQL setup is for development-only purposes. Refer to the
> PostgreSQL documentation to fine-tune these settings so that it is
> suitably secure.
#
# example Dockerfile for https://docs.docker.com/examples/postgresql_service/
#
FROM ubuntu
MAINTAINER SvenDowideit@docker.com
# Add the PostgreSQL PGP key to verify their Debian packages.
# It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc
RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8
# Add PostgreSQL's repository. It contains the most recent stable release
# of PostgreSQL, ``9.3``.
RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list
# Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3
# There are some warnings (in red) that show up during the build. You can hide
# them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3
# Note: The official Debian and Ubuntu images automatically ``apt-get clean``
# after each ``apt-get``
# Run the rest of the commands as the ``postgres`` user created by the ``postgresql-9.3`` package when it was ``apt-get installed``
USER postgres
# Create a PostgreSQL role named ``docker`` with ``docker`` as the password and
# then create a database `docker` owned by the ``docker`` role.
# Note: here we use ``&&\`` to run commands one after the other - the ``\``
# allows the RUN command to span multiple lines.
RUN /etc/init.d/postgresql start &&\
psql --command "CREATE USER docker WITH SUPERUSER PASSWORD 'docker';" &&\
createdb -O docker docker
# Adjust PostgreSQL configuration so that remote connections to the
# database are possible.
RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf
# And add ``listen_addresses`` to ``/etc/postgresql/9.3/main/postgresql.conf``
RUN echo "listen_addresses='*'" >> /etc/postgresql/9.3/main/postgresql.conf
# Expose the PostgreSQL port
EXPOSE 5432
# Add VOLUMEs to allow backup of config, logs and databases
VOLUME ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"]
# Set the default command to run when starting the container
CMD ["/usr/lib/postgresql/9.3/bin/postgres", "-D", "/var/lib/postgresql/9.3/main", "-c", "config_file=/etc/postgresql/9.3/main/postgresql.conf"]
Build an image from the Dockerfile and assign it a name.
$ docker build -t eg_postgresql .
And run the PostgreSQL server container (in the foreground):
$ docker run --rm -P --name pg_test eg_postgresql
There are two ways to connect to the PostgreSQL server. We can use [*Link
Containers*](../userguide/networking/default_network/dockerlinks.md), or we can access it from our host
(or the network).
> **Note**:
> The `--rm` removes the container and its image when
> the container exits successfully.
### Using container linking
Containers can be linked to another container's ports directly using
`--link remote_name:local_alias` in the client's
`docker run`. This will set a number of environment
variables that can then be used to connect:
$ docker run --rm -t -i --link pg_test:pg eg_postgresql bash
postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password
### Connecting from your host system
Assuming you have the postgresql-client installed, you can use the
host-mapped port to test as well. You need to use `docker ps`
to find out what local host port the container is mapped to
first:
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
5e24362f27f6 eg_postgresql:latest /usr/lib/postgresql/ About an hour ago Up About an hour 0.0.0.0:49153->5432/tcp pg_test
$ psql -h localhost -p 49153 -d docker -U docker --password
### Testing the database
Once you have authenticated and have a `docker=#`
prompt, you can create a table and populate it.
psql (9.3.1)
Type "help" for help.
docker=# CREATE TABLE cities (
docker(# name varchar(80),
docker(# location point
docker(# );
CREATE TABLE
docker=# INSERT INTO cities VALUES ('San Francisco', '(-194.0, 53.0)');
INSERT 0 1
docker=# select * from cities;
name | location
---------------+-----------
San Francisco | (-194,53)
(1 row)
### Using the container volumes
You can use the defined volumes to inspect the PostgreSQL log files and
to back up your configuration and data:
$ docker run --rm --volumes-from pg_test -t -i busybox sh
/ # ls
bin etc lib linuxrc mnt proc run sys usr
dev home lib64 media opt root sbin tmp var
/ # ls /etc/postgresql/9.3/main/
environment pg_hba.conf postgresql.conf
pg_ctl.conf pg_ident.conf start.conf
/tmp # ls /var/log
ldconfig postgresql

View file

@ -1,89 +0,0 @@
<!--[metadata]>
+++
title = "Dockerizing a Redis service"
description = "Installing and running a redis service"
keywords = ["docker, example, package installation, networking, redis"]
[menu.main]
parent = "engine_dockerize"
+++
<![end-metadata]-->
# Dockerizing a Redis service
A very simple, no-frills Redis service attached to a web application
using a link.
## Create a Docker container for Redis
Firstly, we create a `Dockerfile` for our new Redis
image.
FROM ubuntu:14.04
RUN apt-get update && apt-get install -y redis-server
EXPOSE 6379
ENTRYPOINT ["/usr/bin/redis-server"]
Next we build an image from our `Dockerfile`.
Replace `<your username>` with your own user name.
$ docker build -t <your username>/redis .
## Run the service
Use the image we've just created and name your container `redis`.
Running the service with `-d` runs the container in detached mode, leaving
the container running in the background.
Importantly, we're not exposing any ports on our container. Instead
we're going to use a container link to provide access to our Redis
database.
$ docker run --name redis -d <your username>/redis
## Create your web application container
Next we can create a container for our application. We're going to use
the `--link` flag to create a link to the `redis` container we've just
created with an alias of `db`. This will create a secure tunnel to the
`redis` container and expose the Redis instance running inside that
container to only this container.
$ docker run --link redis:db -i -t ubuntu:14.04 /bin/bash
Once inside our freshly created container we need to install Redis to
get the `redis-cli` binary to test our connection.
$ sudo apt-get update
$ sudo apt-get install redis-server
$ sudo service redis-server stop
As we've used the `--link redis:db` option, Docker
has created some environment variables in our web application container.
$ env | grep DB_
# Should return something similar to this with your values
DB_NAME=/violet_wolf/db
DB_PORT_6379_TCP_PORT=6379
DB_PORT=tcp://172.17.0.33:6379
DB_PORT_6379_TCP=tcp://172.17.0.33:6379
DB_PORT_6379_TCP_ADDR=172.17.0.33
DB_PORT_6379_TCP_PROTO=tcp
We can see that we've got a small list of environment variables prefixed
with `DB`. The `DB` comes from the link alias specified when we launched
the container. Let's use the `DB_PORT_6379_TCP_ADDR` variable to connect to
our Redis container.
$ redis-cli -h $DB_PORT_6379_TCP_ADDR
172.17.0.33:6379> set docker awesome
OK
172.17.0.33:6379> get docker
"awesome"
172.17.0.33:6379> exit
We could easily use this or other environment variables in our web
application to make a connection to our `redis`
container.
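As an aside, with `--link` Docker also adds an entry for the alias to the container's `/etc/hosts`, so you can usually reach the Redis instance by the alias name directly (output illustrative):

```bash
$ redis-cli -h db
db:6379> ping
PONG
```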

View file

@ -1,31 +0,0 @@
# Riak
#
# VERSION 0.1.1
# Use the Ubuntu base image provided by dotCloud
FROM ubuntu:trusty
MAINTAINER Hector Castro hector@basho.com
# Install Riak repository before we do apt-get update, so that update happens
# in a single step
RUN apt-get install -q -y curl && \
curl -fsSL https://packagecloud.io/install/repositories/basho/riak/script.deb.sh | sudo bash
# Install and setup project dependencies
RUN apt-get update && \
apt-get install -y supervisor riak=2.0.5-1
RUN mkdir -p /var/log/supervisor
RUN locale-gen en_US en_US.UTF-8
COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
# Configure Riak to accept connections from any host
RUN sed -i "s|listener.http.internal = 127.0.0.1:8098|listener.http.internal = 0.0.0.0:8098|" /etc/riak/riak.conf
RUN sed -i "s|listener.protobuf.internal = 127.0.0.1:8087|listener.protobuf.internal = 0.0.0.0:8087|" /etc/riak/riak.conf
# Expose Riak Protocol Buffers and HTTP interfaces
EXPOSE 8087 8098
CMD ["/usr/bin/supervisord"]

View file

@ -1,108 +0,0 @@
<!--[metadata]>
+++
title = "Dockerizing a Riak service"
description = "Build a Docker image with Riak pre-installed"
keywords = ["docker, example, package installation, networking, riak"]
[menu.main]
parent = "engine_dockerize"
+++
<![end-metadata]-->
# Dockerizing a Riak service
The goal of this example is to show you how to build a Docker image with
Riak pre-installed.
## Creating a Dockerfile
Create an empty file called `Dockerfile`:
$ touch Dockerfile
Next, define the parent image you want to use to build your image on top
of. We'll use [Ubuntu](https://hub.docker.com/_/ubuntu/) (tag:
`trusty`), which is available on [Docker Hub](https://hub.docker.com):
# Riak
#
# VERSION 0.1.1
# Use the Ubuntu base image provided by dotCloud
FROM ubuntu:trusty
MAINTAINER Hector Castro hector@basho.com
After that, we install curl, which is used to download the repository
setup script, and then we download and run the script.
# Install Riak repository before we do apt-get update, so that update happens
# in a single step
RUN apt-get install -q -y curl && \
curl -fsSL https://packagecloud.io/install/repositories/basho/riak/script.deb.sh | sudo bash
Then we install and set up a few dependencies:
- `supervisor` is used to manage the Riak processes
- `riak=2.0.5-1` is the Riak package pinned to version 2.0.5
<!-- -->
# Install and setup project dependencies
RUN apt-get update && \
apt-get install -y supervisor riak=2.0.5-1
RUN mkdir -p /var/log/supervisor
RUN locale-gen en_US en_US.UTF-8
COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
After that, we modify Riak's configuration:
# Configure Riak to accept connections from any host
RUN sed -i "s|listener.http.internal = 127.0.0.1:8098|listener.http.internal = 0.0.0.0:8098|" /etc/riak/riak.conf
RUN sed -i "s|listener.protobuf.internal = 127.0.0.1:8087|listener.protobuf.internal = 0.0.0.0:8087|" /etc/riak/riak.conf
Then, we expose the Riak Protocol Buffers and HTTP interfaces:
# Expose Riak Protocol Buffers and HTTP interfaces
EXPOSE 8087 8098
Finally, run `supervisord` so that Riak is started:
CMD ["/usr/bin/supervisord"]
## Create a supervisord configuration file
Create an empty file called `supervisord.conf`. Make
sure it's at the same directory level as your `Dockerfile`:
touch supervisord.conf
Populate it with the following program definitions:
[supervisord]
nodaemon=true
[program:riak]
command=bash -c "/usr/sbin/riak console"
numprocs=1
autostart=true
autorestart=true
user=riak
environment=HOME="/var/lib/riak"
stdout_logfile=/var/log/supervisor/%(program_name)s.log
stderr_logfile=/var/log/supervisor/%(program_name)s.log
## Build the Docker image for Riak
Now you should be able to build a Docker image for Riak:
$ docker build -t "<yourname>/riak" .
## Next steps
Riak is a distributed database. Many production deployments consist of
[at least five nodes](
http://basho.com/why-your-riak-cluster-should-have-at-least-five-nodes/).
See the [docker-riak](https://github.com/hectcastro/docker-riak) project
for details on how to deploy a Riak cluster using Docker and Pipework.

View file

@ -1,16 +0,0 @@
FROM ubuntu:16.04
MAINTAINER Sven Dowideit <SvenDowideit@docker.com>
RUN apt-get update && apt-get install -y openssh-server
RUN mkdir /var/run/sshd
RUN echo 'root:screencast' | chpasswd
RUN sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
# SSH login fix. Otherwise user is kicked off after login
RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
ENV NOTVISIBLE "in users profile"
RUN echo "export VISIBLE=now" >> /etc/profile
EXPOSE 22
CMD ["/usr/sbin/sshd", "-D"]

View file

@ -1,89 +0,0 @@
<!--[metadata]>
+++
title = "Dockerizing an SSH service"
description = "Installing and running an SSHd service on Docker"
keywords = ["docker, example, package installation, networking"]
[menu.main]
parent = "engine_dockerize"
+++
<![end-metadata]-->
# Dockerizing an SSH daemon service
## Build an `eg_sshd` image
The following `Dockerfile` sets up an SSHd service in a container that you
can use to connect to and inspect other containers' volumes, or to get
quick access to a test container.
```Dockerfile
FROM ubuntu:16.04
MAINTAINER Sven Dowideit <SvenDowideit@docker.com>
RUN apt-get update && apt-get install -y openssh-server
RUN mkdir /var/run/sshd
RUN echo 'root:screencast' | chpasswd
RUN sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
# SSH login fix. Otherwise user is kicked off after login
RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
ENV NOTVISIBLE "in users profile"
RUN echo "export VISIBLE=now" >> /etc/profile
EXPOSE 22
CMD ["/usr/sbin/sshd", "-D"]
```
Build the image using:
```bash
$ docker build -t eg_sshd .
```
## Run a `test_sshd` container
Then run it. You can then use `docker port` to find out what host port
the container's port 22 is mapped to:
```bash
$ docker run -d -P --name test_sshd eg_sshd
$ docker port test_sshd 22
0.0.0.0:49154
```
And now you can ssh as `root` on the container's IP address (you can find it
with `docker inspect`) or on port `49154` of the Docker daemon's host IP address
(`ip address` or `ifconfig` can tell you that) or `localhost` if on the
Docker daemon host:
```bash
$ ssh root@192.168.1.2 -p 49154
# The password is ``screencast``.
root@f38c87f2a42d:/#
```
## Environment variables
Using the `sshd` daemon to spawn shells makes it complicated to pass environment
variables to the user's shell via the normal Docker mechanisms, as `sshd` scrubs
the environment before it starts the shell.
If you're setting values in the `Dockerfile` using `ENV`, you'll need to push them
to a shell initialization file like the `/etc/profile` example in the `Dockerfile`
above.
If you need to pass `docker run -e ENV=value` values, you will need to write a
short script to do the same before you start `sshd -D` and then replace the
`CMD` with that script.
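For example, the following is a minimal sketch of such a wrapper; the script name `entrypoint.sh` and the forwarded variable are illustrative assumptions, not part of the original example:

```bash
#!/bin/bash
# entrypoint.sh (hypothetical): copy selected `docker run -e` values into
# /etc/profile so login shells spawned by sshd can see them, then start sshd
# in the foreground, just as the original CMD did.
echo "export VISIBLE=$VISIBLE" >> /etc/profile
exec /usr/sbin/sshd -D
```

In the `Dockerfile`, you would `COPY` the script into the image, mark it executable, and point `CMD` at it instead of at `sshd -D`.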
## Clean up
Finally, clean up after your test by stopping and removing the
container, and then removing the image.
```bash
$ docker stop test_sshd
$ docker rm test_sshd
$ docker rmi eg_sshd
```

View file

@ -1,12 +0,0 @@
[supervisord]
nodaemon=true
[program:riak]
command=bash -c "/usr/sbin/riak console"
numprocs=1
autostart=true
autorestart=true
user=riak
environment=HOME="/var/lib/riak"
stdout_logfile=/var/log/supervisor/%(program_name)s.log
stderr_logfile=/var/log/supervisor/%(program_name)s.log

View file

@ -1,294 +0,0 @@
<!--[metadata]>
+++
aliases = ["/engine/misc/faq/"]
title = "FAQ"
description = "Most frequently asked questions."
keywords = ["faq, questions, documentation, docker"]
[menu.main]
identifier="engine_faq"
parent = "engine_use"
weight = 80
+++
<![end-metadata]-->
# Frequently Asked Questions (FAQ)
If you don't see your question here, feel free to submit new ones to
<docs@docker.com>. Or, you can fork [the
repo](https://github.com/docker/docker) and contribute them yourself by editing
the documentation sources.
### How much does Engine cost?
Docker Engine is 100% free. It is open source, so you can use it without paying.
### What open source license are you using?
We are using the Apache License Version 2.0, see it here:
[https://github.com/docker/docker/blob/master/LICENSE](
https://github.com/docker/docker/blob/master/LICENSE)
### Does Docker run on Mac OS X or Windows?
Docker Engine currently runs only on Linux, but you can use VirtualBox to run
Engine in a virtual machine on your box, and get the best of both worlds. Check
out the [*Mac OS X*](installation/mac.md) and [*Microsoft
Windows*](installation/windows.md) installation guides. The small Linux
distribution boot2docker can be set up with the Docker Machine tool to run
inside virtual machines on these two operating systems.
>**Note:** if you are using a remote Docker Engine daemon on a VM through Docker
>Machine, then _do not_ type the `sudo` before the `docker` commands shown in
>the documentation's examples.
### How do containers compare to virtual machines?
They are complementary. VMs are best used to allocate chunks of hardware
resources. Containers operate at the process level, which makes them very
lightweight and perfect as a unit of software delivery.
### What does Docker technology add to just plain LXC?
Docker technology is not a replacement for LXC. "LXC" refers to capabilities of
the Linux kernel (specifically namespaces and control groups) which allow
sandboxing processes from one another, and controlling their resource
allocations. On top of this low-level foundation of kernel features, Docker
offers a high-level tool with several powerful functionalities:
- *Portable deployment across machines.* Docker defines a format for bundling
an application and all its dependencies into a single object which can be
transferred to any Docker-enabled machine, and executed there with the
guarantee that the execution environment exposed to the application will be the
same. LXC implements process sandboxing, which is an important prerequisite
for portable deployment, but that alone is not enough for portable deployment.
If you sent me a copy of your application installed in a custom LXC
configuration, it would almost certainly not run on my machine the way it does
on yours, because it is tied to your machine's specific configuration:
networking, storage, logging, distro, etc. Docker defines an abstraction for
these machine-specific settings, so that the exact same Docker container can
run - unchanged - on many different machines, with many different
configurations.
- *Application-centric.* Docker is optimized for the deployment of
applications, as opposed to machines. This is reflected in its API, user
interface, design philosophy and documentation. By contrast, the `lxc` helper
scripts focus on containers as lightweight machines - basically servers that
boot faster and need less RAM. We think there's more to containers than just
that.
- *Automatic build.* Docker includes [*a tool for developers to automatically
assemble a container from their source
code*](reference/builder.md), with full control over application
dependencies, build tools, packaging etc. They are free to use `make`, `maven`,
`chef`, `puppet`, `salt`, Debian packages, RPMs, source tarballs, or any
combination of the above, regardless of the configuration of the machines.
- *Versioning.* Docker includes git-like capabilities for tracking successive
versions of a container, inspecting the diff between versions, committing new
versions, rolling back etc. The history also includes how a container was
assembled and by whom, so you get full traceability from the production server
all the way back to the upstream developer. Docker also implements incremental
uploads and downloads, similar to `git pull`, so new versions of a container
can be transferred by only sending diffs (see the sketch after this list).
- *Component re-use.* Any container can be used as a [*"base image"*](reference/glossary.md#image) to create more specialized components. This can
be done manually or as part of an automated build. For example you can prepare
the ideal Python environment, and use it as a base for 10 different
applications. Your ideal PostgreSQL setup can be re-used for all your future
projects. And so on.
- *Sharing.* Docker has access to a public registry [on Docker Hub](https://hub.docker.com/)
where thousands of people have uploaded useful images: anything from Redis,
CouchDB, PostgreSQL to IRC bouncers to Rails app servers to Hadoop to base
images for various Linux distros. The
[*registry*](https://docs.docker.com/registry/) also
includes an official "standard library" of useful containers maintained by the
Docker team. The registry itself is open-source, so anyone can deploy their own
registry to store and transfer private containers, for internal server
deployments for example.
- *Tool ecosystem.* Docker defines an API for automating and customizing the
creation and deployment of containers. There are a huge number of tools
integrating with Docker to extend its capabilities. PaaS-like deployment
(Dokku, Deis, Flynn), multi-node orchestration (Maestro, Salt, Mesos, Openstack
Nova), management dashboards (docker-ui, Openstack Horizon, Shipyard),
configuration management (Chef, Puppet), continuous integration (Jenkins,
Strider, Travis), etc. Docker is rapidly establishing itself as the standard
for container-based tooling.
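As a quick illustration of the versioning capabilities described above (a sketch; `docker/whalesay` is just a convenient public image, and `<container>` stands for any container ID or name):

```bash
# Show how an image was assembled, layer by layer.
docker history docker/whalesay

# Show which files a container has changed relative to its image.
docker diff <container>
```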
### What is different between a Docker container and a VM?
There's a great StackOverflow answer [showing the differences](
http://stackoverflow.com/questions/16047306/how-is-docker-io-different-from-a-normal-virtual-machine).
### Do I lose my data when the container exits?
Not at all! Any data that your application writes to disk gets preserved in its
container until you explicitly delete the container. The file system for the
container persists even after the container halts.
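As a quick illustration (a sketch, not from the original FAQ; the container name is arbitrary):

```bash
# Write a file inside a container and let the container exit...
docker run --name testdata busybox sh -c 'echo hello > /data.txt'

# ...then copy the file out of the stopped container: the data is still there.
docker cp testdata:/data.txt .
```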
### How far do Docker containers scale?
Some of the largest server farms in the world today are based on containers.
Large web deployments like Google and Twitter, and platform providers such as
Heroku and dotCloud all run on container technology, at a scale of hundreds of
thousands or even millions of containers running in parallel.
### How do I connect Docker containers?
Currently the recommended way to connect containers is via the Docker network feature. You can see details of how to [work with Docker networks here](userguide/networking/work-with-networks.md).
Also useful for more flexible service portability is the [Ambassador linking
pattern](admin/ambassador_pattern_linking.md).
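As a minimal sketch (the network and container names here are illustrative):

```bash
# Create a user-defined network and attach two containers to it. On such a
# network, containers can reach each other by name.
docker network create mynet
docker run -d --net=mynet --name=db redis
docker run --rm --net=mynet busybox ping -c 1 db
```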
### How do I run more than one process in a Docker container?
Any capable process supervisor such as [http://supervisord.org/](
http://supervisord.org/), runit, s6, or daemontools can do the trick. Docker
will start up the process management daemon which will then fork to run
additional processes. As long as the process manager daemon continues to run,
the container continues to run as well. You can see a more substantial example
[that uses supervisord here](admin/using_supervisord.md).
### What platforms does Docker run on?
Linux:
- Ubuntu 12.04, 13.04 et al
- Fedora 19/20+
- RHEL 6.5+
- CentOS 6+
- Gentoo
- ArchLinux
- openSUSE 12.3+
- CRUX 3.0+
Cloud:
- Amazon EC2
- Google Compute Engine
- Microsoft Azure
- Rackspace
### How do I report a security issue with Docker?
You can learn about the project's security policy
[here](https://www.docker.com/security/) and report security issues to this
[mailbox](mailto:security@docker.com).
### Why do I need to sign my commits to Docker with the DCO?
Please read [our blog post](
http://blog.docker.com/2014/01/docker-code-contributions-require-developer-certificate-of-origin/) on the introduction of the DCO.
### When building an image, should I prefer system libraries or bundled ones?
*This is a summary of a discussion on the [docker-dev mailing list](
https://groups.google.com/forum/#!topic/docker-dev/L2RBSPDu1L0).*
Virtually all programs depend on third-party libraries. Most frequently, they
will use dynamic linking and some kind of package dependency, so that when
multiple programs need the same library, it is installed only once.
Some programs, however, will bundle their third-party libraries, because they
rely on very specific versions of those libraries. For instance, Node.js bundles
OpenSSL; MongoDB bundles V8 and Boost (among others).
When creating a Docker image, is it better to use the bundled libraries, or
should you build those programs so that they use the default system libraries
instead?
The key point about system libraries is not about saving disk or memory space.
It is about security. All major distributions handle security seriously, by
having dedicated security teams, following up closely with published
vulnerabilities, and disclosing advisories themselves. (Look at the [Debian
Security Information](https://www.debian.org/security/) for an example of those
procedures.) Upstream developers, however, do not always implement similar
practices.
Before setting up a Docker image to compile a program from source, if you want
to use bundled libraries, you should check if the upstream authors provide a
convenient way to announce security vulnerabilities, and if they update their
bundled libraries in a timely manner. If they don't, you are exposing yourself
(and the users of your image) to security vulnerabilities.
Likewise, before using packages built by others, you should check if the
channels providing those packages implement similar security best practices.
Downloading and installing an "all-in-one" .deb or .rpm sounds great at first,
except if you have no way to figure out that it contains a copy of the OpenSSL
library vulnerable to the [Heartbleed](http://heartbleed.com/) bug.
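As one way to perform such a check, the following sketch inspects the version of a system library shipped in a base image so you can compare it against the distribution's advisories (the image and package name are assumptions for illustration):

```bash
# Report the OpenSSL library version packaged in an Ubuntu 16.04 base image.
docker run --rm ubuntu:16.04 dpkg -s libssl1.0.0 | grep -i '^version'
```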
### Why is `DEBIAN_FRONTEND=noninteractive` discouraged in Dockerfiles?
When building Docker images on Debian and Ubuntu you may have seen errors like:
unable to initialize frontend: Dialog
These errors don't stop the image from being built but inform you that the
installation process tried to open a dialog box, but was unable to. Generally,
these errors are safe to ignore.
Some people circumvent these errors by changing the `DEBIAN_FRONTEND`
environment variable inside the Dockerfile using:
ENV DEBIAN_FRONTEND=noninteractive
This prevents the installer from opening dialog boxes during installation which
stops the errors.
While this may sound like a good idea, it *may* have side effects. The
`DEBIAN_FRONTEND` environment variable will be inherited by all images and
containers built from your image, effectively changing their behavior. People
using those images will run into problems when installing software
interactively, because installers will not show any dialog boxes.
Because of this, and because setting `DEBIAN_FRONTEND` to `noninteractive` is
mainly a 'cosmetic' change, we *discourage* changing it.
If you *really* need to change its setting, make sure to change it back to its
[default value](https://www.debian.org/releases/stable/i386/ch05s03.html.en)
afterwards.
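If you only need the setting for a single installation step, one common alternative (a sketch, not a recommendation from this FAQ) is to scope the variable to that `RUN` instruction, so it is never persisted in the image:

```Dockerfile
# The variable applies only to this RUN command; it is not inherited by
# containers or images built from the result.
RUN DEBIAN_FRONTEND=noninteractive apt-get update \
 && DEBIAN_FRONTEND=noninteractive apt-get install -y dialog
```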
### Why do I get `Connection reset by peer` when making a request to a service running in a container?
Typically, this message is returned if the service is already bound to your
localhost. As a result, requests coming to the container from outside are
dropped. To correct this problem, change the service's configuration on your
localhost so that the service accepts requests from all IPs. If you aren't sure
how to do this, check the documentation for your OS.
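To see the difference, compare the two commands in this sketch, which uses Python's built-in web server (the image and port are arbitrary):

```bash
# Bound to localhost only: requests from outside the container are dropped.
docker run -d -p 8000:8000 python:3 python -m http.server --bind 127.0.0.1 8000

# Bound to all interfaces: requests from outside the container succeed.
docker run -d -p 8000:8000 python:3 python -m http.server --bind 0.0.0.0 8000
```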
### Why do I get `Cannot connect to the Docker daemon. Is the docker daemon running on this host?` when using docker-machine?
This error means that the Docker client cannot connect to the virtual machine.
Either the virtual machine that `docker-machine` manages is not running, or the
client is not correctly pointed at it.
To verify that the docker machine is running you can use the `docker-machine ls`
command and start it with `docker-machine start` if needed.
$ docker-machine ls
NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS
default - virtualbox Stopped Unknown
$ docker-machine start default
You have to tell Docker to talk to that machine. You can do this with the
`docker-machine env` command. For example,
$ eval "$(docker-machine env default)"
$ docker ps
### Where can I find more answers?
You can find more answers on:
- [Docker user mailing list](https://groups.google.com/d/forum/docker-user)
- [Docker developer mailing list](https://groups.google.com/d/forum/docker-dev)
- [IRC, docker on freenode](irc://chat.freenode.net#docker)
- [GitHub](https://github.com/docker/docker)
- [Ask questions on Stackoverflow](http://stackoverflow.com/search?q=docker)
- [Join the conversation on Twitter](http://twitter.com/docker)
Looking for something else to read? Check out the [User Guide](userguide/index.md).

View file

@ -1,66 +0,0 @@
<!--[metadata]>
+++
aliases = [
"/mac/started/",
"/windows/started/",
"/linux/started/",
"/getting-started/"
]
title = "Get Started with Docker"
description = "Getting started with Docker"
keywords = ["beginner, getting started, Docker"]
[menu.main]
identifier = "getstart_all"
parent = "tutorial_getstart_menu"
weight="-1"
+++
<![end-metadata]-->
# Get Started with Docker
This tutorial is for non-technical users who are interested in learning more about Docker. By following these steps, you'll learn fundamental Docker features while working through some simple tasks.
Depending on how you got here, you may or may not have already downloaded Docker for your platform and installed it.
## Got Docker?
If you haven't yet downloaded Docker for your platform or installed it, go to [Get Docker](step_one.md#step-1-get-docker).
## Ready to start working with Docker?
If you have already downloaded and installed Docker, you are ready to run Docker commands! Go to [Verify your installation](step_one.md#step-3-verify-your-installation).
### What you'll learn and do
You'll learn how to:
* install Docker software for your platform
* run a software image in a container
* browse for an image on Docker Hub
* create your own image and run it in a container
* create a Docker Hub account and an image repository
* create an image of your own
* push your image to Docker Hub for others to use
This getting started guide was user-tested to reduce the chance of problems. For the best chance of success, follow the steps as written the first time before exploring on your own. It takes approximately 45 minutes to complete.
## Flavors of Docker
This tutorial is designed as a getting-started guide for Docker, and works the same whether you are using Docker for Mac, Docker for Windows, Docker on Linux, or Docker Toolbox (for older Mac and Windows systems).
If you are using Docker Toolbox, you can use the Docker Quickstart Terminal to run Docker commands in a pre-configured environment instead of opening a command line terminal.
If you are using Docker for Mac, Docker for Windows, or Docker on Linux, you will have Docker running in the background, and your standard command line terminal is already set up to run Docker commands.
## How much command line savvy do I need?
The getting started tour uses Docker Engine CLI commands entered on the command line of a terminal window. You don't need to be a wizard at the command line, but you should be familiar with how to open your favorite shell or terminal, and run basic commands in that environment. It helps (but isn't required) to know how to navigate a directory tree, manipulate files, list running processes, and so forth.
## Where to go next
Go to [the next page to install](step_one.md).
&nbsp;

View file

@ -1,70 +0,0 @@
<!--[metadata]>
+++
aliases = [
"/mac/last_page/",
"/windows/last_page/",
"/linux/last_page/",
]
title = "Learning more"
description = "Getting started with Docker"
keywords = ["beginner, getting started, Docker"]
[menu.main]
identifier = "getstart_learn_more"
parent = "tutorial_getstart_menu"
weight = 7
+++
<![end-metadata]-->
# Learning more
This getting started guide provided the basic essentials for using Docker on Mac, Windows, and Linux. If you want to learn more about end-to-end development, start with the full install instructions and feature overviews, then follow up with more advanced tutorials and user guides.
Depending on your interest, the Docker documentation contains a wealth of information. Here are some places to start:
<style type="text/css">
</style>
<table class="tutorial">
<tr>
<th class="tg-031e">If you are looking for</th>
<th class="tg-031e">Where to find it</th>
</tr>
<tr>
<td class="tg-031e">More about Docker for Mac, features, examples, FAQs, relationship to Docker Machine and Docker Toolbox, and how this fits in the Docker ecosystem</td>
<td class="tg-031e">[Getting Started with Docker for Mac](https://docs.docker.com/docker-for-mac/)</td>
</tr>
<tr>
<td class="tg-031e">More about Docker for Windows, features, examples, FAQs, relationship to Docker Machine and Docker Toolbox, and how this fits in the Docker ecosystem</td>
<td class="tg-031e">[Getting Started with Docker for Windows](https://docs.docker.com/docker-for-windows/)</td>
</tr>
<tr>
<td class="tg-031e">More about Docker Toolbox</td>
<td class="tg-031e">[Docker Toolbox Overview](/toolbox/overview.md)</td>
</tr>
<tr>
<td class="tg-031e">More about Docker for Linux distributions</td>
<td class="tg-031e">[Install Docker Engine on Linux](/engine/installation/linux/index.md)</td>
</tr>
<tr>
<td class="tg-031e">More advanced tutorials on running containers, building your own images, networking containers, managing data for containers, and storing images on Docker Hub</td>
<td class="tg-031e">[Learn by example](/engine/tutorials/index.md)</td>
</tr>
<tr>
<td class="tg-031e">Information about the Docker product line</td>
<td class="tg-031e"><a href="http://www.docker.com/products/">The product explainer is a good place to start.</a></td>
</tr>
<tr>
<td class="tg-031e">How to set up an automated build on Docker Hub</td>
<td class="tg-031e"><a href="https://docs.docker.com/docker-hub/">Docker Hub documentation</a></td>
</tr>
<tr>
<td class="tg-031e">How to run a multi-container application with Compose</td>
<td class="tg-031e"> [Docker Compose documentation](/compose/overview.md)
</td>
</tr>
</table>
&nbsp;

View file

@ -1,44 +0,0 @@
<!--[metadata]>
+++
aliases = ["/mac/started/"]
title = "Install Docker and run hello-world"
description = "Getting started with Docker"
keywords = ["beginner, getting started, Docker, install"]
identifier = "getstart_linux_install"
parent = "tutorial_getstart_menu"
weight="-80"
+++
<![end-metadata]-->
# Example: Install Docker on Ubuntu Linux
This installation procedure is for users who are unfamiliar with package
managers and just want to try out the Getting Started tutorial while running
Docker on Linux. If you are comfortable with package managers, prefer not to use
`curl`, or have problems installing and want to troubleshoot, please use our
`apt` and `yum` <a href="https://docs.docker.com/engine/installation/"
target="_blank">repositories instead for your installation</a>.
1. Log into your Ubuntu installation as a user with `sudo` privileges.
2. Verify that you have `curl` installed.
$ which curl
If `curl` isn't installed, install it after updating your package manager:
$ sudo apt-get update
$ sudo apt-get install curl
3. Get the latest Docker package.
$ curl -fsSL https://get.docker.com/ | sh
The system prompts you for your `sudo` password. Then, it downloads and
installs Docker and its dependencies.
>**Note**: If your company is behind a filtering proxy, you may find that the
>`apt-key`
>command fails for the Docker repo during installation. To work around this,
>add the key directly using the following:
>
> $ curl -fsSL https://get.docker.com/gpg | sudo apt-key add -

View file

@ -1,16 +0,0 @@
<!--[metadata]>
+++
aliases = [
]
title = "Get Started with Docker"
description = "Docker Mac"
keywords = ["beginner, getting started, Docker"]
type = "menu"
[menu.main]
identifier = "tutorial_getstart_menu"
parent = "engine_use"
weight = -80
+++
<![end-metadata]-->
# Get Started with Docker

View file

@ -1,78 +0,0 @@
<!--[metadata]>
+++
aliases = [
"/mac/step_five/",
"/windows/step_five/",
"/linux/step_five/",
]
title = "Create a Docker Hub account & repository"
description = "Getting started with Docker"
keywords = ["beginner, getting started, Docker"]
[menu.main]
identifier = "getstart_docker_hub"
parent = "tutorial_getstart_menu"
weight = 5
+++
<![end-metadata]-->
# Create a Docker Hub account & repository
You've built something really cool, you should share it. In this next section,
you'll do just that. You'll need a Docker Hub account. Then, you'll push your
image up to it so other people with Docker Engine can run it.
## Step 1: Sign up for an account
1. Use your browser to navigate to <a href="https://hub.docker.com/?utm_source=getting_started_guide&utm_medium=embedded_MacOSX&utm_campaign=create_docker_hub_account" target="_blank">the Docker Hub signup page</a>.
Your browser displays the page.
![Docker Hub signup](tutimg/hub_signup.png)
2. Fill out the form on the signup page.
Docker Hub is free. Docker does need a name, password, and email address.
3. Press **Signup**.
The browser displays the welcome to Docker Hub page.
## Step 2: Verify your email and add a repository
Before you can share anything on the hub, you need to verify your email address.
1. Open your email inbox.
2. Look for the email titled `Please confirm email for your Docker Hub account`.
If you don't see the email, check your Spam folder or wait a moment for the email to arrive.
3. Open the email and click the **Confirm Your Email** button.
The browser opens Docker Hub to your profile page.
4. Choose **Create Repository**.
The browser opens the **Create Repository** page.
5. Provide a Repository Name and Short Description.
6. Make sure Visibility is set to **Public**.
When you are done, your form should look similar to the following:
![Add repo](tutimg/add_repository.png)
7. Press **Create** when you are done.
Docker Hub creates your new repository.
## Where to go next
On this page, you opened an account on Docker Hub and created a new repository.
In the next section, you populate the repository [by tagging and pushing the
image you created earlier](step_six.md).
&nbsp;

View file

@ -1,222 +0,0 @@
<!--[metadata]>
+++
aliases = [
"/mac/step_four/",
"/windows/step_four/",
"/linux/step_four/",
]
title = "Build your own image"
description = "Getting started with Docker"
keywords = ["beginner, getting started, Docker"]
[menu.main]
identifier = "getstart_build_image"
parent = "tutorial_getstart_menu"
weight = 4
+++
<![end-metadata]-->
# Build your own image
The `whalesay` image could be improved. It would be nice if you didn't have to
think of something to say, and you have to type a lot to get `whalesay` to talk.
docker run docker/whalesay cowsay boo-boo
In this next section, you will improve the `whalesay` image by building a new version that "talks on its own" and requires fewer words to run.
## Step 1: Write a Dockerfile
In this step, you use your favorite text editor to write a short Dockerfile. A
Dockerfile describes the software that is "baked" into an image. It isn't just
ingredients tho, it can tell the software what environment to use or what
commands to run. Your recipe is going to be very short.
1. Go back to your command terminal window.
2. Make a new directory by typing `mkdir mydockerbuild` and pressing RETURN.
$ mkdir mydockerbuild
This directory serves as the "context" for your build. The context just means it contains all the things you need to build your image.
3. Change to your new directory.
$ cd mydockerbuild
Right now the directory is empty.
4. Create a Dockerfile in the directory by typing `touch Dockerfile` and pressing RETURN.
$ touch Dockerfile
The command appears to do nothing, but it actually creates the Dockerfile in the current directory. Just type `ls Dockerfile` to see it.
$ ls Dockerfile
Dockerfile
5. Open the `Dockerfile` in a visual text editor like <a href="https://atom.io/" target="_blank">Atom</a> or <a href="https://www.sublimetext.com/" target="_blank">Sublime</a>, or a text-based editor like `vi` or `nano` (https://www.nano-editor.org/).
6. Add a line to the file like this:
FROM docker/whalesay:latest
The `FROM` keyword tells Docker which image your image is based on. Whalesay is cute and has the `cowsay` program already, so we'll start there.
7. Now, add the `fortunes` program to the image.
RUN apt-get -y update && apt-get install -y fortunes
The `fortunes` program has a command that prints out wise sayings for our whale to say. So, the first step is to install it. This line installs the software into the image.
8. Once the image has the software it needs, you instruct the software to run
when the image is loaded.
CMD /usr/games/fortune -a | cowsay
This line tells the `fortune` program to pass a nifty quote to the `cowsay` program.
9. Check your work, your file should look like this:
FROM docker/whalesay:latest
RUN apt-get -y update && apt-get install -y fortunes
CMD /usr/games/fortune -a | cowsay
10. Save and close your Dockerfile.
At this point, you have all your software ingredients and behaviors described in a Dockerfile. You are ready to build a new image.
## Step 2: Build an image from your Dockerfile
1. At the command line, make sure the Dockerfile is in the current directory by typing `cat Dockerfile`
$ cat Dockerfile
FROM docker/whalesay:latest
RUN apt-get -y update && apt-get install -y fortunes
CMD /usr/games/fortune -a | cowsay
2. Now, build your new image by typing the `docker build -t docker-whale .` command in your terminal (don't forget the . period).
$ docker build -t docker-whale .
Sending build context to Docker daemon 2.048 kB
...snip...
Removing intermediate container a8e6faa88df3
Successfully built 7d9495d03763
The command takes several seconds to run and reports its outcome. Before
you do anything with the new image, take a minute to learn about the
Dockerfile build process.
## Step 3: Learn about the build process
The `docker build -t docker-whale .` command takes the `Dockerfile` in the
current directory, and builds an image called `docker-whale` on your local
machine. The command takes about a minute and its output looks really long and
complex. In this section, you learn what each message means.
First Docker checks to make sure it has everything it needs to build.
Sending build context to Docker daemon 2.048 kB
Then, Docker loads the `whalesay` image. It already has this image
locally as you might recall from the last page. So, Docker doesn't need to
download it.
Step 1 : FROM docker/whalesay:latest
---> fb434121fc77
Docker moves on to the next step, which is to update the `apt-get` package
manager. This takes a lot of lines, no need to list them all again here.
Step 2 : RUN apt-get -y update && apt-get install -y fortunes
---> Running in 27d224dfa5b2
Ign http://archive.ubuntu.com trusty InRelease
Ign http://archive.ubuntu.com trusty-updates InRelease
Ign http://archive.ubuntu.com trusty-security InRelease
Hit http://archive.ubuntu.com trusty Release.gpg
....snip...
Get:15 http://archive.ubuntu.com trusty-security/restricted amd64 Packages [14.8 kB]
Get:16 http://archive.ubuntu.com trusty-security/universe amd64 Packages [134 kB]
Reading package lists...
---> eb06e47a01d2
Then, Docker installs the new `fortunes` software.
Reading package lists...
Building dependency tree...
Reading state information...
The following extra packages will be installed:
fortune-mod fortunes-min librecode0
Suggested packages:
x11-utils bsdmainutils
The following NEW packages will be installed:
fortune-mod fortunes fortunes-min librecode0
0 upgraded, 4 newly installed, 0 to remove and 3 not upgraded.
Need to get 1961 kB of archives.
After this operation, 4817 kB of additional disk space will be used.
Get:1 http://archive.ubuntu.com/ubuntu/ trusty/main librecode0 amd64 3.6-21 [771 kB]
...snip......
Setting up fortunes (1:1.99.1-7) ...
Processing triggers for libc-bin (2.19-0ubuntu6.6) ...
---> c81071adeeb5
Removing intermediate container 23aa52c1897c
Finally, Docker finishes the build and reports its outcome.
Step 3 : CMD /usr/games/fortune -a | cowsay
---> Running in a8e6faa88df3
---> 7d9495d03763
Removing intermediate container a8e6faa88df3
Successfully built 7d9495d03763
## Step 4: Run your new docker-whale
In this step, you verify that the new image is on your computer, and then you run your new image.
1. Open a command line terminal.
2. Type `docker images` and press RETURN.
This command, you might remember, lists the images you have locally.
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
docker-whale latest 7d9495d03763 4 minutes ago 273.7 MB
docker/whalesay latest fb434121fc77 4 hours ago 247 MB
hello-world latest 91c95931e552 5 weeks ago 910 B
3. Run your new image by typing `docker run docker-whale` and pressing RETURN.
$ docker run docker-whale
_________________________________________
/ "He was a modest, good-humored boy. It \
\ was Oxford that made him insufferable." /
-----------------------------------------
\
\
\
## .
## ## ## ==
## ## ## ## ===
/""""""""""""""""___/ ===
~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~
\______ o __/
\ \ __/
\____\______/
As you can see, you've made the whale a lot smarter. It finds its own
things to say and the command line is a lot shorter! You may also notice
that Docker didn't have to download anything. That is because the image was
built locally and is already available.
## Where to go next
On this page, you learned to build an image by writing your own Dockerfile.
You ran your image in a container. You also just used Linux from your Mac yet
again. In the next section, you take the first step in sharing your image by
[creating a Docker Hub account](step_five.md).
&nbsp;

View file

@ -1,142 +0,0 @@
<!--[metadata]>
+++
aliases = [
"/mac/step_one/",
"/windows/step_one/",
"/linux/step_one/",
]
title = "Install Docker and run hello-world"
description = "Getting started with Docker"
keywords = ["beginner, getting started, Docker, install"]
[menu.main]
identifier = "getstart_all_install"
parent = "tutorial_getstart_menu"
weight = 1
+++
<![end-metadata]-->
# Install Docker
- [Step 1: Get Docker](#step-1-get-docker)
- [Step 2: Install Docker](#step-2-install-docker)
- [Step 3: Verify your installation](#step-3-verify-your-installation)
## Step 1: Get Docker
### Docker for Mac
Docker for Mac is our newest offering for the Mac. It runs as a native Mac application and uses <a href="https://github.com/mist64/xhyve/" target="_blank">xhyve</a> to virtualize the Docker Engine environment and Linux kernel-specific features for the Docker daemon.
<a class="button" href="https://download.docker.com/mac/stable/Docker.dmg">Get Docker for Mac</a>
**Requirements**
- Mac must be a 2010 or newer model, with Intel's hardware support for memory management unit (MMU) virtualization; i.e., Extended Page Tables (EPT)
- OS X 10.10.3 Yosemite or newer
- At least 4GB of RAM
- VirtualBox prior to version 4.3.30 must NOT be installed (it is incompatible with Docker for Mac). Docker for Mac will error out on install in this case. Uninstall the older version of VirtualBox and re-try the install.
#### Docker Toolbox for the Mac
If you have an earlier Mac that doesn't meet the Docker for Mac prerequisites, <a href="https://www.docker.com/products/docker-toolbox" target="_blank">get Docker Toolbox</a> for the Mac.
See [Docker Toolbox Overview](/toolbox/overview.md) for help on installing Docker with Toolbox.
### Docker for Windows
Docker for Windows is our newest offering for PCs. It runs as a native Windows application and uses Hyper-V to virtualize the Docker Engine environment and Linux kernel-specific features for the Docker daemon.
<a class="button" href="https://download.docker.com/win/stable/InstallDocker.msi">Get Docker for Windows</a>
**Requirements**
* 64-bit Windows 10 Pro, Enterprise, and Education (1511 November update, Build 10586 or later). In the future, we will support more versions of Windows 10.
* The Hyper-V package must be enabled. The Docker for Windows installer will enable it for you, if needed. (This requires a reboot).
#### Docker Toolbox for Windows
If you have an earlier Windows system that doesn't meet the Docker for Windows prerequisites, <a href="https://www.docker.com/products/docker-toolbox" target="_blank">get Docker Toolbox</a>.
See [Docker Toolbox Overview](/toolbox/overview.md) for help on installing Docker with Toolbox.
### Docker for Linux
Docker Engine runs natively on Linux distributions.
For full instructions on getting Docker for various Linux distributions, see [Install Docker Engine](/engine/installation/index.md).
## Step 2: Install Docker
- **Docker for Mac** - Install instructions are at [Getting Started with Docker for Mac](https://docs.docker.com/docker-for-mac/).
- **Docker for Windows** - Install instructions are at [Getting Started with Docker for Windows](https://docs.docker.com/docker-for-windows/).
- **Docker Toolbox** - Install instructions are at [Docker Toolbox Overview](/toolbox/overview.md).
- **Docker on Linux** - For a simple example of installing Docker on Ubuntu Linux so that you can work through this tutorial, see [Installing Docker on Ubuntu Linux (Example)](linux_install_help.md). Full install instructions for all flavors of Linux we support are at [Install Docker Engine](/engine/installation/index.md).
## Step 3: Verify your installation
1. Open a command-line terminal, and run some Docker commands to verify that Docker is working as expected.
Some good commands to try are `docker version` to check that you have the latest release installed and `docker ps` to see if you have any running containers. (Probably not, since you just started.)
2. Type the `docker run hello-world` command and press RETURN.
The command does some work for you; if everything runs well, the command's
output looks like this:
$ docker run hello-world
Unable to find image 'hello-world:latest' locally
latest: Pulling from library/hello-world
535020c3e8ad: Pull complete
af340544ed62: Pull complete
Digest: sha256:a68868bfe696c00866942e8f5ca39e3e31b79c1e50feaee4ce5e28df2f051d5c
Status: Downloaded newer image for hello-world:latest
Hello from Docker.
This message shows that your installation appears to be working correctly.
To generate this message, Docker took the following steps:
1. The Docker Engine CLI client contacted the Docker Engine daemon.
2. The Docker Engine daemon pulled the "hello-world" image from the Docker Hub.
3. The Docker Engine daemon created a new container from that image which runs the
executable that produces the output you are currently reading.
4. The Docker Engine daemon streamed that output to the Docker Engine CLI client, which sent it
to your terminal.
To try something more ambitious, you can run an Ubuntu container with:
$ docker run -it ubuntu bash
Share images, automate workflows, and more with a free Docker Hub account:
https://hub.docker.com
For more examples and ideas, visit:
https://docs.docker.com/userguide/
3. Run `docker ps -a` to show all containers on the system.
$ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
592376ff3eb8 hello-world "/hello" 25 seconds ago Exited (0) 24 seconds ago prickly_wozniak
You should see your `hello-world` container listed in the output for the `docker ps -a` command.
The command `docker ps` shows only currently running containers. Since `hello-world` already ran and exited, it wouldn't show up with a `docker ps`.
## Looking for troubleshooting help?
Typically, the above steps work out-of-the-box, but some scenarios can cause problems. If your `docker run hello-world` didn't work and resulted in errors, check out [Troubleshooting](/toolbox/faqs/troubleshoot.md) for quick fixes to common problems.
## Where to go next
At this point, you have successfully installed the Docker software. Leave the
Docker Quickstart Terminal window open. Now, go to the next page to [read a very
short introduction to Docker images and containers](step_two.md).
&nbsp;

View file

@ -1,206 +0,0 @@
<!--[metadata]>
+++
aliases = [
"/mac/step_six/",
"/windows/step_six/",
"/linux/step_six/",
]
title = "Tag, push, & pull your image"
description = "Getting started with Docker"
keywords = ["beginner, getting started, Docker"]
[menu.main]
identifier = "getstart_tag_push_pull"
parent = "tutorial_getstart_menu"
weight = 6
+++
<![end-metadata]-->
# Tag, push, and pull your image
In this section, you tag and push your `docker-whale` image to your newly
created repository. When you are done, you test the repository by pulling your
new image.
## Step 1: Tag and push the image
If you don't already have a terminal open, open one now:
1. Go back to your command line terminal.
2. At the prompt, type `docker images` to list the images you currently have:
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
docker-whale latest 7d9495d03763 38 minutes ago 273.7 MB
<none> <none> 5dac217f722c 45 minutes ago 273.7 MB
docker/whalesay latest fb434121fc77 4 hours ago 247 MB
hello-world latest 91c95931e552 5 weeks ago 910 B
3. Find the `IMAGE ID` for your `docker-whale` image.
In this example, the id is `7d9495d03763`.
Notice that currently, the `REPOSITORY` shows the repo name `docker-whale`
but not the namespace. You need to include the `namespace` for Docker Hub to
associate it with your account. The `namespace` is the same as your Docker
Hub account name. You need to rename the image to
`YOUR_DOCKERHUB_NAME/docker-whale`.
4. Use the `IMAGE ID` and the `docker tag` command to tag your `docker-whale` image.
The command you type looks like this:
![Docker tag command](tutimg/tagger.png)
Of course, your account name will be your own. So, you type the command with
your image's ID and your account name and press RETURN.
$ docker tag 7d9495d03763 maryatdocker/docker-whale:latest
5. Type the `docker images` command again to see your newly tagged image.
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
maryatdocker/docker-whale latest 7d9495d03763 5 minutes ago 273.7 MB
docker-whale latest 7d9495d03763 2 hours ago 273.7 MB
<none> <none> 5dac217f722c 5 hours ago 273.7 MB
docker/whalesay latest fb434121fc77 5 hours ago 247 MB
hello-world latest 91c95931e552 5 weeks ago 910 B
6. Use the `docker login` command to log into the Docker Hub from the command line.
The format for the login command is:
docker login
When prompted, enter your Docker ID and password, then press enter. For example:
$ docker login
Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.
Username:
Password:
Login Succeeded
7. Type the `docker push` command to push your image to your new repository.
$ docker push maryatdocker/docker-whale
The push refers to a repository [maryatdocker/docker-whale] (len: 1)
7d9495d03763: Image already exists
c81071adeeb5: Image successfully pushed
eb06e47a01d2: Image successfully pushed
fb434121fc77: Image successfully pushed
5d5bd9951e26: Image successfully pushed
99da72cfe067: Image successfully pushed
1722f41ddcb5: Image successfully pushed
5b74edbcaa5b: Image successfully pushed
676c4a1897e6: Image successfully pushed
07f8e8c5e660: Image successfully pushed
37bea4ee0c81: Image successfully pushed
a82efea989f9: Image successfully pushed
e9e06b06e14c: Image successfully pushed
Digest: sha256:ad89e88beb7dc73bf55d456e2c600e0a39dd6c9500d7cd8d1025626c4b985011
8. Return to your profile on Docker Hub to see your new image.
![Docker tag command](tutimg/new_image.png)
## Step 2: Pull your new image
In this last section, you'll pull the image you just pushed to the hub. Before
you do that, though, you'll need to remove the original image from your local
machine. If you left the original image on your machine, Docker would not pull
from the hub &mdash; why would it? The two images are identical.
1. Make sure Docker is running, and open a command line terminal.
2. At the prompt, type `docker images` to list the images you currently have on your local machine.
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
maryatdocker/docker-whale latest 7d9495d03763 5 minutes ago 273.7 MB
docker-whale latest 7d9495d03763 2 hours ago 273.7 MB
<none> <none> 5dac217f722c 5 hours ago 273.7 MB
docker/whalesay latest fb434121fc77 5 hours ago 247 MB
hello-world latest 91c95931e552 5 weeks ago 910 B
To make a good test, you need to remove the `maryatdocker/docker-whale` and
`docker-whale` images from your local system. Removing them forces the next
`docker pull` to get the image from your repository.
3. Use the `docker rmi` command to remove the `maryatdocker/docker-whale` and `docker-whale`
images.
You can use an ID or the name to remove an image.
$ docker rmi -f 7d9495d03763
$ docker rmi -f docker-whale
4. Pull and load a new image from your repository using the `docker run` command.
The command you type should include your username from Docker Hub.
docker run yourusername/docker-whale
Since the image is no longer available on your local system, Docker downloads it.
$ docker run maryatdocker/docker-whale
Unable to find image 'maryatdocker/docker-whale:latest' locally
latest: Pulling from maryatdocker/docker-whale
eb06e47a01d2: Pull complete
c81071adeeb5: Pull complete
7d9495d03763: Already exists
e9e06b06e14c: Already exists
a82efea989f9: Already exists
37bea4ee0c81: Already exists
07f8e8c5e660: Already exists
676c4a1897e6: Already exists
5b74edbcaa5b: Already exists
1722f41ddcb5: Already exists
99da72cfe067: Already exists
5d5bd9951e26: Already exists
fb434121fc77: Already exists
Digest: sha256:ad89e88beb7dc73bf55d456e2c600e0a39dd6c9500d7cd8d1025626c4b985011
Status: Downloaded newer image for maryatdocker/docker-whale:latest
________________________________________
/ Having wandered helplessly into a \
| blinding snowstorm Sam was greatly |
| relieved to see a sturdy Saint Bernard |
| dog bounding toward him with the |
| traditional keg of brandy strapped to |
| his collar. |
| |
| "At last," cried Sam, "man's best |
\ friend -- and a great big dog, too!" /
----------------------------------------
\
\
\
## .
## ## ## ==
## ## ## ## ===
/""""""""""""""""___/ ===
~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~
\______ o __/
\ \ __/
\____\______/
## Where to go next
You've done a lot; you've completed all of the following fundamental Docker tasks:
* installed Docker
* ran a software image in a container
* located an interesting image on Docker Hub
* ran the image on your own machine
* modified an image to create your own and ran it
* created a Docker Hub account and repository
* pushed your image to Docker Hub for others to share
<a href="https://twitter.com/intent/tweet?button_hashtag=dockerdocs&text=Just%20ran%20a%20container%20with%20an%20image%20I%20built.%20Find%20it%20on%20%23dockerhub.%20Build%20your%20own%3A%20http%3A%2F%2Fgoo.gl%2FMUi7cA" class="twitter-hashtag-button" data-size="large" data-related="docker" target="_blank">Tweet your accomplishment!</a>
<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
You've only scratched the surface of what Docker can do. Go to the next page to [learn more](last_page.md).
&nbsp;

View file

@ -1,143 +0,0 @@
<!--[metadata]>
+++
aliases = [
"/mac/step_three/",
"/windows/step_three/",
"/linux/step_three/",
]
title = "Find & run the whalesay image"
description = "Getting started with Docker"
keywords = ["beginner, getting started, Docker"]
[menu.main]
identifier = "getstart_locate"
parent = "tutorial_getstart_menu"
weight = 3
+++
<![end-metadata]-->
# Find and run the whalesay image
People all over the world create Docker images. You can find these images by
browsing the Docker Hub. In this next section, you'll search for and find the
image you'll use in the rest of this getting started.
## Step 1: Locate the whalesay image
1. Open your browser and <a href="https://hub.docker.com/?utm_source=getting_started_guide&utm_medium=embedded_MacOSX&utm_campaign=find_whalesay" target=_blank> browse to the Docker Hub</a>.
![Browse Docker Hub](tutimg/browse_and_search.png)
The Docker Hub contains images from individuals like you and official images
from organizations like RedHat, IBM, Google, and a whole lot more.
2. Click **Browse & Search**.
The browser opens the search page.
3. Enter the word `whalesay` in the search bar.
![Browse Docker Hub](tutimg/image_found.png)
4. Click on the **docker/whalesay** image in the results.
The browser displays the repository for the **whalesay** image.
![Browse Docker Hub](tutimg/whale_repo.png)
Each image repository contains information about an image. It should
include information such as what kind of software the image contains and
how to use it. You may notice that the **whalesay** image is based on a
Linux distribution called Ubuntu. In the next step, you run the **whalesay** image on your machine.
## Step 2: Run the whalesay image
Make sure Docker is running. On Docker for Mac and Docker for Windows, this is indicated by the Docker whale showing in the status bar.
1. Open a command-line terminal.
2. Type the `docker run docker/whalesay cowsay boo` command and press RETURN.
This command runs the **whalesay** image in a container. Your terminal should look like the following:
$ docker run docker/whalesay cowsay boo
Unable to find image 'docker/whalesay:latest' locally
latest: Pulling from docker/whalesay
e9e06b06e14c: Pull complete
a82efea989f9: Pull complete
37bea4ee0c81: Pull complete
07f8e8c5e660: Pull complete
676c4a1897e6: Pull complete
5b74edbcaa5b: Pull complete
1722f41ddcb5: Pull complete
99da72cfe067: Pull complete
5d5bd9951e26: Pull complete
fb434121fc77: Already exists
Digest: sha256:d6ee73f978a366cf97974115abe9c4099ed59c6f75c23d03c64446bb9cd49163
Status: Downloaded newer image for docker/whalesay:latest
_____
< boo >
-----
\
\
\
## .
## ## ## ==
## ## ## ## ===
/""""""""""""""""___/ ===
~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~
\______ o __/
\ \ __/
\____\______/
The first time you run a software image, the `docker` command looks for it
on your local system. If the image isn't there, then `docker` gets it from
the hub.
3. While still in the command line terminal, type the `docker images` command and press RETURN.
The command lists all the images on your local system. You should see
`docker/whalesay` in the list.
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
docker/whalesay latest fb434121fc77 3 hours ago 247 MB
hello-world latest 91c95931e552 5 weeks ago 910 B
When you run an image in a container, Docker downloads the image to your
computer. This local copy of the image saves you time. Docker only
downloads the image again if the image's source changes on the hub. You
can, of course, delete the image yourself. You'll learn more about that
later. Let's leave the image there for now because we are going to use it
later.
4. Take a moment to play with the **whalesay** container a bit.
Try running the `whalesay` image again with a word or phrase. Try a long or
short phrase. Can you break the cow?
$ docker run docker/whalesay cowsay boo-boo
_________
< boo-boo >
---------
\
\
\
## .
## ## ## ==
## ## ## ## ===
/""""""""""""""""___/ ===
~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~
\______ o __/
\ \ __/
\____\______/
## Where to go next
On this page, you learned to search for images on Docker Hub. You used your
command line to run an image. Think about it, effectively you ran a piece of
Linux software on your Mac computer. You learned that running an image copies
it on your computer. Now, you are ready to create your own Docker image.
Go on to the next part [to build your own image](step_four.md).
&nbsp;

View file

@ -1,46 +0,0 @@
<!--[metadata]>
+++
aliases = [
"/mac/step_two/",
"/windows/step_two/",
"/linux/step_two/",
]
title = "Understand images & containers"
description = "Getting started with Docker"
keywords = ["beginner, getting started, Docker"]
[menu.main]
identifier = "getstart_understand"
parent = "tutorial_getstart_menu"
weight = 2
+++
<![end-metadata]-->
# Learn about images & containers
Docker Engine provides the core Docker technology that enables images and
containers. As the last step in your installation, you ran the
`docker run hello-world` command. The command you ran had three parts.
![Container Explainer](tutimg/container_explainer.png)
An *image* is a filesystem and parameters to use at runtime. It doesn't have
state and never changes. A *container* is a running instance of an image.
When you ran the command, Docker Engine:
* checked to see if you had the `hello-world` software image
* downloaded the image from the Docker Hub (more about the hub later)
* loaded the image into the container and "ran" it
Depending on how it was built, an image might run a simple, single command and then exit. This is what `hello-world` did.
A Docker image, though, is capable of much more. An image can start software as complex as a database, wait for you (or someone else) to add data, store the data for later use, and then wait for the next person.
Who built the `hello-world` software image though? In this case, Docker did but anyone can. Docker Engine lets people (or companies) create and share software through Docker images. Using Docker Engine, you don't have to worry about whether your computer can run the software in a Docker image &mdash; a Docker container *can always run it*.
## Where to go next
See, that was quick wasn't it? Now, you are ready to do some really fun stuff with Docker.
Go on to the next part [to find and run the whalesay image](step_three.md).
&nbsp;

Binary files not shown (9 image files removed; sizes before deletion: 6.5 KiB, 18 KiB, 17 KiB, 37 KiB, 70 KiB, 17 KiB, 69 KiB, 26 KiB, 136 KiB).
View file

@ -1,121 +0,0 @@
<!-- [metadata]>
+++
aliases = ["/engine/misc/"]
title = "Docker Engine"
description = "Engine"
keywords = ["Engine"]
[menu.main]
identifier="engine_use"
weight=-85
+++
<![end-metadata]-->
# About Docker Engine
**Develop, Ship and Run Any Application, Anywhere**
[**Docker**](https://www.docker.com) is a platform for developers and sysadmins
to develop, ship, and run applications. Docker lets you quickly assemble
applications from components and eliminates the friction that can come when
shipping code. Docker lets you get your code tested and deployed into production
as fast as possible.
Docker consists of:
* The Docker Engine - our lightweight and powerful open source containerization
technology combined with a work flow for building and containerizing your
applications.
* [Docker Hub](https://hub.docker.com) - our SaaS service for
sharing and managing your application stacks.
## Why Docker?
*Faster delivery of your applications*
* We want your environment to work better. Docker containers,
and the work flow that comes with them, help your developers,
sysadmins, QA folks, and release engineers work together to get your code
into production and make it useful. We've created a standard
container format that lets developers care about their applications
inside containers while sysadmins and operators can work on running the
container in your deployment. This separation of duties streamlines and
simplifies the management and deployment of code.
* We make it easy to build new containers, enable rapid iteration of
your applications, and increase the visibility of changes. This
helps everyone in your organization understand how an application works
and how it is built.
* Docker containers are lightweight and fast! Containers have
sub-second launch times, reducing the cycle
time of development, testing, and deployment.
*Deploy and scale more easily*
* Docker containers run (almost) everywhere. You can deploy
containers on desktops, physical servers, virtual machines, into
data centers, and up to public and private clouds.
* Since Docker runs on so many platforms, it's easy to move your
applications around. You can easily move an application from a
testing environment into the cloud and back whenever you need.
* Docker's lightweight containers also make scaling up and
down fast and easy. You can quickly launch more containers when
needed and then shut them down easily when they're no longer needed.
*Get higher density and run more workloads*
* Docker containers don't need a hypervisor, so you can pack more of
them onto your hosts. This means you get more value out of every
server and can potentially reduce what you spend on equipment and
licenses.
*Faster deployment makes for easier management*
* As Docker speeds up your work flow, it gets easier to make lots
of small changes instead of huge, big bang updates. Smaller
changes mean reduced risk and more uptime.
## About this guide
The [Understanding Docker section](understanding-docker.md) will help you:
- See how Docker works at a high level
- Understand the architecture of Docker
- Discover Docker's features
- See how Docker compares to virtual machines
- See some common use cases.
### Installation guides
The [installation section](installation/index.md) will show you how to install Docker
on a variety of platforms.
### Docker user guide
To learn about Docker in more detail and to answer questions about usage and
implementation, check out the [Docker User Guide](userguide/index.md).
## Release notes
A summary of the changes in each release in the current series can now be found
on the separate [Release Notes page](https://docs.docker.com/release-notes)
## Feature Deprecation Policy
As changes are made to Docker, there may be times when existing features
will need to be removed or replaced with newer features. Before an existing
feature is removed, it will be labeled as "deprecated" within the documentation
and will usually remain in Docker for at least 3 releases. After that time
it may be removed.
Users are expected to take note of the list of deprecated features each
release and plan their migration away from those features, and (if applicable)
towards the replacement features as soon as possible.
The complete list of deprecated features can be found on the
[Deprecated Features page](deprecated.md).
## Licensing
Docker is licensed under the Apache License, Version 2.0. See
[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
license text.

View file

@ -1,254 +0,0 @@
<!--[metadata]>
+++
title = "Installation from binaries"
description = "Instructions for installing Docker as a binary. Mostly meant for hackers who want to try out Docker on a variety of environments."
keywords = ["binaries, installation, docker, documentation, linux"]
[menu.main]
parent = "engine_install"
weight = 110
+++
<![end-metadata]-->
# Installation from binaries
**This instruction set is meant for hackers who want to try out Docker
on a variety of environments.**
Before following these directions, you should really check if a packaged
version of Docker is already available for your distribution. We have
packages for many distributions, and more keep showing up all the time!
## Check runtime dependencies
To run properly, Docker needs the following software to be installed at
runtime (a quick way to check them is sketched after this list):
- iptables version 1.4 or later
- Git version 1.7 or later
- procps (or similar provider of a "ps" executable)
- XZ Utils 4.9 or later
- a [properly mounted](
https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount)
cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount
point [is](https://github.com/docker/docker/issues/2683)
[not](https://github.com/docker/docker/issues/3485)
[sufficient](https://github.com/docker/docker/issues/4568))
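The following is a minimal sketch for checking these dependencies from a
shell. The command names assume a typical GNU/Linux userland; treat it as a
starting point rather than an authoritative check, since names and paths vary
by distribution:

```bash
# Check runtime dependencies for Docker.
iptables --version          # want 1.4 or later
git --version               # want 1.7 or later
command -v ps               # any provider of a "ps" executable is fine
xz --version                # XZ Utils 4.9 or later

# Each cgroup subsystem should have its own mount point, not a single
# all-encompassing "cgroup" mount.
grep cgroup /proc/mounts
```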
## Check kernel dependencies
Docker in daemon mode has specific kernel requirements. For details,
check your distribution in [*Installation*](index.md#on-linux).
A 3.10 Linux kernel is the minimum requirement for Docker.
Kernels older than 3.10 lack some of the features required to run Docker
containers. These older versions are known to have bugs which cause data loss
and frequently panic under certain conditions.
The latest minor version (3.x.y) of the 3.10 kernel, or of a newer maintained
version, is recommended. Keeping the kernel up to date with the latest
minor version ensures that critical kernel bugs get fixed.
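To check the running kernel against the 3.10 minimum, something along these
lines works on most systems (a sketch; `sort -V` assumes GNU coreutils):

```bash
# Compare the running kernel version against the 3.10 minimum.
kernel="$(uname -r)"
required="3.10"
if [ "$(printf '%s\n' "$required" "$kernel" | sort -V | head -n1)" = "$required" ]; then
    echo "Kernel $kernel meets the $required minimum"
else
    echo "Kernel $kernel is older than $required; upgrade before installing Docker"
fi
```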
> **Warning**:
> Installing custom kernels and kernel packages is probably not
> supported by your Linux distribution's vendor. Please make sure to
> ask your vendor about Docker support first before attempting to
> install custom kernels on your distribution.
> **Warning**:
> Installing a newer kernel might not be enough for some distributions
> that provide packages which are too old or incompatible with
> newer kernels.
Note that Docker also has a client mode, which can run on virtually any
Linux kernel (it even builds on OS X!).
## Enable AppArmor and SELinux when possible
Please use AppArmor or SELinux if your Linux distribution supports
either of the two. This helps improve security and blocks certain
types of exploits. Your distribution's documentation should provide
detailed steps on how to enable the recommended security mechanism.
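If you are unsure which mechanism is active on a host, a quick check is
sketched below. Paths and commands vary by distribution, and `sestatus` is
only present where the SELinux utilities are installed:

```bash
# Check whether AppArmor is enabled (common on Ubuntu/Debian kernels).
cat /sys/module/apparmor/parameters/enabled 2>/dev/null

# Check SELinux status (common on Fedora/RHEL/CentOS systems).
sestatus 2>/dev/null
```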
Some Linux distributions enable AppArmor or SELinux by default while
running a kernel which doesn't meet the minimum requirement (3.10
or newer). Updating the kernel to 3.10 or newer on such a system
might not be enough to start Docker and run containers.
Incompatibilities between the version of the AppArmor/SELinux user
space utilities provided by the system and the kernel could prevent
Docker from running, prevent it from starting containers, or cause
containers to exhibit unexpected behaviour.
> **Warning**:
> If either of the security mechanisms is enabled, it should not be
> disabled to make Docker or its containers run. Doing so would reduce
> security in that environment, void support from the distribution's
> vendor for the system, and might break regulations and security
> policies in heavily regulated environments.
## Get the Docker Engine binaries
You can download either the latest release binaries or a specific version. To get
the list of stable release version numbers from GitHub, view the `docker/docker`
[releases page](https://github.com/docker/docker/releases). You can get the MD5
and SHA256 hashes by appending `.md5` and `.sha256` to the URLs, respectively.
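For example, to download an archive and verify its SHA256 checksum before
unpacking (a sketch, assuming `sha256sum` from GNU coreutils and that the
`.sha256` file uses the standard `<hash>  <filename>` format):

```bash
# Download the archive and its published SHA256 checksum, then verify.
curl -fsSL -O https://get.docker.com/builds/Linux/x86_64/docker-latest.tgz
curl -fsSL -O https://get.docker.com/builds/Linux/x86_64/docker-latest.tgz.sha256
sha256sum -c docker-latest.tgz.sha256
```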
### Get the Linux binaries
To download the latest version for Linux, use the
following URLs:
https://get.docker.com/builds/Linux/i386/docker-latest.tgz
https://get.docker.com/builds/Linux/x86_64/docker-latest.tgz
To download a specific version for Linux, use the
following URL patterns:
https://get.docker.com/builds/Linux/i386/docker-<version>.tgz
https://get.docker.com/builds/Linux/x86_64/docker-<version>.tgz
For example:
https://get.docker.com/builds/Linux/i386/docker-1.11.0.tgz
https://get.docker.com/builds/Linux/x86_64/docker-1.11.0.tgz
> **Note** These instructions are for Docker Engine 1.11 and up. Engine 1.10 and
> under consists of a single binary, and instructions for those versions are
> different. To install version 1.10 or below, follow the instructions in the
> <a href="https://docs.docker.com/v1.10/engine/installation/binaries/" target="_blank">1.10 documentation</a>.
#### Install the Linux binaries
After downloading, extract the archive, which puts the binaries in a
directory named `docker` in your current location.
```bash
$ tar -xvzf docker-latest.tgz
docker/
docker/docker
docker/docker-containerd
docker/docker-containerd-ctr
docker/docker-containerd-shim
docker/docker-proxy
docker/docker-runc
docker/dockerd
```
Engine requires these binaries to be installed in your host's `$PATH`.
For example, to install the binaries in `/usr/bin`:
```bash
$ mv docker/* /usr/bin/
```
> **Note**: Depending on your current setup, you can specify custom paths
> for some of the binaries provided.
> **Note**: If you already have Engine installed on your host, make sure you
> stop Engine before installing (`killall docker`), and install the binaries
> in the same location. You can find the location of the current installation
> with `dirname $(which docker)`.
#### Run the Engine daemon on Linux
You can manually start the Engine in daemon mode using:
```bash
$ sudo dockerd &
```
The GitHub repository provides samples of init scripts you can use to control
the daemon through a process manager, such as upstart or systemd. You can find
these scripts in the <a href="https://github.com/docker/docker/tree/master/contrib/init">
contrib directory</a>.
For additional information about running the Engine in daemon mode, refer to
the [daemon command](../reference/commandline/dockerd.md) in the Engine command
line reference.
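As an illustration only, a heavily simplified systemd setup might look like
the sketch below; prefer the maintained scripts in the contrib directory
for real deployments, since they handle socket activation, limits, and
restarts properly:

```bash
# Minimal sketch of a systemd unit for dockerd (assumes the binaries
# were installed to /usr/bin as shown above).
sudo tee /etc/systemd/system/docker.service >/dev/null <<'EOF'
[Unit]
Description=Docker Engine
After=network.target

[Service]
ExecStart=/usr/bin/dockerd
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

sudo systemctl daemon-reload
sudo systemctl start docker
```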
### Get the Mac OS X binary
The Mac OS X binary is only a client. You cannot use it to run the `docker`
daemon. To download the latest version for Mac OS X, use the following URLs:
https://get.docker.com/builds/Darwin/x86_64/docker-latest.tgz
To download a specific version for Mac OS X, use the
following URL pattern:
https://get.docker.com/builds/Darwin/x86_64/docker-<version>.tgz
For example:
https://get.docker.com/builds/Darwin/x86_64/docker-1.11.0.tgz
You can extract the downloaded archive either by double-clicking the downloaded
`.tgz` or on the command line, using `tar -xvzf docker-1.11.0.tgz`. The client
binary can be executed from any location on your filesystem.
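Since the binary is a client only, you point it at a daemon running
elsewhere, typically via the `DOCKER_HOST` environment variable (the host
name below is a placeholder):

```bash
# Point the OS X client at a remote Docker daemon.
export DOCKER_HOST=tcp://your-docker-host:2375   # placeholder host name
docker version
```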
### Get the Windows binary
You can only download the Windows binary for version `1.9.1` onwards.
Moreover, the 32-bit (`i386`) binary is only a client; you cannot use it to
run the `docker` daemon. The 64-bit binary (`x86_64`) is both a client and
a daemon.
To download the latest version for Windows, use the following URLs:
https://get.docker.com/builds/Windows/i386/docker-latest.zip
https://get.docker.com/builds/Windows/x86_64/docker-latest.zip
To download a specific version for Windows, use the following URL pattern:
https://get.docker.com/builds/Windows/i386/docker-<version>.zip
https://get.docker.com/builds/Windows/x86_64/docker-<version>.zip
For example:
https://get.docker.com/builds/Windows/i386/docker-1.11.0.zip
https://get.docker.com/builds/Windows/x86_64/docker-1.11.0.zip
> **Note** These instructions are for Engine 1.11 and up. Instructions for older
> versions are slightly different. To install version 1.10 or below, follow the
> instructions in the <a href="https://docs.docker.com/v1.10/engine/installation/binaries/" target="_blank">1.10 documentation</a>.
## Giving non-root access
The `docker` daemon always runs as the root user, and it binds to a Unix
socket instead of a TCP port. By default that Unix socket is owned by the
user *root*, so, by default, you can only access it with `sudo`.
If you (or your Docker installer) create a Unix group called *docker*
and add users to it, then the `docker` daemon will make the Unix socket
readable and writable by the *docker* group when the daemon starts. The
`docker` daemon must always run as the root user, but if you run the
`docker` client as a user in the *docker* group then you don't need to
add `sudo` to all the client commands.
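A typical way to set this up is sketched below (commands assume a
distribution with the standard shadow utilities; log out and back in
afterwards so the group membership takes effect):

```bash
# Create the docker group and add your user to it.
sudo groupadd docker
sudo usermod -aG docker $USER
```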
> **Warning**:
> The *docker* group (or the group specified with `-G`) is root-equivalent;
> see [*Docker Daemon Attack Surface*](../security/security.md#docker-daemon-attack-surface) for details.
## Upgrade Docker Engine
To upgrade your manual installation of Docker Engine on Linux, first kill the docker
daemon:
$ killall docker
Then follow the [regular installation steps](#get-the-linux-binaries).
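Put together, a manual upgrade is a sketch like the following, reusing the
download and install steps above (assumes the binaries live in `/usr/bin`):

```bash
# Stop the daemon, fetch the latest archive, and replace the binaries.
sudo killall docker
curl -fsSL -O https://get.docker.com/builds/Linux/x86_64/docker-latest.tgz
tar -xvzf docker-latest.tgz
sudo mv docker/* /usr/bin/
sudo dockerd &
```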
## Next steps
Continue with the [User Guide](../userguide/index.md).

@ -1,208 +0,0 @@
<!--[metadata]>
+++
title = "Example: Manual install on cloud provider"
description = "Example of a manual install of Docker Engine on a cloud provider, using Amazon Web Services (AWS) EC2. Shows how to create an EC2 instance, and install Docker Engine on it."
keywords = ["cloud, docker, machine, documentation, installation, AWS, EC2"]
[menu.main]
parent = "install_cloud"
+++
<![end-metadata]-->
# Example: Manual install on cloud provider
You can install Docker Engine directly to servers you have on cloud providers. This example shows how to create an <a href="https://aws.amazon.com/" target="_blank"> Amazon Web Services (AWS)</a> EC2 instance, and install Docker Engine on it.
You can use this same general approach to create Dockerized hosts on other cloud providers.
### Step 1. Sign up for AWS
1. If you are not already an AWS user, sign up for <a href="https://aws.amazon.com/" target="_blank"> AWS</a> to create an account and get root access to EC2 cloud computers. If you have an Amazon account, you can use it as your root user account.
2. Create an IAM (Identity and Access Management) administrator user, an admin group, and a key pair associated with a region.
From the AWS menus, select **Services** > **IAM** to get started.
See the AWS documentation on <a href="http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/get-set-up-for-amazon-ec2.html" target="_blank">Setting Up with Amazon EC2</a>. Follow the steps for "Create an IAM User" and "Create a Key Pair".
If you are just getting started with AWS and EC2, you do not need to create a virtual private cloud (VPC) or specify a subnet. The newer EC2-VPC platform (accounts created after 2013-12-04) comes with a default VPC and subnet in each availability zone. When you launch an instance, it automatically uses the default VPC.
### Step 2. Configure and start an EC2 instance
Launch an instance to create a virtual machine (VM) with a specified operating system (OS) as follows.
1. Log into AWS with your IAM credentials.
On the AWS home page, click **EC2** to go to the dashboard, then click **Launch Instance**.
![EC2 dashboard](../images/ec2_launch_instance.png)
AWS EC2 virtual servers are called *instances* in Amazon parlance. Once you set up an account, an IAM user, and a key pair, you are ready to launch an instance. It is at this point that you select the OS for the VM.
2. Choose an Amazon Machine Image (AMI) with the OS and applications you want. For this example, we select an Ubuntu server.
![Launch Ubuntu](../images/ec2-ubuntu.png)
3. Choose an instance type.
![Choose a general purpose instance type](../images/ec2_instance_type.png)
4. Configure the instance.
You can select the default network and subnet, which are inherently linked to a region and availability zone.
![Configure the instance](../images/ec2_instance_details.png)
5. Click **Review and Launch**.
6. Select a key pair to use for this instance.
When you choose to launch, you need to select a key pair to use. Save the `.pem` file to use in the next steps.
The instance is now up and running. The menu path to get back to your EC2 instance on AWS is: **EC2 (Virtual Servers in Cloud)** > **EC2 Dashboard** > **Resources** > **Running instances**.
To get help with your private key file, instance IP address, and how to log into the instance via SSH, click the **Connect** button at the top of the AWS instance dashboard.
### Step 3. Log in from a terminal, configure apt, and get packages
1. Log in to the EC2 instance from a command line terminal.
Change directories into the directory containing the SSH key and run this command (or give the path to it as part of the command):
$ ssh -i "YourKey" ubuntu@xx.xxx.xxx.xxx
For our example:
$ cd ~/Desktop/keys/amazon_ec2
$ ssh -i "my-key-pair.pem" ubuntu@xx.xxx.xxx.xxx
We'll follow the instructions for installing Docker on Ubuntu at https://docs.docker.com/engine/installation/ubuntulinux/. The next few steps reflect those instructions.
2. Check the kernel version to make sure it's 3.10 or higher.
ubuntu@ip-xxx-xx-x-xxx:~$ uname -r
3.13.0-48-generic
3. Add the new `gpg` key.
ubuntu@ip-xxx-xx-x-xxx:~$ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
Executing: gpg --ignore-time-conflict --no-options --no-default-keyring --homedir /tmp/tmp.jNZLKNnKte --no-auto-check-trustdb --trust-model always --keyring /etc/apt/trusted.gpg --primary-keyring /etc/apt/trusted.gpg --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
gpg: requesting key 2C52609D from hkp server p80.pool.sks-keyservers.net
gpg: key 2C52609D: public key "Docker Release Tool (releasedocker) <docker@docker.com>" imported
gpg: Total number processed: 1
gpg: imported: 1 (RSA: 1)
4. Create a `docker.list` file, and add an entry for our OS, Ubuntu Trusty 14.04 (LTS).
ubuntu@ip-xxx-xx-x-xxx:~$ sudo vi /etc/apt/sources.list.d/docker.list
If we were updating an existing file, we'd delete any existing entries.
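Inside the editor, add a single `deb` line for the Docker apt repository. Based on the repository shown by `apt-cache policy` in step 7 below, the entry for Trusty looks like this (a sketch; verify it against the Ubuntu install guide for your release):

    deb https://apt.dockerproject.org/repo ubuntu-trusty main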
5. Update the `apt` package index.
ubuntu@ip-xxx-xx-x-xxx:~$ sudo apt-get update
6. Purge the old repo if it exists.
In our case the repo doesn't exist yet because this is a new VM, but let's run it anyway just to be sure.
ubuntu@ip-xxx-xx-x-xxx:~$ sudo apt-get purge lxc-docker
Reading package lists... Done
Building dependency tree
Reading state information... Done
Package 'lxc-docker' is not installed, so not removed
0 upgraded, 0 newly installed, 0 to remove and 139 not upgraded.
7. Verify that `apt` is pulling from the correct repository.
ubuntu@ip-172-31-0-151:~$ sudo apt-cache policy docker-engine
docker-engine:
Installed: (none)
Candidate: 1.9.1-0~trusty
Version table:
1.9.1-0~trusty 0
500 https://apt.dockerproject.org/repo/ ubuntu-trusty/main amd64 Packages
1.9.0-0~trusty 0
500 https://apt.dockerproject.org/repo/ ubuntu-trusty/main amd64 Packages
. . .
From now on when you run `apt-get upgrade`, `apt` pulls from the new repository.
### Step 4. Install recommended prerequisites for the OS
For Ubuntu Trusty (and some other versions), it's recommended to install the `linux-image-extra` kernel package, which allows you to use the `aufs` storage driver, so we'll do that now.
ubuntu@ip-xxx-xx-x-xxx:~$ sudo apt-get update
ubuntu@ip-172-31-0-151:~$ sudo apt-get install linux-image-extra-$(uname -r)
### Step 5. Install Docker Engine on the remote instance
1. Update the apt package index.
ubuntu@ip-xxx-xx-x-xxx:~$ sudo apt-get update
2. Install Docker Engine.
ubuntu@ip-xxx-xx-x-xxx:~$ sudo apt-get install docker-engine
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following extra packages will be installed:
aufs-tools cgroup-lite git git-man liberror-perl
Suggested packages:
git-daemon-run git-daemon-sysvinit git-doc git-el git-email git-gui gitk
gitweb git-arch git-bzr git-cvs git-mediawiki git-svn
The following NEW packages will be installed:
aufs-tools cgroup-lite docker-engine git git-man liberror-perl
0 upgraded, 6 newly installed, 0 to remove and 139 not upgraded.
Need to get 11.0 MB of archives.
After this operation, 60.3 MB of additional disk space will be used.
Do you want to continue? [Y/n] y
Get:1 http://us-west-1.ec2.archive.ubuntu.com/ubuntu/ trusty/universe aufs-tools amd64 1:3.2+20130722-1.1 [92.3 kB]
Get:2 http://us-west-1.ec2.archive.ubuntu.com/ubuntu/ trusty/main liberror-perl all 0.17-1.1 [21.1 kB]
. . .
3. Start the Docker daemon.
ubuntu@ip-xxx-xx-x-xxx:~$ sudo service docker start
4. Verify Docker Engine is installed correctly by running `docker run hello-world`.
ubuntu@ip-xxx-xx-x-xxx:~$ sudo docker run hello-world
Unable to find image 'hello-world:latest' locally
latest: Pulling from library/hello-world
b901d36b6f2f: Pull complete
0a6ba66e537a: Pull complete
Digest: sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7
Status: Downloaded newer image for hello-world:latest
Hello from Docker.
This message shows that your installation appears to be working correctly.
To generate this message, Docker took the following steps:
1. The Docker client contacted the Docker daemon.
2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
3. The Docker daemon created a new container from that image which runs the executable that produces the output you are currently reading.
4. The Docker daemon streamed that output to the Docker client, which sent it to your terminal.
To try something more ambitious, you can run an Ubuntu container with:
$ docker run -it ubuntu bash
Share images, automate workflows, and more with a free Docker Hub account:
https://hub.docker.com
For more examples and ideas, visit:
https://docs.docker.com/userguide/
## Where to go next
_Looking for a quicker way to do Docker cloud installs and provision multiple hosts?_ You can use [Docker Machine](https://docs.docker.com/machine/overview/) to provision hosts.
* [Use Docker Machine to provision hosts on cloud providers](https://docs.docker.com/machine/get-started-cloud/)
* [Docker Machine driver reference](https://docs.docker.com/machine/drivers/)
* [Install Docker Engine](../index.md)
* [Docker User Guide](../../userguide/intro.md)

@ -1,201 +0,0 @@
<!--[metadata]>
+++
title = "Example: Use Docker Machine to provision cloud hosts"
description = "Example of using Docker Machine to install Docker Engine on a cloud provider, using Digital Ocean."
keywords = ["cloud, docker, machine, documentation, installation, digitalocean"]
[menu.main]
parent = "install_cloud"
+++
<![end-metadata]-->
# Example: Use Docker Machine to provision cloud hosts
Docker Machine driver plugins are available for many cloud platforms, so you can use Machine to provision cloud hosts. When you use Docker Machine for provisioning, you create cloud hosts with Docker Engine installed on them.
You'll need to install and run Docker Machine, and create an account with the cloud provider.
Then you provide account verification, security credentials, and configuration options for the providers as flags to `docker-machine create`. The flags are unique for each cloud-specific driver. For instance, to pass a Digital Ocean access token, you use the `--digitalocean-access-token` flag.
As an example, let's take a look at how to create a Dockerized <a href="https://digitalocean.com" target="_blank">Digital Ocean</a> _Droplet_ (cloud server).
### Step 1. Create a Digital Ocean account and log in
If you have not done so already, go to <a href="https://digitalocean.com" target="_blank">Digital Ocean</a>, create an account, and log in.
### Step 2. Generate a personal access token
To generate your access token:
1. Go to the Digital Ocean administrator console and click **API** in the header.
![Click API in Digital Ocean console](../images/ocean_click_api.png)
2. Click **Generate New Token** to get to the token generator.
![Generate token](../images/ocean_gen_token.png)
3. Give the token a clever name (e.g. "machine"), make sure the **Write (Optional)** checkbox is checked, and click **Generate Token**.
![Name and generate token](../images/ocean_token_create.png)
4. Grab (copy to clipboard) the generated big long hex string and store it somewhere safe.
![Copy and save personal access token](../images/ocean_save_token.png)
This is the personal access token you'll use in the next step to create your cloud server.
### Step 3. Install Docker Machine
1. If you have not done so already, install Docker Machine on your local host.
* <a href="https://docs.docker.com/engine/installation/mac/" target="_blank"> How to install Docker Machine on Mac OS X</a>
* <a href="https://docs.docker.com/engine/installation/windows/" target="_blank">How to install Docker Machine on Windows</a>
* <a href="https://docs.docker.com/machine/install-machine/" target="_blank">Install Docker Machine directly</a> (e.g., on Linux)
2. At a command terminal, use `docker-machine ls` to get a list of Docker Machines and their status.
$ docker-machine ls
NAME ACTIVE DRIVER STATE URL SWARM
default * virtualbox Running tcp://xxx.xxx.xx.xxx:xxxx
3. Run some Docker commands to make sure that Docker Engine is also up and running.
We'll run `docker run hello-world` again, but you could try `docker ps`, `docker run docker/whalesay cowsay boo`, or another command to verify that Docker is running.
$ docker run hello-world
Hello from Docker.
This message shows that your installation appears to be working correctly.
...
### Step 4. Use Machine to Create the Droplet
1. Run `docker-machine create` with the `digitalocean` driver and pass your key to the `--digitalocean-access-token` flag, along with a name for the new cloud server.
For this example, we'll call our new Droplet "docker-sandbox".
$ docker-machine create --driver digitalocean --digitalocean-access-token xxxxx docker-sandbox
Running pre-create checks...
Creating machine...
(docker-sandbox) OUT | Creating SSH key...
(docker-sandbox) OUT | Creating Digital Ocean droplet...
(docker-sandbox) OUT | Waiting for IP address to be assigned to the Droplet...
Waiting for machine to be running, this may take a few minutes...
Machine is running, waiting for SSH to be available...
Detecting operating system of created instance...
Detecting the provisioner...
Provisioning created instance...
Copying certs to the local machine directory...
Copying certs to the remote machine...
Setting Docker configuration on the remote daemon...
To see how to connect Docker to this machine, run: docker-machine env docker-sandbox
When the Droplet is created, Docker generates a unique SSH key and stores it on your local system in `~/.docker/machines`. Initially, this is used to provision the host. Later, it's used under the hood to access the Droplet directly with the `docker-machine ssh` command. Docker Engine is installed on the cloud server and the daemon is configured to accept remote connections over TCP using TLS for authentication.
2. Go to the Digital Ocean console to view the new Droplet.
![Droplet in Digital Ocean created with Machine](../images/ocean_droplet.png)
3. At the command terminal, run `docker-machine ls`.
$ docker-machine ls
NAME ACTIVE DRIVER STATE URL SWARM
default * virtualbox Running tcp://192.168.99.100:2376
docker-sandbox - digitalocean Running tcp://45.55.139.48:2376
Notice that the new cloud server is running but is not the active host. Our command shell is still connected to the default machine, which is currently the active host as indicated by the asterisk (*).
4. Run `docker-machine env docker-sandbox` to get the environment commands for the new remote host, then run `eval` as directed to re-configure the shell to connect to `docker-sandbox`.
$ docker-machine env docker-sandbox
export DOCKER_TLS_VERIFY="1"
export DOCKER_HOST="tcp://45.55.222.72:2376"
export DOCKER_CERT_PATH="/Users/victoriabialas/.docker/machine/machines/docker-sandbox"
export DOCKER_MACHINE_NAME="docker-sandbox"
# Run this command to configure your shell:
# eval "$(docker-machine env docker-sandbox)"
$ eval "$(docker-machine env docker-sandbox)"
5. Re-run `docker-machine ls` to verify that our new server is the active machine, as indicated by the asterisk (*) in the ACTIVE column.
$ docker-machine ls
NAME ACTIVE DRIVER STATE URL SWARM
default - virtualbox Running tcp://192.168.99.100:2376
docker-sandbox * digitalocean Running tcp://45.55.222.72:2376
6. Run some `docker-machine` commands to inspect the remote host. For example, `docker-machine ip <machine>` gets the host IP address and `docker-machine inspect <machine>` lists all the details.
$ docker-machine ip docker-sandbox
104.131.43.236
$ docker-machine inspect docker-sandbox
{
"ConfigVersion": 3,
"Driver": {
"IPAddress": "104.131.43.236",
"MachineName": "docker-sandbox",
"SSHUser": "root",
"SSHPort": 22,
"SSHKeyPath": "/Users/samanthastevens/.docker/machine/machines/docker-sandbox/id_rsa",
"StorePath": "/Users/samanthastevens/.docker/machine",
"SwarmMaster": false,
"SwarmHost": "tcp://0.0.0.0:3376",
"SwarmDiscovery": "",
...
7. Verify Docker Engine is installed correctly by running `docker` commands.
Start with something basic like `docker run hello-world`, or for a more interesting test, run a Dockerized webserver on your new remote machine.
In this example, the `-p` option is used to expose port 80 from the `nginx` container and make it accessible on port `8000` of the `docker-sandbox` host.
$ docker run -d -p 8000:80 --name webserver kitematic/hello-world-nginx
Unable to find image 'kitematic/hello-world-nginx:latest' locally
latest: Pulling from kitematic/hello-world-nginx
a285d7f063ea: Pull complete
2d7baf27389b: Pull complete
...
Digest: sha256:ec0ca6dcb034916784c988b4f2432716e2e92b995ac606e080c7a54b52b87066
Status: Downloaded newer image for kitematic/hello-world-nginx:latest
942dfb4a0eaae75bf26c9785ade4ff47ceb2ec2a152be82b9d7960e8b5777e65
In a web browser, go to `http://<host_ip>:8000` to bring up the webserver home page. You got the `<host_ip>` from the output of the `docker-machine ip <machine>` command you ran in a previous step. Use the port you exposed in the `docker run` command.
![nginx webserver](../images/nginx-webserver.png)
#### Understand the defaults and options on the create command
For convenience, `docker-machine` uses sensible defaults for settings such as the image that the server is based on, but you can override the defaults using the respective flags (e.g. `--digitalocean-image`). This is useful if, for example, you want to create a cloud server with a lot of memory and CPUs (by default `docker-machine` creates a small server). For a full list of the flags/settings available and their defaults, see the output of `docker-machine create -h` at the command line. See also <a href="https://docs.docker.com/machine/drivers/os-base/" target="_blank">Driver options and operating system defaults</a> and information about the <a href="https://docs.docker.com/machine/reference/create/" target="_blank">create</a> command in the Docker Machine documentation.
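For instance, a sketch of a create command that overrides the Droplet size is shown below. The `--digitalocean-size` flag and the `2gb` value are illustrative; check the output of `docker-machine create -h` for the flags your driver version actually supports:

    $ docker-machine create --driver digitalocean \
        --digitalocean-access-token xxxxx \
        --digitalocean-size 2gb \
        docker-sandbox-large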
### Step 5. Use Machine to remove the Droplet
To remove a host and all of its containers and images, first stop the machine, then use `docker-machine rm`:
$ docker-machine stop docker-sandbox
$ docker-machine rm docker-sandbox
Do you really want to remove "docker-sandbox"? (y/n): y
Successfully removed docker-sandbox
$ docker-machine ls
NAME ACTIVE DRIVER STATE URL SWARM
default * virtualbox Running tcp://xxx.xxx.xx.xxx:xxxx
If you monitor the Digital Ocean console while you run these commands, you will see it update first to reflect that the Droplet was stopped, and then removed.
If you create a host with Docker Machine but remove it through the cloud provider console, Machine will lose track of the server status. So please use the `docker-machine rm` command for hosts you create with `docker-machine create`.
## Where to go next
* [Docker Machine driver reference](https://docs.docker.com/machine/drivers/)
* [Docker Machine Overview](https://docs.docker.com/machine/overview/)
* [Use Docker Machine to provision hosts on cloud providers](https://docs.docker.com/machine/get-started-cloud/)
* [Install Docker Engine](../../installation/index.md)
* [Docker User Guide](../../userguide/intro.md)

@ -1,25 +0,0 @@
<!--[metadata]>
+++
aliases = [
"/engine/installation/amazon/",
"/engine/installation/google/",
"/engine/installation/softlayer/",
"/engine/installation/azure/",
"/engine/installation/rackspace/",
"/engine/installation/joyent/"
]
title = "On cloud providers"
description = "Cloud Installations"
keywords = ["Docker install "]
[menu.main]
identifier = "install_cloud"
parent="engine_install"
weight="-60"
+++
<![end-metadata]-->
# Install Engine in the cloud
* [Understand cloud install options and choose one](overview.md)
* [Example: Use Machine to provision cloud hosts](cloud-ex-machine-ocean.md)
* [Example: Manual install on a cloud provider](cloud-ex-aws.md)

@ -1,56 +0,0 @@
<!--[metadata]>
+++
aliases = [
"/engine/installation/cloud/cloud/",
]
title = "Choose how to install"
description = "Installation instructions for Docker on cloud."
keywords = ["cloud, docker, machine, documentation, installation"]
[menu.main]
parent = "install_cloud"
weight=-3
+++
<![end-metadata]-->
# Choose how to install
You can install Docker Engine on any cloud platform that runs an operating system (OS) that Docker supports. This includes many flavors and versions of Linux, along with Mac and Windows.
You have two options for installing:
* Manually install on the cloud (create cloud hosts, then install Docker Engine on them)
* Use Docker Machine to provision cloud hosts
## Manually install Docker Engine on a cloud host
To install on a cloud provider:
1. Create an account with the cloud provider, and read cloud provider documentation to understand their process for creating hosts.
2. Decide which OS you want to run on the cloud host.
3. Understand the Docker prerequisites and install process for the chosen OS. See [Install Docker Engine](../index.md) for a list of supported systems and links to the install guides.
4. Create a host with a Docker supported OS, and install Docker per the instructions for that OS.
[Example (AWS): Manual install on a cloud provider](cloud-ex-aws.md) shows how to create an <a href="https://aws.amazon.com/" target="_blank"> Amazon Web Services (AWS)</a> EC2 instance, and install Docker Engine on it.
## Use Docker Machine to provision cloud hosts
Docker Machine driver plugins are available for several popular cloud platforms, so you can use Machine to provision one or more Dockerized hosts on those platforms.
With Docker Machine, you can use the same interface to create cloud hosts with Docker Engine on them, each configured per the options you specify.
To do this, you use the `docker-machine create` command with the driver for the cloud provider, and provider-specific flags for account verification, security credentials, and other configuration details.
[Example: Use Docker Machine to provision cloud hosts](cloud-ex-machine-ocean.md) walks you through the steps to set up Docker Machine and provision a Dockerized host on <a href="https://www.digitalocean.com/" target="_blank">Digital Ocean</a>.
## Where to go next
* [Example: Manual install on a cloud provider](cloud-ex-aws.md) (AWS EC2)
* [Example: Use Docker Machine to provision cloud hosts](cloud-ex-machine-ocean.md) (Digital Ocean)
* For supported platforms, see [Install Docker Engine](../index.md).
* To get started with Docker post-install, see [Docker User Guide](../../userguide/intro.md).
