remove rst/sphinx documentation

Docker-DCO-1.1-Signed-off-by: Sven Dowideit <SvenDowideit@fosiki.com> (github: SvenDowideit)
Sven Dowideit 2014-05-01 15:31:58 +10:00
parent 9da75eb4df
commit adf04681b4
117 changed files with 7 additions and 47555 deletions


@@ -8,11 +8,6 @@ RUN apt-get update && apt-get install -yq make python-pip python-setuptools vim
RUN pip install mkdocs
# installing sphinx for the rst->md conversion only - will be removed after May release
# pip installs from docs/requirements.txt, but here to increase cacheability
RUN pip install Sphinx==1.2.1
RUN pip install sphinxcontrib-httpdomain==1.2.0
# add MarkdownTools to get transclusion
# (future development)
#RUN easy_install -U setuptools
@@ -33,12 +28,6 @@ ADD . /docs
ADD MAINTAINERS /docs/sources/humans.txt
WORKDIR /docs
#build the sphinx html
#RUN make -C /docs clean docs
#convert to markdown
#RUN ./convert.sh
RUN VERSION=$(cat /docs/VERSION) &&\
GIT_BRANCH=$(cat /docs/GIT_BRANCH) &&\
AWS_S3_BUCKET=$(cat /docs/AWS_S3_BUCKET) &&\


@@ -1,185 +0,0 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS  =
SPHINXBUILD = sphinx-build
PAPER       =
BUILDDIR    = _build
PYTHON      = python

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) sources
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
#	@echo "  html       to make standalone HTML files"
#	@echo "  dirhtml    to make HTML files named index.html in directories"
#	@echo "  singlehtml to make a single large HTML file"
#	@echo "  pickle     to make pickle files"
#	@echo "  json       to make JSON files"
#	@echo "  htmlhelp   to make HTML files and a HTML help project"
#	@echo "  qthelp     to make HTML files and a qthelp project"
#	@echo "  devhelp    to make HTML files and a Devhelp project"
#	@echo "  epub       to make an epub"
#	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
#	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
#	@echo "  text       to make text files"
	@echo "  man        to make a manual page"
#	@echo "  texinfo    to make Texinfo files"
#	@echo "  info       to make Texinfo files and run them through makeinfo"
#	@echo "  gettext    to make PO message catalogs"
#	@echo "  changes    to make an overview of all changed/added/deprecated items"
#	@echo "  linkcheck  to check all external links for integrity"
#	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
	@echo "  docs       to build the docs and copy the static files to the outputdir"
	@echo "  server     to serve the docs in your browser under \`http://localhost:8000\`"
	@echo "  publish    to publish the app to dotcloud"

clean:
	-rm -rf $(BUILDDIR)/*

docs:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The documentation pages are now in $(BUILDDIR)/html."

server: docs
	@cd $(BUILDDIR)/html; $(PYTHON) -m SimpleHTTPServer 8000

site:
	cp -r website $(BUILDDIR)/
	cp -r theme/docker/static/ $(BUILDDIR)/website/
	@echo
	@echo "The Website pages are in $(BUILDDIR)/site."

connect:
	@echo connecting dotcloud to www.docker.io website, make sure to use user 1
	@echo or create your own "dockerwebsite" app
	@cd $(BUILDDIR)/website/ ; \
	dotcloud connect dockerwebsite ; \
	dotcloud list

push:
	@cd $(BUILDDIR)/website/ ; \
	dotcloud push

$(VERSIONS):
	@echo "Hello world"

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Docker.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Docker.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/Docker"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Docker"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."


@@ -1,86 +0,0 @@
diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md
index 6e072f6..5a4537d 100644
--- a/docs/sources/examples/hello_world.md
+++ b/docs/sources/examples/hello_world.md
@@ -59,6 +59,9 @@ standard out.
See the example in action
+<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type=&quot;text/javascript&quot;src=&quot;https://asciinema.org/a/7658.js&quot;id=&quot;asciicast-7658&quot; async></script></body>"></iframe>
+
+
## Hello World Daemon
Note
@@ -142,6 +145,8 @@ Make sure it is really stopped.
See the example in action
+<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type=&quot;text/javascript&quot;src=&quot;https://asciinema.org/a/2562.js&quot;id=&quot;asciicast-2562&quot; async></script></body>"></iframe>
+
The next example in the series is a [*Node.js Web
App*](../nodejs_web_app/#nodejs-web-app) example, or you could skip to
any of the other examples:
diff --git a/docs/asciinema.patch b/docs/asciinema.patch
index e240bf3..e69de29 100644
--- a/docs/asciinema.patch
+++ b/docs/asciinema.patch
@@ -1,23 +0,0 @@
-diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md
-index 6e072f6..5a4537d 100644
---- a/docs/sources/examples/hello_world.md
-+++ b/docs/sources/examples/hello_world.md
-@@ -59,6 +59,9 @@ standard out.
-
- See the example in action
-
-+<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type=&quot;text/javascript&quot;src=&quot;https://asciinema.org/a/7658.js&quot;id=&quot;asciicast-7658&quot; async></script></body>"></iframe>
-+
-+
- ## Hello World Daemon
-
- Note
-@@ -142,6 +145,8 @@ Make sure it is really stopped.
-
- See the example in action
-
-+<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type=&quot;text/javascript&quot;src=&quot;https://asciinema.org/a/2562.js&quot;id=&quot;asciicast-2562&quot; async></script></body>"></iframe>
-+
- The next example in the series is a [*Node.js Web
- App*](../nodejs_web_app/#nodejs-web-app) example, or you could skip to
- any of the other examples:
diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md
index 6e072f6..c277f38 100644
--- a/docs/sources/examples/hello_world.md
+++ b/docs/sources/examples/hello_world.md
@@ -59,6 +59,8 @@ standard out.
See the example in action
+<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type=&quot;text/javascript&quot;src=&quot;https://asciinema.org/a/7658.js&quot;id=&quot;asciicast-7658&quot; async></script></body>"></iframe>
+
## Hello World Daemon
Note
@@ -142,6 +144,8 @@ Make sure it is really stopped.
See the example in action
+<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type=&quot;text/javascript&quot;src=&quot;https://asciinema.org/a/2562.js&quot;id=&quot;asciicast-2562&quot; async></script></body>"></iframe>
+
The next example in the series is a [*Node.js Web
App*](../nodejs_web_app/#nodejs-web-app) example, or you could skip to
any of the other examples:
diff --git a/docs/sources/use/workingwithrepository.md b/docs/sources/use/workingwithrepository.md
index 2122b8d..49edbc8 100644
--- a/docs/sources/use/workingwithrepository.md
+++ b/docs/sources/use/workingwithrepository.md
@@ -199,6 +199,8 @@ searchable (or indexed at all) in the Central Index, and there will be
no user name checking performed. Your registry will function completely
independently from the Central Index.
+<iframe width="640" height="360" src="//www.youtube.com/embed/CAewZCBT4PI?rel=0" frameborder="0" allowfullscreen></iframe>
+
See also
[Docker Blog: How to use your own


@@ -1,53 +0,0 @@
#!/bin/sh

cd /

# run the sphinx build first
make -C /docs clean docs

cd /docs

#find sources -name '*.md*' -exec rm '{}' \;

# convert from rst to md for mkdocs.org
# TODO: we're using a sphinx-specific rst construct for between-docs links, which we
#       then need to convert to mkdocs-specific markup (and pandoc loses it when
#       converting to html / md)
HTML_FILES=$(find _build -name '*.html' | sed 's/_build\/html\/\(.*\)\/index.html/\1/')

for name in ${HTML_FILES}
do
	echo $name

	# let's not use gratuitous unicode quotes that cause terrible copy and paste issues
	sed -i 's/&#8220;/"/g' _build/html/${name}/index.html
	sed -i 's/&#8221;/"/g' _build/html/${name}/index.html

	pandoc -f html -t markdown --atx-headers -o sources/${name}.md1 _build/html/${name}/index.html

	# add the meta-data from the rst
	egrep ':(title|description|keywords):' sources/${name}.rst | sed 's/^:/page_/' > sources/${name}.md
	echo >> sources/${name}.md
	#cat sources/${name}.md1 >> sources/${name}.md
	# remove the paragraph links from the source
	cat sources/${name}.md1 | sed 's/\[..\](#.*)//' >> sources/${name}.md
	rm sources/${name}.md1

	sed -i 's/{.docutils .literal}//g' sources/${name}.md
	sed -i 's/{.docutils$//g' sources/${name}.md
	sed -i 's/^.literal} //g' sources/${name}.md
	sed -i 's/`{.descname}`//g' sources/${name}.md
	sed -i 's/{.descname}//g' sources/${name}.md
	sed -i 's/{.xref}//g' sources/${name}.md
	sed -i 's/{.xref .doc .docutils .literal}//g' sources/${name}.md
	sed -i 's/{.xref .http .http-post .docutils$//g' sources/${name}.md
	sed -i 's/^ .literal}//g' sources/${name}.md
	sed -i 's/\\\$container\\_id/\$container_id/' sources/examples/hello_world.md
	sed -i 's/\\\$TESTFLAGS/\$TESTFLAGS/' sources/contributing/devenvironment.md
	sed -i 's/\\\$MYVAR1/\$MYVAR1/g' sources/reference/commandline/cli.md

	# git it all so we can test
	# git add ${name}.md
done

# annoyingly, there are lots of failures
patch --fuzz 50 -t -p2 < pr4923.patch || true
patch --fuzz 50 -t -p2 < asciinema.patch || true


@@ -1,197 +0,0 @@
diff --git a/docs/Dockerfile b/docs/Dockerfile
index bc2b73b..b9808b2 100644
--- a/docs/Dockerfile
+++ b/docs/Dockerfile
@@ -4,14 +4,24 @@ MAINTAINER SvenDowideit@docker.com
# docker build -t docker:docs . && docker run -p 8000:8000 docker:docs
#
-RUN apt-get update && apt-get install -yq make python-pip python-setuptools
-
+RUN apt-get update && apt-get install -yq make python-pip python-setuptools
RUN pip install mkdocs
+RUN apt-get install -yq vim-tiny git pandoc
+
+# pip installs from docs/requirements.txt, but here to increase cacheability
+RUN pip install Sphinx==1.2.1
+RUN pip install sphinxcontrib-httpdomain==1.2.0
+
ADD . /docs
+
+#build the sphinx html
+RUN make -C /docs clean docs
+
WORKDIR /docs
-CMD ["mkdocs", "serve"]
+#CMD ["mkdocs", "serve"]
+CMD bash
# note, EXPOSE is only last because of https://github.com/dotcloud/docker/issues/3525
EXPOSE 8000
diff --git a/docs/theme/docker/layout.html b/docs/theme/docker/layout.html
index 7d78fb9..0dac9e0 100755
--- a/docs/theme/docker/layout.html
+++ b/docs/theme/docker/layout.html
@@ -63,48 +63,6 @@
<body>
-<div id="wrap">
-<div class="navbar navbar-static-top navbar-inner navbar-fixed-top ">
- <div class="navbar-dotcloud">
- <div class="container">
-
- <div style="float: right" class="pull-right">
- <ul class="nav">
- <li id="nav-introduction"><a href="http://www.docker.io/" title="Docker Homepage">Home</a></li>
- <li id="nav-about"><a href="http://www.docker.io/about/" title="About">About</a></li>
- <li id="nav-gettingstarted"><a href="http://www.docker.io/gettingstarted/">Getting started</a></li>
- <li id="nav-community"><a href="http://www.docker.io/community/" title="Community">Community</a></li>
- <li id="nav-documentation" class="active"><a href="http://docs.docker.io/en/latest/">Documentation</a></li>
- <li id="nav-blog"><a href="http://blog.docker.io/" title="Docker Blog">Blog</a></li>
- <li id="nav-index"><a href="http://index.docker.io/" title="Docker Image Index, find images here">INDEX <img class="inline-icon" alt="link to external site" src="{{ pathto('_static/img/external-link-icon.png', 1) }}" title="external link"> </a></li>
- </ul>
- </div>
-
- <div class="brand-logo">
- <a href="http://www.docker.io" title="Docker Homepage"><img src="{{ pathto('_static/img/docker-top-logo.png', 1) }}" alt="Docker logo"></a>
- </div>
- </div>
- </div>
-</div>
-
-<div class="container-fluid">
-
- <!-- Docs nav
- ================================================== -->
- <div class="row-fluid main-row">
-
- <div class="sidebar bs-docs-sidebar">
- <div class="page-title" >
- <h4>DOCUMENTATION</h4>
- </div>
-
- {{ toctree(collapse=False, maxdepth=3) }}
- <form>
- <input type="text" id="st-search-input" class="st-search-input span3" placeholder="search in documentation" style="width:210px;" />
- <div id="st-results-container"></div>
- </form>
- </div>
-
<!-- body block -->
<div class="main-content">
@@ -114,111 +72,7 @@
{% block body %}{% endblock %}
</section>
- <div class="pull-right"><a href="https://github.com/dotcloud/docker/blob/{{ github_tag }}/docs/sources/{{ pagename }}.rst" title="edit this article">Edit this article on GitHub</a></div>
</div>
- </div>
-</div>
-
-<div id="push-the-footer"></div>
-</div> <!-- end wrap for pushing footer -->
-
-<div id="footer">
- <div class="footer-landscape">
- <div class="footer-landscape-image">
- <!-- footer -->
- <div class="container">
- <div class="row footer">
- <div class="span12 tbox">
- <div class="tbox">
- <p>Docker is an open source project, sponsored by <a href="https://www.docker.com">Docker Inc.</a>, under the <a href="https://github.com/dotcloud/docker/blob/master/LICENSE" title="Docker licence, hosted in the Github repository">apache 2.0 licence</a></p>
- <p>Documentation proudly hosted by <a href="http://www.readthedocs.org">Read the Docs</a></p>
- </div>
-
- <div class="social links">
- <a title="Docker on Twitter" class="twitter" href="http://twitter.com/docker">Twitter</a>
- <a title="Docker on GitHub" class="github" href="https://github.com/dotcloud/docker/">GitHub</a>
- <a title="Docker on Reddit" class="reddit" href="http://www.reddit.com/r/Docker/">Reddit</a>
- <a title="Docker on Google+" class="googleplus" href="https://plus.google.com/u/0/b/100381662757235514581/communities/108146856671494713993">Google+</a>
- <a title="Docker on Facebook" class="facebook" href="https://www.facebook.com/docker.run">Facebook</a>
- <a title="Docker on SlideShare" class="slideshare" href="http://www.slideshare.net/dotCloud">Slideshare</a>
- <a title="Docker on Youtube" class="youtube" href="http://www.youtube.com/user/dockerrun/">Youtube</a>
- <a title="Docker on Flickr" class="flickr" href="http://www.flickr.com/photos/99741659@N08/">Flickr</a>
- <a title="Docker on LinkedIn" class="linkedin" href="http://www.linkedin.com/company/dotcloud">LinkedIn</a>
- </div>
-
- <div class="tbox version-flyer ">
- <div class="content">
- <p class="version-note">Note: You are currently browsing the development documentation. The current release may work differently.</p>
-
- <small>Available versions:</small>
- <ul class="inline">
- {% for slug, url in versions %}
- <li class="alternative"><a href="{{ url }}{%- for word in pagename.split('/') -%}
- {%- if word != 'index' -%}
- {%- if word != '' -%}
- {{ word }}/
- {%- endif -%}
- {%- endif -%}
- {%- endfor -%}"
- title="Switch to {{ slug }}">{{ slug }}</a></li>
- {% endfor %}
- </ul>
- </div>
- </div>
-
-
- </div>
- </div>
- </div>
- </div>
- <!-- end of footer -->
- </div>
-
-</div>
-
-
-<script type="text/javascript" src="{{ pathto('_static/js/docs.js', 1) }}"></script>
-
-<!-- Swiftype search -->
-
-<script type="text/javascript">
- var Swiftype = window.Swiftype || {};
- (function() {
- Swiftype.key = 'pWPnnyvwcfpcrw1o51Sz';
- Swiftype.inputElement = '#st-search-input';
- Swiftype.resultContainingElement = '#st-results-container';
- Swiftype.attachElement = '#st-search-input';
- Swiftype.renderStyle = "overlay";
- // from https://swiftype.com/questions/how-can-i-make-more-popular-content-rank-higher
- // Use "page" for now -- they don't subgroup by document type yet.
- Swiftype.searchFunctionalBoosts = {"page": {"popularity": "linear"}};
-
- var script = document.createElement('script');
- script.type = 'text/javascript';
- script.async = true;
- script.src = "//swiftype.com/embed.js";
- var entry = document.getElementsByTagName('script')[0];
- entry.parentNode.insertBefore(script, entry);
- }());
-</script>
-
-
-<!-- Google analytics -->
-<script type="text/javascript">
-
- var _gaq = _gaq || [];
- _gaq.push(['_setAccount', 'UA-6096819-11']);
- _gaq.push(['_setDomainName', 'docker.io']);
- _gaq.push(['_setAllowLinker', true]);
- _gaq.push(['_trackPageview']);
-
- (function() {
- var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
- ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
- var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
- })();
-
-</script>
</body>
</html>

View file

@@ -119,10 +119,10 @@ pages:
# - ['static_files/README.md', 'static_files', 'README']
- ['terms/index.md', '**HIDDEN**']
- ['terms/layer.md', '**HIDDEN**', 'layer']
- ['terms/index.md', '**HIDDEN**', 'Home']
- ['terms/registry.md', '**HIDDEN**', 'registry']
- ['terms/container.md', '**HIDDEN**', 'container']
- ['terms/repository.md', '**HIDDEN**', 'repository']
- ['terms/filesystem.md', '**HIDDEN**', 'filesystem']
- ['terms/image.md', '**HIDDEN**', 'image']
- ['terms/layer.md', '**HIDDEN**']
- ['terms/index.md', '**HIDDEN**']
- ['terms/registry.md', '**HIDDEN**']
- ['terms/container.md', '**HIDDEN**']
- ['terms/repository.md', '**HIDDEN**']
- ['terms/filesystem.md', '**HIDDEN**']
- ['terms/image.md', '**HIDDEN**']

File diff suppressed because it is too large


@@ -1,2 +0,0 @@
Sphinx==1.2.1
sphinxcontrib-httpdomain==1.2.0


@@ -1,65 +0,0 @@
:title: Create a Base Image
:description: How to create base images
:keywords: Examples, Usage, base image, docker, documentation, examples
.. _base_image_creation:
Create a Base Image
===================
So you want to create your own :ref:`base_image_def`? Great!
The specific process will depend heavily on the Linux distribution you
want to package. We have some examples below, and you are encouraged
to submit pull requests to contribute new ones.
Create a full image using tar
.............................
In general, you'll want to start with a working machine that is
running the distribution you'd like to package as a base image, though
that is not required for some tools like Debian's `Debootstrap
<https://wiki.debian.org/Debootstrap>`_, which you can also use to
build Ubuntu images.
It can be as simple as this to create an Ubuntu base image::
$ sudo debootstrap raring raring > /dev/null
$ sudo tar -C raring -c . | sudo docker import - raring
a29c15f1bf7a
$ sudo docker run raring cat /etc/lsb-release
DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=13.04
DISTRIB_CODENAME=raring
DISTRIB_DESCRIPTION="Ubuntu 13.04"
There are more example scripts for creating base images in the
Docker GitHub Repo:
* `BusyBox <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh>`_
* CentOS / Scientific Linux CERN (SLC) `on Debian/Ubuntu
<https://github.com/dotcloud/docker/blob/master/contrib/mkimage-rinse.sh>`_
or
`on CentOS/RHEL/SLC/etc.
<https://github.com/dotcloud/docker/blob/master/contrib/mkimage-yum.sh>`_
* `Debian / Ubuntu
<https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debootstrap.sh>`_
Creating a simple base image using ``scratch``
..............................................
There is a special repository in the Docker registry called ``scratch``, which
was created using an empty tar file::
$ tar cv --files-from /dev/null | docker import - scratch
which you can ``docker pull``. You can then use that image as the base
for your new minimal containers, using ``FROM``::
FROM scratch
ADD true-asm /true
CMD ["/true"]
The Dockerfile above is from the extremely minimal image
`tianon/true <https://github.com/tianon/dockerfiles/tree/master/true>`_.


@@ -1,15 +0,0 @@
:title: Docker articles
:description: various articles related to Docker
:keywords: docker, articles
.. _articles_list:
Articles
========
.. toctree::
:maxdepth: 1
security
baseimages
runmetrics


@@ -1,463 +0,0 @@
:title: Runtime Metrics
:description: Measure the behavior of running containers
:keywords: docker, metrics, CPU, memory, disk, IO, run, runtime
.. _run_metrics:
Runtime Metrics
===============
Linux Containers rely on `control groups
<https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt>`_ which
not only track groups of processes, but also expose metrics about CPU,
memory, and block I/O usage. You can access those metrics and obtain
network usage metrics as well. This is relevant for "pure" LXC
containers, as well as for Docker containers.
Control Groups
--------------
Control groups are exposed through a pseudo-filesystem. In recent
distros, you should find this filesystem under
``/sys/fs/cgroup``. Under that directory, you will see multiple
sub-directories, called devices, freezer, blkio, etc.; each
sub-directory actually corresponds to a different cgroup hierarchy.
On older systems, the control groups might be mounted on ``/cgroup``,
without distinct hierarchies. In that case, instead of seeing the
sub-directories, you will see a bunch of files in that directory, and
possibly some directories corresponding to existing containers.
To figure out where your control groups are mounted, you can run:
::
grep cgroup /proc/mounts
.. _run_findpid:
Enumerating Cgroups
-------------------
You can look into ``/proc/cgroups`` to see the different control group
subsystems known to the system, the hierarchy they belong to, and how
many groups they contain.
You can also look at ``/proc/<pid>/cgroup`` to see which control
groups a process belongs to. The control group will be shown as a path
relative to the root of the hierarchy mountpoint; e.g. ``/`` means
“this process has not been assigned into a particular group”, while
``/lxc/pumpkin`` means that the process is likely to be a member of a
container named ``pumpkin``.
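For example, you can check which control groups your current shell
belongs to like this (a quick illustration; the hierarchies and paths
shown are illustrative and will vary by distribution)::

    $ cat /proc/self/cgroup
    3:memory:/
    2:cpu:/
    1:cpuset:/

A process running inside the ``pumpkin`` container would instead show
paths like ``/lxc/pumpkin``.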
Finding the Cgroup for a Given Container
----------------------------------------
For each container, one cgroup will be created in each hierarchy. On
older systems with older versions of the LXC userland tools, the name
of the cgroup will be the name of the container. With more recent
versions of the LXC tools, the cgroup will be ``lxc/<container_name>``.
For Docker containers using cgroups, the container name will be the
full ID or long ID of the container. If a container shows up as
ae836c95b4c3 in ``docker ps``, its long ID might be something like
``ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79``. You
can look it up with ``docker inspect`` or ``docker ps --no-trunc``.
Putting everything together to look at the memory metrics for a Docker
container, take a look at ``/sys/fs/cgroup/memory/lxc/<longid>/``.
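As a short shell sketch of that lookup (assuming the ``lxc``-based
hierarchy described above; adjust the path if your system lays out
cgroups differently)::

    $ CID=$(docker ps --no-trunc -q | head -n 1)
    $ cat /sys/fs/cgroup/memory/lxc/$CID/memory.stat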
Metrics from Cgroups: Memory, CPU, Block IO
-------------------------------------------
For each subsystem (memory, CPU, and block I/O), you will find one or
more pseudo-files containing statistics.
Memory Metrics: ``memory.stat``
...............................
Memory metrics are found in the "memory" cgroup. Note that the memory
control group adds a little overhead, because it does very
fine-grained accounting of the memory usage on your host. Therefore,
many distros chose not to enable it by default. Generally, to enable
it, all you have to do is to add some kernel command-line parameters:
``cgroup_enable=memory swapaccount=1``.
The metrics are in the pseudo-file ``memory.stat``. Here is what it
will look like:
::
cache 11492564992
rss 1930993664
mapped_file 306728960
pgpgin 406632648
pgpgout 403355412
swap 0
pgfault 728281223
pgmajfault 1724
inactive_anon 46608384
active_anon 1884520448
inactive_file 7003344896
active_file 4489052160
unevictable 32768
hierarchical_memory_limit 9223372036854775807
hierarchical_memsw_limit 9223372036854775807
total_cache 11492564992
total_rss 1930993664
total_mapped_file 306728960
total_pgpgin 406632648
total_pgpgout 403355412
total_swap 0
total_pgfault 728281223
total_pgmajfault 1724
total_inactive_anon 46608384
total_active_anon 1884520448
total_inactive_file 7003344896
total_active_file 4489052160
total_unevictable 32768
The first half (without the ``total_`` prefix) contains statistics
relevant to the processes within the cgroup, excluding
sub-cgroups. The second half (with the ``total_`` prefix) includes
sub-cgroups as well.
Some metrics are "gauges", i.e. values that can increase or decrease
(e.g. swap, the amount of swap space used by the members of the
cgroup). Some others are "counters", i.e. values that can only go up,
because they represent occurrences of a specific event (e.g. pgfault,
which indicates the number of page faults which happened since the
creation of the cgroup; this number can never decrease).
cache
the amount of memory used by the processes of this control group
that can be associated precisely with a block on a block
device. When you read from and write to files on disk, this amount
will increase. This will be the case if you use "conventional" I/O
(``open``, ``read``, ``write`` syscalls) as well as mapped files
(with ``mmap``). It also accounts for the memory used by ``tmpfs``
mounts, though the reasons are unclear.
rss
the amount of memory that *doesn't* correspond to anything on
disk: stacks, heaps, and anonymous memory maps.
mapped_file
indicates the amount of memory mapped by the processes in the
control group. It doesn't give you information about *how much*
memory is used; it rather tells you *how* it is used.
pgfault and pgmajfault
indicate the number of times that a process of the cgroup triggered
a "page fault" and a "major fault", respectively. A page fault
happens when a process accesses a part of its virtual memory space
which is nonexistent or protected. The former can happen if the
process is buggy and tries to access an invalid address (it will
then be sent a ``SIGSEGV`` signal, typically killing it with the
famous ``Segmentation fault`` message). The latter can happen when
the process reads from a memory zone which has been swapped out, or
which corresponds to a mapped file: in that case, the kernel will
load the page from disk, and let the CPU complete the memory
access. It can also happen when the process writes to a
copy-on-write memory zone: likewise, the kernel will preempt the
process, duplicate the memory page, and resume the write operation
on the process' own copy of the page. "Major" faults happen when the
kernel actually has to read the data from disk. When it just has to
duplicate an existing page, or allocate an empty page, it's a
regular (or "minor") fault.
swap
the amount of swap currently used by the processes in this cgroup.
active_anon and inactive_anon
the amount of *anonymous* memory that has been identified as
respectively *active* and *inactive* by the kernel. "Anonymous"
memory is the memory that is *not* linked to disk pages. In other
words, that's the equivalent of the rss counter described above. In
fact, the very definition of the rss counter is **active_anon** +
**inactive_anon** - **tmpfs** (where tmpfs is the amount of memory
used up by ``tmpfs`` filesystems mounted by this control
group). Now, what's the difference between "active" and "inactive"?
Pages are initially "active"; and at regular intervals, the kernel
sweeps over the memory, and tags some pages as "inactive". Whenever
they are accessed again, they are immediately retagged
"active". When the kernel is almost out of memory, and time comes to
swap out to disk, the kernel will swap "inactive" pages.
active_file and inactive_file
cache memory, with *active* and *inactive* similar to the *anon*
memory above. The exact formula is cache = **active_file** +
**inactive_file** + **tmpfs**. The exact rules used by the kernel to
move memory pages between active and inactive sets are different
from the ones used for anonymous memory, but the general principle
is the same. Note that when the kernel needs to reclaim memory, it
is cheaper to reclaim a clean (i.e. non-modified) page from this pool,
since it can be reclaimed immediately (while anonymous pages and
dirty/modified pages have to be written to disk first).
unevictable
the amount of memory that cannot be reclaimed; generally, it will
account for memory that has been "locked" with ``mlock``. It is
often used by crypto frameworks to make sure that secret keys and
other sensitive material never gets swapped out to disk.
memory and memsw limits
These are not really metrics, but a reminder of the limits applied
to this cgroup. The first one indicates the maximum amount of
physical memory that can be used by the processes of this control
group; the second one indicates the maximum amount of RAM+swap.
Accounting for memory in the page cache is very complex. If two
processes in different control groups both read the same file
(ultimately relying on the same blocks on disk), the corresponding
memory charge will be split between the control groups. It's nice, but
it also means that when a cgroup is terminated, it could increase the
memory usage of another cgroup, because they are not splitting the
cost anymore for those memory pages.
CPU metrics: ``cpuacct.stat``
.............................
Now that we've covered memory metrics, everything else will look very
simple in comparison. CPU metrics will be found in the ``cpuacct``
controller.
For each container, you will find a pseudo-file ``cpuacct.stat``,
containing the CPU usage accumulated by the processes of the
container, broken down between ``user`` and ``system`` time. If you're
not familiar with the distinction, ``user`` is the time during which
the processes were in direct control of the CPU (i.e. executing
process code), and ``system`` is the time during which the CPU was
executing system calls on behalf of those processes.
Those times are expressed in ticks of 1/100th of a second. Actually,
they are expressed in "user jiffies". There are ``USER_HZ``
*"jiffies"* per second, and on x86 systems, ``USER_HZ`` is 100. This
used to map exactly to the number of scheduler "ticks" per second; but
with the advent of higher frequency scheduling, as well as `tickless
kernels <http://lwn.net/Articles/549580/>`_, the number of kernel
ticks wasn't relevant anymore. It stuck around anyway, mainly for
legacy and compatibility reasons.
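For example, reusing the ``$CID`` variable from the memory example
above, you can read a container's accumulated CPU time and convert the
jiffies to seconds by dividing by 100 (assuming ``USER_HZ`` is 100, as
on x86; the numbers below are illustrative)::

    $ cat /sys/fs/cgroup/cpuacct/lxc/$CID/cpuacct.stat
    user 46011
    system 2842

Here the container would have used about 460 seconds of user time and
about 28 seconds of system time.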
Block I/O metrics
.................
Block I/O is accounted in the ``blkio`` controller. Different metrics
are scattered across different files. While you can find in-depth
details in the `blkio-controller
<https://www.kernel.org/doc/Documentation/cgroups/blkio-controller.txt>`_
file in the kernel documentation, here is a short list of the most
relevant ones:
blkio.sectors
contains the number of 512-byte sectors read and written by the
processes that are members of the cgroup, device by device. Reads and writes
are merged in a single counter.
blkio.io_service_bytes
indicates the number of bytes read and written by the cgroup. It has
4 counters per device, because for each device, it differentiates
between synchronous vs. asynchronous I/O, and reads vs. writes.
blkio.io_serviced
the number of I/O operations performed, regardless of their size. It
also has 4 counters per device.
blkio.io_queued
indicates the number of I/O operations currently queued for this
cgroup. In other words, if the cgroup isn't doing any I/O, this will
be zero. Note that the opposite is not true. In other words, if
there is no I/O queued, it does not mean that the cgroup is idle
(I/O-wise). It could be doing purely synchronous reads on an
otherwise quiescent device, which is therefore able to handle them
immediately, without queuing. Also, while it is helpful to figure
out which cgroup is putting stress on the I/O subsystem, keep in
mind that it is a relative quantity. Even if a process group does
not perform more I/O, its queue size can increase just because the
device load increases because of other devices.
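For instance, you can read the per-device byte counters like this
(again assuming the ``lxc`` hierarchy; the output shown is
illustrative)::

    $ cat /sys/fs/cgroup/blkio/lxc/$CID/blkio.io_service_bytes
    8:0 Read 12845056
    8:0 Write 4096
    8:0 Sync 4096
    8:0 Async 12845056
    8:0 Total 12849152
    Total 12849152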
Network Metrics
---------------
Network metrics are not exposed directly by control groups. There is a
good explanation for that: network interfaces exist within the context
of *network namespaces*. The kernel could probably accumulate metrics
about packets and bytes sent and received by a group of processes, but
those metrics wouldn't be very useful. You want per-interface metrics
(because traffic happening on the local ``lo`` interface doesn't
really count). But since processes in a single cgroup can belong to
multiple network namespaces, those metrics would be harder to
interpret: multiple network namespaces means multiple ``lo``
interfaces, potentially multiple ``eth0`` interfaces, etc.; so this is
why there is no easy way to gather network metrics with control
groups.
Instead we can gather network metrics from other sources:
IPtables
........
IPtables (or rather, the netfilter framework for which iptables is
just an interface) can do some serious accounting.
For instance, you can set up a rule to account for the outbound HTTP
traffic on a web server:
::
iptables -I OUTPUT -p tcp --sport 80
There is no ``-j`` or ``-g`` flag, so the rule will just count matched
packets and go to the following rule.
Later, you can check the values of the counters, with:
::
iptables -nxvL OUTPUT
Technically, ``-n`` is not required, but it will prevent iptables from
doing DNS reverse lookups, which are probably useless in this
scenario.
Counters include packets and bytes. If you want to set up metrics for
container traffic like this, you could execute a ``for`` loop to add
two ``iptables`` rules per container IP address (one in each
direction), in the ``FORWARD`` chain. This will only meter traffic
going through the NAT layer; you will also have to add traffic going
through the userland proxy.
Then, you will need to check those counters on a regular basis. If you
happen to use ``collectd``, there is a nice plugin to automate
iptables counters collection.
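As a sketch of the per-container metering idea above
(``$CONTAINER_IP`` is a hypothetical variable that you would fill in,
e.g. from ``docker inspect``)::

    # count traffic to and from one container, in the FORWARD chain
    iptables -I FORWARD -s $CONTAINER_IP
    iptables -I FORWARD -d $CONTAINER_IP
    # later, read back the packet and byte counters
    iptables -nxvL FORWARD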
Interface-level counters
........................
Since each container has a virtual Ethernet interface, you might want
to check the TX and RX counters of this interface directly. You will
notice that each container is associated to a virtual Ethernet
interface in your host, with a name like ``vethKk8Zqi``. Figuring out
which interface corresponds to which container is, unfortunately,
difficult.
But for now, the best way is to check the metrics *from within the
containers*. To accomplish this, you can run an executable from the
host environment within the network namespace of a container using
**ip-netns magic**.
The ``ip-netns exec`` command will let you execute any program
(present in the host system) within any network namespace visible to
the current process. This means that your host will be able to enter
the network namespace of your containers, but your containers won't be
able to access the host, nor their sibling containers. Containers will
be able to “see” and affect their sub-containers, though.
The exact format of the command is::
ip netns exec <nsname> <command...>
For example::
ip netns exec mycontainer netstat -i
``ip netns`` finds the "mycontainer" container by using namespaces
pseudo-files. Each process belongs to one network namespace, one PID
namespace, one ``mnt`` namespace, etc., and those namespaces are
materialized under ``/proc/<pid>/ns/``. For example, the network
namespace of PID 42 is materialized by the pseudo-file
``/proc/42/ns/net``.
When you run ``ip netns exec mycontainer ...``, it expects
``/var/run/netns/mycontainer`` to be one of those
pseudo-files. (Symlinks are accepted.)
In other words, to execute a command within the network namespace of a
container, we need to:
* Find out the PID of any process within the container that we want to
investigate;
* Create a symlink from ``/var/run/netns/<somename>`` to
``/proc/<thepid>/ns/net``
* Execute ``ip netns exec <somename> ...``.
Please review :ref:`run_findpid` to learn how to find the cgroup of a
process running in the container whose network usage you want to
measure. From there, you can examine the pseudo-file named ``tasks``,
which contains the PIDs that are in the control group (i.e. in the
container). Pick any one of them.
Putting everything together, if the "short ID" of a container is held
in the environment variable ``$CID``, then you can do this::
TASKS=/sys/fs/cgroup/devices/$CID*/tasks
PID=$(head -n 1 $TASKS)
mkdir -p /var/run/netns
ln -sf /proc/$PID/ns/net /var/run/netns/$CID
ip netns exec $CID netstat -i
Tips for high-performance metric collection
-------------------------------------------
Note that running a new process each time you want to update metrics
is (relatively) expensive. If you want to collect metrics at high
resolutions, and/or over a large number of containers (think 1000
containers on a single host), you do not want to fork a new process
each time.
Here is how to collect metrics from a single process. You will have to
write your metric collector in C (or any language that lets you do
low-level system calls). You need to use a special system call,
``setns()``, which lets the current process enter any arbitrary
namespace. It requires, however, an open file descriptor to the
namespace pseudo-file (remember: that's the pseudo-file in
``/proc/<pid>/ns/net``).
However, there is a catch: you must not keep this file descriptor
open. If you do, when the last process of the control group exits, the
namespace will not be destroyed, and its network resources (like the
virtual interface of the container) will stay around forever (or
until you close that file descriptor).
The right approach would be to keep track of the first PID of each
container, and re-open the namespace pseudo-file each time.
Collecting metrics when a container exits
-----------------------------------------
Sometimes, you do not care about real-time metric collection, but when
a container exits, you want to know how much CPU, memory, etc. it has
used.
Docker makes this difficult because it relies on ``lxc-start``, which
carefully cleans up after itself, but it is still possible. It is
usually easier to collect metrics at regular intervals (e.g. every
minute, with the collectd LXC plugin) and rely on that instead.
But, if you'd still like to gather the stats when a container stops,
here is how:
For each container, start a collection process, and move it to the
control groups that you want to monitor by writing its PID to the
tasks file of the cgroup. The collection process should periodically
re-read the tasks file to check if it's the last process of the
control group. (If you also want to collect network statistics as
explained in the previous section, you should also move the process to
the appropriate network namespace.)
When the container exits, ``lxc-start`` will try to delete the control
groups. It will fail, since the control group is still in use; but
that's fine. Your process should now detect that it is the only one
remaining in the group. Now is the right time to collect all the
metrics you need!
Finally, your process should move itself back to the root control
group, and remove the container control group. To remove a control
group, just ``rmdir`` its directory. It's counter-intuitive to
``rmdir`` a directory as it still contains files; but remember that
this is a pseudo-filesystem, so usual rules don't apply. After the
cleanup is done, the collection process can exit safely.
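Putting those steps together, here is a minimal shell sketch of such
a collector (``$CGROUP`` is a hypothetical variable holding the full
path of the container's control group, e.g.
``/sys/fs/cgroup/memory/lxc/<longid>``; error handling is omitted)::

    # join the container's control group
    echo $$ > $CGROUP/tasks
    # wait until we are the last process left in the group
    while [ "$(wc -l < $CGROUP/tasks)" -gt 1 ]; do
        sleep 1
    done
    # collect the final metrics
    cat $CGROUP/memory.stat
    # move back to the root control group, then remove the empty one
    echo $$ > /sys/fs/cgroup/memory/tasks
    rmdir $CGROUP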


@@ -1,269 +0,0 @@
:title: Docker Security
:description: Review of the Docker Daemon attack surface
:keywords: Docker, Docker documentation, security
.. _dockersecurity:
Docker Security
===============
*Adapted from* `Containers & Docker: How Secure are They? <blogsecurity_>`_
There are three major areas to consider when reviewing Docker security:
* the intrinsic security of containers, as implemented by kernel
namespaces and cgroups;
* the attack surface of the Docker daemon itself;
* the "hardening" security features of the kernel and how they
interact with containers.
Kernel Namespaces
-----------------
Docker containers are essentially LXC containers, and they come with
the same security features. When you start a container with ``docker
run``, behind the scenes Docker uses ``lxc-start`` to execute the
Docker container. This creates a set of namespaces and control groups
for the container. Those namespaces and control groups are not created
by Docker itself, but by ``lxc-start``. This means that as the LXC
userland tools evolve (and provide additional namespaces and isolation
features), Docker will automatically make use of them.
**Namespaces provide the first and most straightforward form of
isolation**: processes running within a container cannot see, and even
less affect, processes running in another container, or in the host
system.
**Each container also gets its own network stack**, meaning that a
container doesn't get privileged access to the sockets or interfaces
of another container. Of course, if the host system is set up
accordingly, containers can interact with each other through their
respective network interfaces — just like they can interact with
external hosts. When you specify public ports for your containers or
use :ref:`links <working_with_links_names>` then IP traffic is allowed
between containers. They can ping each other, send/receive UDP
packets, and establish TCP connections, but that can be restricted if
necessary. From a network architecture point of view, all containers
on a given Docker host are sitting on bridge interfaces. This means
that they are just like physical machines connected through a common
Ethernet switch; no more, no less.
How mature is the code providing kernel namespaces and private
networking? Kernel namespaces were introduced `between kernel version
2.6.15 and 2.6.26
<http://lxc.sourceforge.net/index.php/about/kernel-namespaces/>`_. This
means that since July 2008 (date of the 2.6.26 release, now 5 years
ago), namespace code has been exercised and scrutinized on a large
number of production systems. And there is more: the design and
inspiration for the namespaces code are even older. Namespaces are
actually an effort to reimplement the features of `OpenVZ
<http://en.wikipedia.org/wiki/OpenVZ>`_ in such a way that they could
be merged within the mainstream kernel. And OpenVZ was initially
released in 2005, so both the design and the implementation are
pretty mature.
Control Groups
--------------
Control Groups are the other key component of Linux Containers. They
implement resource accounting and limiting. They provide a lot of very
useful metrics, but they also help to ensure that each container gets
its fair share of memory, CPU, disk I/O; and, more importantly, that a
single container cannot bring the system down by exhausting one of
those resources.
So while they do not play a role in preventing one container from
accessing or affecting the data and processes of another container,
they are essential to fend off some denial-of-service attacks. They
are particularly important on multi-tenant platforms, like public and
private PaaS, to guarantee a consistent uptime (and performance) even
when some applications start to misbehave.
Control Groups have been around for a while as well: the code was
started in 2006, and initially merged in kernel 2.6.24.
.. _dockersecurity_daemon:
Docker Daemon Attack Surface
----------------------------
Running containers (and applications) with Docker implies running the
Docker daemon. This daemon currently requires root privileges, and you
should therefore be aware of some important details.
First of all, **only trusted users should be allowed to control your
Docker daemon**. This is a direct consequence of some powerful Docker
features. Specifically, Docker allows you to share a directory between
the Docker host and a guest container; and it allows you to do so
without limiting the access rights of the container. This means that
you can start a container where the ``/host`` directory will be the
``/`` directory on your host; and the container will be able to alter
your host filesystem without any restriction. Does this sound crazy? Well,
you have to know that **all virtualization systems allowing filesystem
resource sharing behave the same way**. Nothing prevents you from
sharing your root filesystem (or even your root block device) with a
virtual machine.
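For illustration, the directory sharing described above can be as
simple as this (a sketch; do not run it on a host you care about
unless you understand the implications)::

    # inside the container, /host will be the host's root filesystem
    docker run -v /:/host -t -i ubuntu /bin/bash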
This has a strong security implication: if you instrument Docker from
e.g. a web server to provision containers through an API, you should
be even more careful than usual with parameter checking, to make sure
that a malicious user cannot pass crafted parameters causing Docker to
create arbitrary containers.
For this reason, the REST API endpoint (used by the Docker CLI to
communicate with the Docker daemon) changed in Docker 0.5.2, and now
uses a UNIX socket instead of a TCP socket bound on 127.0.0.1 (the
latter being prone to cross-site-scripting attacks if you happen to
run Docker directly on your local machine, outside of a VM). You can
then use traditional UNIX permission checks to limit access to the
control socket.
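For example, assuming your user has permission on the socket, you can
query the daemon directly with a ``nc`` build that supports UNIX
sockets::

    echo -e "GET /containers/json HTTP/1.0\r\n" | nc -U /var/run/docker.sock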
You can also expose the REST API over HTTP if you explicitly decide
so. However, if you do that, keep the above-mentioned security
implication in mind: you should ensure that it will be reachable
only from a trusted network or VPN; or protected with e.g. ``stunnel``
and client SSL certificates.
Recent improvements in Linux namespaces will soon make it possible to run
full-featured containers without root privileges, thanks to the new
user namespace. This is covered in detail `here
<http://s3hh.wordpress.com/2013/07/19/creating-and-using-containers-without-privilege/>`_. Moreover,
this will solve the problem caused by sharing filesystems between host
and guest, since the user namespace allows users within containers
(including the root user) to be mapped to other users in the host
system.
The end goal for Docker is therefore to implement two additional
security improvements:
* map the root user of a container to a non-root user of the Docker
host, to mitigate the effects of a container-to-host privilege
escalation;
* allow the Docker daemon to run without root privileges, and delegate
operations requiring those privileges to well-audited sub-processes,
each with its own (very limited) scope: virtual network setup,
filesystem management, etc.
Finally, if you run Docker on a server, it is recommended to run
Docker exclusively on that server, and to move all other services into
containers controlled by Docker. Of course, it is fine to keep your
favorite admin tools (probably at least an SSH server), as well as
existing monitoring/supervision processes (e.g. NRPE, collectd, etc).
Linux Kernel Capabilities
-------------------------
By default, Docker starts containers with a very restricted set of
capabilities. What does that mean?
Capabilities turn the binary "root/non-root" dichotomy into a
fine-grained access control system. Processes (like web servers) that
just need to bind on a port below 1024 do not have to run as root:
they can just be granted the ``net_bind_service`` capability
instead. And there are many other capabilities, for almost all the
specific areas where root privileges are usually needed.
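For example, outside of any container, the ``libcap`` tools can grant
a binary just that one capability instead of full root
(``/usr/bin/myserver`` is a hypothetical binary; this requires a
filesystem with extended attribute support)::

    setcap cap_net_bind_service=+ep /usr/bin/myserver

After this, the server can bind to port 80 while running as an
unprivileged user.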
This means a lot for container security; let's see why!
Your average server (bare metal or virtual machine) needs to run a
bunch of processes as root. Those typically include SSH, cron,
syslogd; hardware management tools (to e.g. load modules), network
configuration tools (to handle e.g. DHCP, WPA, or VPNs), and much
more. A container is very different, because almost all of those tasks
are handled by the infrastructure around the container:
* SSH access will typically be managed by a single server running in
the Docker host;
* ``cron``, when necessary, should run as a user process, dedicated
and tailored for the app that needs its scheduling service, rather
than as a platform-wide facility;
* log management will also typically be handed to Docker, or by
third-party services like Loggly or Splunk;
* hardware management is irrelevant, meaning that you never need to
run ``udevd`` or equivalent daemons within containers;
* network management happens outside of the containers, enforcing
separation of concerns as much as possible, meaning that a container
should never need to run ``ifconfig``, ``route``, or ``ip`` commands
(except when a container is specifically engineered to behave like a
router or firewall, of course).
This means that in most cases, containers will not need "real" root
privileges *at all*. And therefore, containers can run with a reduced
capability set, meaning that "root" within a container has far fewer
privileges than the real "root". For instance, it is possible to:
* deny all "mount" operations;
* deny access to raw sockets (to prevent packet spoofing);
* deny access to some filesystem operations, like creating new device
nodes, changing the owner of files, or altering attributes
(including the immutable flag);
* deny module loading;
* and many others.
This means that even if an intruder manages to escalate to root within
a container, it will be much harder to do serious damage, or to
escalate to the host.
This won't affect regular web apps; but malicious users will find that
the arsenal at their disposal has shrunk considerably! You can see
`the list of dropped capabilities in the Docker code
<https://github.com/dotcloud/docker/blob/v0.5.0/lxc_template.go#L97>`_,
and a full list of available capabilities in `Linux manpages
<http://man7.org/linux/man-pages/man7/capabilities.7.html>`_.
Of course, you can always enable extra capabilities if you really need
them (for instance, if you want to use a FUSE-based filesystem), but
by default, Docker containers will be locked down to ensure maximum
safety.
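For instance, at the time of writing there is no flag to grant a
single extra capability, so allowing a FUSE mount means running the
container with extended privileges (a sketch)::

    docker run -privileged -t -i ubuntu /bin/bash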
Other Kernel Security Features
------------------------------
Capabilities are just one of the many security features provided by
modern Linux kernels. It is also possible to leverage existing,
well-known systems like TOMOYO, AppArmor, SELinux, GRSEC, etc. with
Docker.
While Docker currently only enables capabilities, it doesn't interfere
with the other systems. This means that there are many different ways
to harden a Docker host. Here are a few examples.
* You can run a kernel with GRSEC and PAX. This will add many safety
checks, both at compile-time and run-time; it will also defeat many
exploits, thanks to techniques like address randomization. It
doesn't require Docker-specific configuration, since those security
features apply system-wide, independently of containers.
* If your distribution comes with security model templates for LXC
containers, you can use them out of the box. For instance, Ubuntu
comes with AppArmor templates for LXC, and those templates provide
an extra safety net (even though it overlaps greatly with
capabilities).
* You can define your own policies using your favorite access control
mechanism. Since Docker containers are standard LXC containers,
there is nothing “magic” or specific to Docker.
Just like there are many third-party tools to augment Docker
containers with e.g. special network topologies or shared filesystems,
you can expect to see tools to harden existing Docker containers
without affecting Docker's core.
Conclusions
-----------
Docker containers are, by default, quite secure; especially if you
take care of running your processes inside the containers as
non-privileged users (i.e. non-root).
You can add an extra layer of safety by enabling AppArmor, SELinux,
GRSEC, or your favorite hardening solution.
Last but not least, if you see interesting security features in other
containerization systems, you will be able to implement them as well
with Docker, since everything is provided by the kernel anyway.
For more context and especially for comparisons with VMs and other
container systems, please also see the `original blog post
<blogsecurity_>`_.
.. _blogsecurity: http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/


@@ -1,266 +0,0 @@
# -*- coding: utf-8 -*-
#
# Docker documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 19 12:34:07 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Additional templates that should be rendered to pages, maps page names to
# template names.
# the 'redirect_home.html' page redirects using an http meta refresh which, according
# to official sources, is more or less equivalent to a 301.
html_additional_pages = {
'concepts/containers': 'redirect_home.html',
'concepts/introduction': 'redirect_home.html',
'builder/basics': 'redirect_build.html',
}
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.extlinks']
# Configure extlinks
extlinks = { 'issue': ('https://github.com/dotcloud/docker/issues/%s',
'Issue ') }
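# For example, with the role configured above, RST sources can write
# :issue:`1024` and Sphinx renders it as a link titled "Issue 1024"
# pointing to https://github.com/dotcloud/docker/issues/1024
# (illustrative usage).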
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
html_add_permalinks = u''
# The master toctree document.
master_doc = 'toctree'
# General information about the project.
project = u'Docker'
copyright = u'2014 Docker, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'docker'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = ['../theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# We use a png favicon. This is not compatible with Internet Explorer, but looks
# much better on all other browsers. However, Sphinx doesn't like it (it prefers
# .ico) so we have just put it in the template rather than using this setting.
# html_favicon = 'favicon.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static_files']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Dockerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('toctree', 'Docker.tex', u'Docker Documentation',
u'Team Docker', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('reference/commandline/cli', 'docker', u'Docker CLI Documentation',
[u'Team Docker'], 1),
('reference/builder', 'Dockerfile', u'Dockerfile Documentation',
[u'Team Docker'], 5),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('toctree', 'Docker', u'Docker Documentation',
u'Team Docker', 'Docker', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
View file
@ -1,25 +0,0 @@
:title: Contribution Guidelines
:description: Contribution guidelines: create issues, conventions, pull requests
:keywords: contributing, docker, documentation, help, guideline
Contributing to Docker
======================
Want to hack on Docker? Awesome!
The repository includes `all the instructions you need to get
started <https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md>`_.
The `developer environment Dockerfile
<https://github.com/dotcloud/docker/blob/master/Dockerfile>`_
specifies the tools and versions used to test and build Docker.
If you're making changes to the documentation, see the
`README.md <https://github.com/dotcloud/docker/blob/master/docs/README.md>`_.
The `documentation environment Dockerfile
<https://github.com/dotcloud/docker/blob/master/docs/Dockerfile>`_
specifies the tools and versions used to build the Documentation.
Further interesting details can be found in the `Packaging hints
<https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md>`_.
View file
@ -1,167 +0,0 @@
:title: Setting Up a Dev Environment
:description: Guides on how to contribute to docker
:keywords: Docker, documentation, developers, contributing, dev environment
Setting Up a Dev Environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To make it easier to contribute to Docker, we provide a standard
development environment. It is important that the same environment be
used for all tests, builds and releases. The standard development
environment defines all build dependencies: system libraries and
binaries, go environment, go dependencies, etc.
Step 1: Install Docker
----------------------
Docker's build environment itself is a Docker container, so the first
step is to install Docker on your system.
You can follow the `install instructions most relevant to your system
<https://docs.docker.io/en/latest/installation/>`_. Make sure you have
a working, up-to-date docker installation, then continue to the next
step.
Step 2: Install tools used for this tutorial
--------------------------------------------
Install ``git``; honest, it's very good. You can use other ways to get the Docker
source, but they're not anywhere near as easy.
Install ``make``. This tutorial uses our base Makefile to kick off the docker
containers in a repeatable and consistent way. Again, you can do it in other ways
but you need to do more work.
Step 3: Check out the Source
----------------------------
.. code-block:: bash
git clone http://git@github.com/dotcloud/docker
cd docker
To check out a different revision, just use ``git checkout`` with the name of the branch or the revision number.
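For example, to check out the ``v0.10.0`` tag (the tag name is illustrative):

.. code-block:: bash

    git checkout v0.10.0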
Step 4: Build the Environment
-----------------------------
The following command will build a development environment using the Dockerfile in the current directory. Essentially, it will install all the build and runtime dependencies necessary to build and test Docker. This command will take some time to complete when you first execute it.
.. code-block:: bash
sudo make build
If the build is successful, congratulations! You have produced a clean build of
docker, neatly encapsulated in a standard build environment.
Step 5: Build the Docker Binary
-------------------------------
To create the Docker binary, run this command:
.. code-block:: bash
sudo make binary
This will create the Docker binary in ``./bundles/<version>-dev/binary/``
Using your built Docker binary
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The binary is available outside the container in the directory
``./bundles/<version>-dev/binary/``. You can swap your host docker executable
with this binary for live testing - for example, on ubuntu:
.. code-block:: bash
sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/<version>-dev/binary/docker-<version>-dev $(which docker);sudo service docker start
.. note:: It's safer to run the tests below before swapping your host's docker binary.
Step 6: Run the Tests
---------------------
To execute the test cases, run this command:
.. code-block:: bash
sudo make test
If the tests are successful, the tail of the output should look something like this:
.. code-block:: bash
--- PASS: TestWriteBroadcaster (0.00 seconds)
=== RUN TestRaceWriteBroadcaster
--- PASS: TestRaceWriteBroadcaster (0.00 seconds)
=== RUN TestTruncIndex
--- PASS: TestTruncIndex (0.00 seconds)
=== RUN TestCompareKernelVersion
--- PASS: TestCompareKernelVersion (0.00 seconds)
=== RUN TestHumanSize
--- PASS: TestHumanSize (0.00 seconds)
=== RUN TestParseHost
--- PASS: TestParseHost (0.00 seconds)
=== RUN TestParseRepositoryTag
--- PASS: TestParseRepositoryTag (0.00 seconds)
=== RUN TestGetResolvConf
--- PASS: TestGetResolvConf (0.00 seconds)
=== RUN TestCheckLocalDns
--- PASS: TestCheckLocalDns (0.00 seconds)
=== RUN TestParseRelease
--- PASS: TestParseRelease (0.00 seconds)
=== RUN TestDependencyGraphCircular
--- PASS: TestDependencyGraphCircular (0.00 seconds)
=== RUN TestDependencyGraph
--- PASS: TestDependencyGraph (0.00 seconds)
PASS
ok github.com/dotcloud/docker/utils 0.017s
If ``$TESTFLAGS`` is set in the environment, it is passed as extra arguments to ``go test``.
You can use this to select certain tests to run, e.g.:
TESTFLAGS='-run ^TestBuild$' make test
If the output indicates "FAIL" and you see errors like this:
.. code-block:: text
server.go:1302 Error: Insertion failed because database is full: database or disk is full
utils_test.go:179: Error copy: exit status 1 (cp: writing '/tmp/docker-testd5c9-[...]': No space left on device
Then you likely don't have enough memory available to run the test suite. 2GB is recommended.
Step 7: Use Docker
-------------------
You can run an interactive session in the newly built container:
.. code-block:: bash
sudo make shell
# type 'exit' or Ctrl-D to exit
Extra Step: Build and view the Documentation
--------------------------------------------
If you want to read the documentation from a local website, or are making changes
to it, you can build the documentation and then serve it by:
.. code-block:: bash
sudo make docs
# when it's done, you can point your browser to http://yourdockerhost:8000
# type Ctrl-C to exit
**Need More Help?**
If you need more help then hop on to the `#docker-dev IRC channel <irc://chat.freenode.net#docker-dev>`_ or post a message on the `Docker developer mailing list <https://groups.google.com/d/forum/docker-dev>`_.
View file
@ -1,14 +0,0 @@
:title: Contributing to Docker
:description: Guides on how to contribute to docker
:keywords: Docker, documentation, developers, contributing, dev environment
Contributing
============
.. toctree::
:maxdepth: 1
contributing
devenvironment
View file
@ -1,102 +0,0 @@
:title: Running an apt-cacher-ng service
:description: Installing and running an apt-cacher-ng service
:keywords: docker, example, package installation, networking, debian, ubuntu
.. _running_apt-cacher-ng_service:
Apt-Cacher-ng Service
=====================
.. include:: example_header.inc
When you have multiple Docker servers, or build unrelated Docker containers
which can't make use of the Docker build cache, it can be useful to have a
caching proxy for your packages. This container makes the second download of
any package almost instant.
Use the following Dockerfile:
.. literalinclude:: apt-cacher-ng.Dockerfile
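A minimal sketch of what that Dockerfile can contain (assuming the stock Ubuntu ``apt-cacher-ng`` package; the actual included file may differ):

.. code-block:: bash

    FROM    ubuntu
    # install the caching proxy
    RUN     apt-get update && apt-get install -y apt-cacher-ng
    EXPOSE  3142
    # start the service and tail its logs so `docker logs -f` shows activity
    CMD     chmod 777 /var/cache/apt-cacher-ng && /etc/init.d/apt-cacher-ng start && tail -f /var/log/apt-cacher-ng/*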
Build the image using:
.. code-block:: bash
$ sudo docker build -t eg_apt_cacher_ng .
Then run it, mapping the exposed port to one on the host:
.. code-block:: bash
$ sudo docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng
To see the logfiles that are 'tailed' in the default command, you can use:
.. code-block:: bash
$ sudo docker logs -f test_apt_cacher_ng
To get your Debian-based containers to use the proxy, you can do one of three things:
1. Add an apt proxy setting: ``echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy``
2. Set an environment variable: ``http_proxy=http://dockerhost:3142/``
3. Change your ``sources.list`` entries to start with ``http://dockerhost:3142/``
**Option 1** injects the settings safely into your apt configuration in a local
version of a common base:
.. code-block:: bash
FROM ubuntu
RUN echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy
RUN apt-get update ; apt-get install -y vim git
# docker build -t my_ubuntu .
**Option 2** is good for testing, but will
break other HTTP clients which obey ``http_proxy``, such as ``curl``, ``wget`` and others:
.. code-block:: bash
$ sudo docker run --rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash
**Option 3** is the least portable, but there will be times when you might need to
do it and you can do it from your ``Dockerfile`` too.
Apt-cacher-ng has some tools that allow you to manage the repository; they
can be used by leveraging the ``VOLUME`` instruction and the image we built to run the
service:
.. code-block:: bash
$ sudo docker run --rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash
$$ /usr/lib/apt-cacher-ng/distkill.pl
Scanning /var/cache/apt-cacher-ng, please wait...
Found distributions:
bla, taggedcount: 0
1. precise-security (36 index files)
2. wheezy (25 index files)
3. precise-updates (36 index files)
4. precise (36 index files)
5. wheezy-updates (18 index files)
Found architectures:
6. amd64 (36 index files)
7. i386 (24 index files)
WARNING: The removal action may wipe out whole directories containing
index files. Select d to see detailed list.
(Number nn: tag distribution or architecture nn; 0: exit; d: show details; r: remove tagged; q: quit): q
Finally, clean up after your test by stopping and removing the container, and
then removing the image.
.. code-block:: bash
$ sudo docker stop test_apt_cacher_ng
$ sudo docker rm test_apt_cacher_ng
$ sudo docker rmi eg_apt_cacher_ng
View file
@ -1,137 +0,0 @@
:title: Process Management with CFEngine
:description: Managing containerized processes with CFEngine
:keywords: cfengine, process, management, usage, docker, documentation
Process Management with CFEngine
================================
Create Docker containers with managed processes.
Docker monitors one process in each running container and the container lives or dies with that process.
By introducing CFEngine inside Docker containers, we can alleviate a few of the issues that may arise:
* It is possible to easily start multiple processes within a container, all of which will be managed automatically, with the normal ``docker run`` command.
* If a managed process dies or crashes, CFEngine will start it again within 1 minute.
* The container itself will live as long as the CFEngine scheduling daemon (cf-execd) lives. With CFEngine, we are able to decouple the life of the container from the uptime of the service it provides.
How it works
------------
CFEngine, together with the cfe-docker integration policies, is installed as part of the Dockerfile. This builds CFEngine into our Docker image.
The Dockerfile's ``ENTRYPOINT`` takes an arbitrary number of commands (with any desired arguments) as parameters.
When we run the Docker container these parameters get written to CFEngine policies and CFEngine takes over to ensure that the desired processes are running in the container.
CFEngine scans the process table for the ``basename`` of the commands given to the ``ENTRYPOINT`` and runs the command to start the process if the ``basename`` is not found.
For example, if we start the container with ``docker run "/path/to/my/application parameters"``, CFEngine will look for a process named ``application`` and run the command.
If an entry for ``application`` is not found in the process table at any point in time, CFEngine will execute ``/path/to/my/application parameters`` to start the application once again.
The check on the process table happens every minute.
Note that it is therefore important that the command to start your application leaves a process with the basename of the command.
This can be made more flexible by making some minor adjustments to the CFEngine policies, if desired.
Usage
-----
This example assumes you have Docker installed and working.
We will install and manage ``apache2`` and ``sshd`` in a single container.
There are three steps:
1. Install CFEngine into the container.
2. Copy the CFEngine Docker process management policy into the containerized CFEngine installation.
3. Start your application processes as part of the ``docker run`` command.
Building the container image
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The first two steps can be done as part of a Dockerfile, as follows.
.. code-block:: bash
FROM ubuntu
MAINTAINER Eystein Måløy Stenberg <eytein.stenberg@gmail.com>
RUN apt-get -y install wget lsb-release unzip ca-certificates
# install latest CFEngine
RUN wget -qO- http://cfengine.com/pub/gpg.key | apt-key add -
RUN echo "deb http://cfengine.com/pub/apt $(lsb_release -cs) main" > /etc/apt/sources.list.d/cfengine-community.list
RUN apt-get update
RUN apt-get install -y cfengine-community
# install cfe-docker process management policy
RUN wget https://github.com/estenberg/cfe-docker/archive/master.zip -P /tmp/ && unzip /tmp/master.zip -d /tmp/
RUN cp /tmp/cfe-docker-master/cfengine/bin/* /var/cfengine/bin/
RUN cp /tmp/cfe-docker-master/cfengine/inputs/* /var/cfengine/inputs/
RUN rm -rf /tmp/cfe-docker-master /tmp/master.zip
# apache2 and openssh are just for testing purposes, install your own apps here
RUN apt-get -y install openssh-server apache2
RUN mkdir -p /var/run/sshd
RUN echo "root:password" | chpasswd # need a password for ssh
ENTRYPOINT ["/var/cfengine/bin/docker_processes_run.sh"]
By saving this file as ``Dockerfile`` to a working directory, you can then build your container with the docker build command,
e.g. ``docker build -t managed_image .``.
Testing the container
~~~~~~~~~~~~~~~~~~~~~
Start the container with ``apache2`` and ``sshd`` running and managed, forwarding a port to our SSH instance:
.. code-block:: bash
docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start"
We now clearly see one of the benefits of the cfe-docker integration: it allows us to start several processes
as part of a normal ``docker run`` command.
We can now log in to our new container and see that both ``apache2`` and ``sshd`` are running. We have set the root password to
"password" in the Dockerfile above and can use that to log in with ssh:
.. code-block:: bash
ssh -p222 root@127.0.0.1
ps -ef
UID PID PPID C STIME TTY TIME CMD
root 1 0 0 07:48 ? 00:00:00 /bin/bash /var/cfengine/bin/docker_processes_run.sh /usr/sbin/sshd /etc/init.d/apache2 start
root 18 1 0 07:48 ? 00:00:00 /var/cfengine/bin/cf-execd -F
root 20 1 0 07:48 ? 00:00:00 /usr/sbin/sshd
root 32 1 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start
www-data 34 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start
www-data 35 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start
www-data 36 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start
root 93 20 0 07:48 ? 00:00:00 sshd: root@pts/0
root 105 93 0 07:48 pts/0 00:00:00 -bash
root 112 105 0 07:49 pts/0 00:00:00 ps -ef
If we stop apache2, it will be started again within a minute by CFEngine.
.. code-block:: bash
service apache2 status
Apache2 is running (pid 32).
service apache2 stop
* Stopping web server apache2 ... waiting [ OK ]
service apache2 status
Apache2 is NOT running.
# ... wait up to 1 minute...
service apache2 status
Apache2 is running (pid 173).
Adapting to your applications
-----------------------------
To make sure your applications get managed in the same manner, there are just two things you need to adjust from the above example:
* In the Dockerfile used above, install your applications instead of ``apache2`` and ``sshd``.
* When you start the container with ``docker run``, specify the command line arguments to your applications rather than ``apache2`` and ``sshd``.
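For example, to have CFEngine supervise a single hypothetical ``myapp`` daemon instead (the binary path and flag are illustrative):

.. code-block:: bash

    docker run -d managed_image "/usr/local/bin/myapp --foreground"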
View file
@ -1,56 +0,0 @@
:title: Sharing data between 2 couchdb databases
:description: Sharing data between 2 couchdb databases
:keywords: docker, example, package installation, networking, couchdb, data volumes
.. _running_couchdb_service:
CouchDB Service
===============
.. include:: example_header.inc
Here's an example of using data volumes to share the same data between
two CouchDB containers. This could be used for hot upgrades, testing
different versions of CouchDB on the same data, etc.
Create first database
---------------------
Note that we're marking ``/var/lib/couchdb`` as a data volume.
.. code-block:: bash
COUCH1=$(sudo docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03)
Add data to the first database
------------------------------
We're assuming your Docker host is reachable at ``localhost``. If not,
replace ``localhost`` with the public IP of your Docker host.
.. code-block:: bash
HOST=localhost
URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -Po '\d+$')/_utils/"
echo "Navigate to $URL in your browser, and use the couch interface to add data"
Create second database
----------------------
This time, we're requesting shared access to ``$COUCH1``'s volumes.
.. code-block:: bash
COUCH2=$(sudo docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03)
Browse data on the second database
----------------------------------
.. code-block:: bash
HOST=localhost
URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -Po '\d+$')/_utils/"
echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!'
Congratulations, you are now running two CouchDB containers, completely
isolated from each other *except* for their data.
View file
@ -1,181 +0,0 @@
:title: Hello world example
:description: A simple hello world example with Docker
:keywords: docker, example, hello world
.. _running_examples:
Check your Docker install
-------------------------
This guide assumes you have a working installation of Docker. To check
your Docker install, run the following command:
.. code-block:: bash
# Check that you have a working install
$ sudo docker info
If you get ``docker: command not found`` or something like
``/var/lib/docker/repositories: permission denied`` you may have an incomplete
Docker installation or insufficient privileges to access docker on your machine.
Please refer to :ref:`installation_list` for installation instructions.
.. _hello_world:
Hello World
-----------
.. include:: example_header.inc
This is the most basic example available for using Docker.
Download the small base image named ``busybox``:
.. code-block:: bash
# Download a busybox image
$ sudo docker pull busybox
The ``busybox`` image is a minimal Linux system. You can do the same
with any number of other images, such as ``debian``, ``ubuntu`` or ``centos``.
The images can be found and retrieved using the `Docker index`_.
.. _Docker index: http://index.docker.io
.. code-block:: bash
$ sudo docker run busybox /bin/echo hello world
This command will run a simple ``echo`` command that echoes ``hello world`` back to the console over standard out.
**Explanation:**
- **"sudo"** executes the following commands as user *root*
- **"docker run"** runs a command in a new container
- **"busybox"** is the image we are running the command in
- **"/bin/echo"** is the command we want to run in the container
- **"hello world"** is the input for the echo command
**Video:**
See the example in action
.. raw:: html
<iframe width="560" height="400" frameborder="0"
sandbox="allow-same-origin allow-scripts"
srcdoc="<body><script type=&quot;text/javascript&quot;
src=&quot;https://asciinema.org/a/7658.js&quot;
id=&quot;asciicast-7658&quot; async></script></body>">
</iframe>
----
.. _hello_world_daemon:
Hello World Daemon
------------------
.. include:: example_header.inc
And now for the most boring daemon ever written!
We will use the Ubuntu image to run a simple hello world daemon that will just print hello
world to standard out every second. It will continue to do this until
we stop it.
**Steps:**
.. code-block:: bash
container_id=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done")
We are going to run a simple hello world daemon in a new container
made from the ``ubuntu`` image.
- **"sudo docker run -d"** runs a command in a new container. We pass "-d"
  so it runs in the background as a daemon.
- **"ubuntu"** is the image we want to run the command inside of.
- **"/bin/sh -c"** is the command we want to run in the container
- **"while true; do echo hello world; sleep 1; done"** is the mini
script we want to run, that will just print hello world once a
second until we stop it.
- **$container_id** the run command returns a container ID, which we can
  use in future commands to see what is going on with this process.
.. code-block:: bash
sudo docker logs $container_id
Check the logs to make sure it is working correctly.
- **"docker logs"** This will return the logs for a container
- **$container_id** The ID of the container we want the logs for.
.. code-block:: bash
sudo docker attach --sig-proxy=false $container_id
Attach to the container to see the results in real-time.
- **"docker attach"** This will allow us to attach to a background
  process to see what is going on.
- **"--sig-proxy=false"** Do not forward signals to the container; allows
us to exit the attachment using Control-C without stopping the container.
- **$container_id** The Id of the container we want to attach to.
Exit from the container attachment by pressing Control-C.
.. code-block:: bash
sudo docker ps
Check the process list to make sure it is running.
- **"docker ps"** this shows all running processes managed by docker
.. code-block:: bash
sudo docker stop $container_id
Stop the container, since we don't need it anymore.
- **"docker stop"** This stops a container
- **$container_id** The Id of the container we want to stop.
.. code-block:: bash
sudo docker ps
Make sure it is really stopped.
**Video:**
See the example in action
.. raw:: html
<iframe width="560" height="400" frameborder="0"
sandbox="allow-same-origin allow-scripts"
srcdoc="<body><script type=&quot;text/javascript&quot;
src=&quot;https://asciinema.org/a/2562.js&quot;
id=&quot;asciicast-2562&quot; async></script></body>">
</iframe>
The next example in the series is a :ref:`nodejs_web_app` example, or
you could skip to any of the other examples:
* :ref:`nodejs_web_app`
* :ref:`running_redis_service`
* :ref:`running_ssh_service`
* :ref:`running_couchdb_service`
* :ref:`postgresql_service`
* :ref:`mongodb_image`
* :ref:`python_web_app`
View file
@ -1,126 +0,0 @@
:title: Docker HTTPS Setup
:description: How to setup docker with https
:keywords: docker, example, https, daemon
.. _running_docker_https:
Running Docker with https
=========================
By default, Docker runs via a non-networked Unix socket. It can also optionally
communicate using an HTTP socket.
If you need Docker reachable via the network in a safe manner, you can enable
TLS by specifying the `tlsverify` flag and pointing Docker's `tlscacert` flag to a
trusted CA certificate.
In daemon mode, it will only allow connections from clients authenticated by a
certificate signed by that CA. In client mode, it will only connect to servers
with a certificate signed by that CA.
.. warning::
Using TLS and managing a CA is an advanced topic. Please familiarize yourself
with OpenSSL, x509, and TLS before using it in production.
Create a CA, server and client keys with OpenSSL
------------------------------------------------
First, initialize the CA serial file and generate CA private and public keys:
.. code-block:: bash
$ echo 01 > ca.srl
$ openssl genrsa -des3 -out ca-key.pem
$ openssl req -new -x509 -days 365 -key ca-key.pem -out ca.pem
Now that we have a CA, you can create a server key and certificate signing request.
Make sure that `"Common Name (e.g. server FQDN or YOUR name)"` matches the hostname you will use
to connect to Docker or just use '*' for a certificate valid for any hostname:
.. code-block:: bash
$ openssl genrsa -des3 -out server-key.pem
$ openssl req -new -key server-key.pem -out server.csr
Next we're going to sign the key with our CA:
.. code-block:: bash
$ openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem \
-out server-cert.pem
For client authentication, create a client key and certificate signing request:
.. code-block:: bash
$ openssl genrsa -des3 -out client-key.pem
$ openssl req -new -key client-key.pem -out client.csr
To make the key suitable for client authentication, create an extensions config file:
.. code-block:: bash
$ echo extendedKeyUsage = clientAuth > extfile.cnf
Now sign the key:
.. code-block:: bash
$ openssl x509 -req -days 365 -in client.csr -CA ca.pem -CAkey ca-key.pem \
-out client-cert.pem -extfile extfile.cnf
Finally, you need to remove the passphrase from the client and server keys:
.. code-block:: bash
$ openssl rsa -in server-key.pem -out server-key.pem
$ openssl rsa -in client-key.pem -out client-key.pem
Now you can make the Docker daemon only accept connections from clients providing
a certificate trusted by our CA:
.. code-block:: bash
$ sudo docker -d --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem \
-H=0.0.0.0:4243
To be able to connect to Docker and validate its certificate, you now need to provide your client keys,
certificates and trusted CA:
.. code-block:: bash
$ docker --tlsverify --tlscacert=ca.pem --tlscert=client-cert.pem --tlskey=client-key.pem \
-H=dns-name-of-docker-host:4243
.. warning::
As shown in the example above, you don't have to run the ``docker``
client with ``sudo`` or the ``docker`` group when you use
certificate authentication. That means anyone with the keys can
give any instructions to your Docker daemon, giving them root
access to the machine hosting the daemon. Guard these keys as you
would a root password!
Other modes
-----------
If you don't want to have complete two-way authentication, you can run Docker in
various other modes by mixing the flags.
Daemon modes
~~~~~~~~~~~~
- tlsverify, tlscacert, tlscert, tlskey set: Authenticate clients
- tls, tlscert, tlskey: Do not authenticate clients
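For example, to run the daemon with TLS enabled but without client authentication (a sketch reusing the server certificate generated above):

.. code-block:: bash

    $ sudo docker -d --tls --tlscert=server-cert.pem --tlskey=server-key.pem \
      -H=0.0.0.0:4243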
Client modes
~~~~~~~~~~~~
- tls: Authenticate server based on public/default CA pool
- tlsverify, tlscacert: Authenticate server based on given CA
- tls, tlscert, tlskey: Authenticate with client certificate, do not authenticate
server based on given CA
- tlsverify, tlscacert, tlscert, tlskey: Authenticate with client certificate,
authenticate server based on given CA
The client will send its client certificate if found, so you just need to drop
your keys into `~/.docker/<ca, cert or key>.pem`.
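For example (a sketch; ``docker version`` is used here only as a simple connectivity check):

.. code-block:: bash

    $ mkdir -p ~/.docker
    $ cp ca.pem ~/.docker/ca.pem
    $ cp client-cert.pem ~/.docker/cert.pem
    $ cp client-key.pem ~/.docker/key.pem
    $ docker --tlsverify -H=dns-name-of-docker-host:4243 version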
View file
@ -1,30 +0,0 @@
:title: Docker Examples
:description: Examples on how to use Docker
:keywords: docker, hello world, node, nodejs, python, couch, couchdb, redis, ssh, sshd, examples, postgresql, link
.. _example_list:
Examples
========
Here are some examples of how to use Docker to create running
processes, starting from a very simple *Hello World* and progressing
to more substantial services like those you might find in production.
.. toctree::
:maxdepth: 1
hello_world
nodejs_web_app
running_redis_service
running_ssh_service
couchdb_data_volumes
postgresql_service
mongodb
running_riak_service
using_supervisord
cfengine_process_management
python_web_app
apt-cacher-ng
https
View file
@ -1,100 +0,0 @@
:title: Building a Docker Image with MongoDB
:description: How to build a Docker image with MongoDB pre-installed
:keywords: docker, example, package installation, networking, mongodb
.. _mongodb_image:
Building an Image with MongoDB
==============================
.. include:: example_header.inc
The goal of this example is to show how you can build your own
Docker images with MongoDB pre-installed. We will do that by
constructing a ``Dockerfile`` that downloads a base image, adds an
apt source and installs the database software on Ubuntu.
Creating a ``Dockerfile``
+++++++++++++++++++++++++
Create an empty file called ``Dockerfile``:
.. code-block:: bash
touch Dockerfile
Next, define the parent image you want to use to build your own image on top of.
Here, we'll use `Ubuntu <https://index.docker.io/_/ubuntu/>`_ (tag: ``latest``)
available on the `docker index <http://index.docker.io>`_:
.. code-block:: bash
FROM ubuntu:latest
Since we want to be running the latest version of MongoDB we'll need to add the
10gen repo to our apt sources list.
.. code-block:: bash
# Add 10gen official apt source to the sources list
RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list
Then, since we don't want Ubuntu to complain about init not being available, we'll
divert ``/sbin/initctl`` to ``/bin/true`` so it thinks everything is working.
.. code-block:: bash
# Hack for initctl not being available in Ubuntu
RUN dpkg-divert --local --rename --add /sbin/initctl
RUN ln -sf /bin/true /sbin/initctl
Afterwards we'll be able to update our apt repositories and install MongoDB
.. code-block:: bash
# Install MongoDB
RUN apt-get update
RUN apt-get install -y mongodb-10gen
To run MongoDB we'll have to create the default data directory (because we want it to
run without needing to provide a special configuration file)
.. code-block:: bash
# Create the MongoDB data directory
RUN mkdir -p /data/db
Finally, we'll expose the standard port that MongoDB runs on, 27017, as well as
define an ``ENTRYPOINT`` instruction for the container.
.. code-block:: bash
EXPOSE 27017
ENTRYPOINT ["/usr/bin/mongod"]
Now, let's build the image, which will go through the ``Dockerfile`` we made and
run all of the commands.
.. code-block:: bash
sudo docker build -t <yourname>/mongodb .
Now you should be able to run ``mongod`` as a daemon and be able to connect on
the local port!
.. code-block:: bash
# Regular style
MONGO_ID=$(sudo docker run -P -d <yourname>/mongodb)
# Lean and mean
MONGO_ID=$(sudo docker run -P -d <yourname>/mongodb --noprealloc --smallfiles)
# Check the logs out
sudo docker logs $MONGO_ID
# Connect and play around
mongo --port <port you get from `docker ps`>
Sweet!
View file
@ -1,239 +0,0 @@
:title: Running a Node.js app on CentOS
:description: Installing and running a Node.js app on CentOS
:keywords: docker, example, package installation, node, centos
.. _nodejs_web_app:
Node.js Web App
===============
.. include:: example_header.inc
The goal of this example is to show you how you can build your own
Docker images from a parent image using a ``Dockerfile`` . We will do
that by making a simple Node.js hello world web application running on
CentOS. You can get the full source code at
https://github.com/gasi/docker-node-hello.
Create Node.js app
++++++++++++++++++
First, create a directory ``src`` where all the files will live. Then create a ``package.json`` file that describes your app and its
dependencies:
.. code-block:: json
{
"name": "docker-centos-hello",
"private": true,
"version": "0.0.1",
"description": "Node.js Hello World app on CentOS using docker",
"author": "Daniel Gasienica <daniel@gasienica.ch>",
"dependencies": {
"express": "3.2.4"
}
}
Then, create an ``index.js`` file that defines a web app using the
`Express.js <http://expressjs.com/>`_ framework:
.. code-block:: javascript
var express = require('express');
// Constants
var PORT = 8080;
// App
var app = express();
app.get('/', function (req, res) {
res.send('Hello World\n');
});
app.listen(PORT);
console.log('Running on http://localhost:' + PORT);
In the next steps, we'll look at how you can run this app inside a CentOS
container using Docker. First, you'll need to build a Docker image of your app.
Creating a ``Dockerfile``
+++++++++++++++++++++++++
Create an empty file called ``Dockerfile``:
.. code-block:: bash
touch Dockerfile
Open the ``Dockerfile`` in your favorite text editor and add the following line
that defines the version of Docker the image requires to build
(this example uses Docker 0.3.4):
.. code-block:: bash
# DOCKER-VERSION 0.3.4
Next, define the parent image you want to use to build your own image on top of.
Here, we'll use `CentOS <https://index.docker.io/_/centos/>`_ (tag: ``6.4``)
available on the `Docker index`_:
.. code-block:: bash
FROM centos:6.4
Since we're building a Node.js app, you'll have to install Node.js as well as
npm on your CentOS image. Node.js is required to run your app and npm to install
your app's dependencies defined in ``package.json``.
To install the right package for CentOS, we'll use the instructions from the
`Node.js wiki`_:
.. code-block:: bash
# Enable EPEL for Node.js
RUN rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
# Install Node.js and npm
RUN yum install -y npm
To bundle your app's source code inside the Docker image, use the ``ADD``
instruction:
.. code-block:: bash
# Bundle app source
ADD . /src
Install your app dependencies using the ``npm`` binary:
.. code-block:: bash
# Install app dependencies
RUN cd /src; npm install
Your app binds to port ``8080`` so you'll use the ``EXPOSE`` instruction
to have it mapped by the ``docker`` daemon:
.. code-block:: bash
EXPOSE 8080
Last but not least, define the command to run your app using ``CMD``,
which defines your runtime, i.e. ``node``, and the path to your app,
i.e. ``src/index.js`` (see the step where we added the source to the
container):
.. code-block:: bash
CMD ["node", "/src/index.js"]
Your ``Dockerfile`` should now look like this:
.. code-block:: bash
# DOCKER-VERSION 0.3.4
FROM centos:6.4
# Enable EPEL for Node.js
RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
# Install Node.js and npm
RUN yum install -y npm
# Bundle app source
ADD . /src
# Install app dependencies
RUN cd /src; npm install
EXPOSE 8080
CMD ["node", "/src/index.js"]
Building your image
+++++++++++++++++++
Go to the directory that has your ``Dockerfile`` and run the following
command to build a Docker image. The ``-t`` flag lets you tag your
image so it's easier to find later using the ``docker images``
command:
.. code-block:: bash
sudo docker build -t <your username>/centos-node-hello .
Your image will now be listed by Docker:
.. code-block:: bash
sudo docker images
> # Example
> REPOSITORY TAG ID CREATED
> centos 6.4 539c0211cd76 8 weeks ago
> gasi/centos-node-hello latest d64d3505b0d2 2 hours ago
Run the image
+++++++++++++
Running your image with ``-d`` runs the container in detached mode, leaving the
container running in the background. The ``-p`` flag redirects a public port to a private port in the container. Run the image you previously built:
.. code-block:: bash
sudo docker run -p 49160:8080 -d <your username>/centos-node-hello
Print the output of your app:
.. code-block:: bash
# Get container ID
sudo docker ps
# Print app output
sudo docker logs <container id>
> # Example
> Running on http://localhost:8080
Test
++++
To test your app, get the port of your app that Docker mapped:
.. code-block:: bash
sudo docker ps
> # Example
> ID IMAGE COMMAND ... PORTS
> ecce33b30ebf gasi/centos-node-hello:latest node /src/index.js 49160->8080
In the example above, Docker mapped the ``8080`` port of the container to
``49160``.
Now you can call your app using ``curl`` (install if needed via:
``sudo apt-get install curl``):
.. code-block:: bash
curl -i localhost:49160
> HTTP/1.1 200 OK
> X-Powered-By: Express
> Content-Type: text/html; charset=utf-8
> Content-Length: 12
> Date: Sun, 02 Jun 2013 03:53:22 GMT
> Connection: keep-alive
>
> Hello World
We hope this tutorial helped you get up and running with Node.js and
CentOS on Docker. You can get the full source code at
https://github.com/gasi/docker-node-hello.
Continue to :ref:`running_redis_service`.
.. _Node.js wiki: https://github.com/joyent/node/wiki/Installing-Node.js-via-package-manager#rhelcentosscientific-linux-6
.. _docker index: https://index.docker.io/
View file
@ -1,117 +0,0 @@
:title: PostgreSQL service How-To
:description: Running and installing a PostgreSQL service
:keywords: docker, example, package installation, postgresql
.. _postgresql_service:
PostgreSQL Service
==================
.. include:: example_header.inc
Installing PostgreSQL on Docker
-------------------------------
Assuming there is no Docker image that suits your needs in `the index`_, you
can create one yourself.
.. _the index: http://index.docker.io
Start by creating a new Dockerfile:
.. note::
This PostgreSQL setup is for development purposes only. Refer
to the PostgreSQL documentation to fine-tune these settings so that it
is suitably secure.
.. literalinclude:: postgresql_service.Dockerfile
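A condensed sketch of what such a Dockerfile can contain (assuming apt repositories that provide PostgreSQL 9.3; the actual included file may differ):

.. code-block:: bash

    FROM ubuntu
    RUN apt-get update && apt-get install -y postgresql-9.3 postgresql-client-9.3
    USER postgres
    # create the "docker" role and database used later in this example
    RUN /etc/init.d/postgresql start &&\
        psql --command "CREATE USER docker WITH SUPERUSER PASSWORD 'docker';" &&\
        createdb -O docker docker
    # listen on all interfaces and allow remote md5-authenticated connections
    RUN echo "listen_addresses='*'" >> /etc/postgresql/9.3/main/postgresql.conf
    RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf
    EXPOSE 5432
    VOLUME ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"]
    CMD ["/usr/lib/postgresql/9.3/bin/postgres", "-D", "/var/lib/postgresql/9.3/main", "-c", "config_file=/etc/postgresql/9.3/main/postgresql.conf"]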
Build an image from the Dockerfile and assign it a name.
.. code-block:: bash
$ sudo docker build -t eg_postgresql .
And run the PostgreSQL server container (in the foreground):
.. code-block:: bash
$ sudo docker run --rm -P --name pg_test eg_postgresql
There are 2 ways to connect to the PostgreSQL server. We can use
:ref:`working_with_links_names`, or we can access it from our host (or the network).
.. note:: The ``--rm`` flag removes the container when the container
exits successfully.
Using container linking
^^^^^^^^^^^^^^^^^^^^^^^
Containers can be linked to another container's ports directly using
``--link remote_name:local_alias`` in the client's ``docker run``. This will
set a number of environment variables that can then be used to connect:
.. code-block:: bash
$ sudo docker run --rm -t -i --link pg_test:pg eg_postgresql bash
postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password
Connecting from your host system
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Assuming you have the postgresql-client installed, you can use the host-mapped port
to test as well. You need to use ``docker ps`` to find out what local host port the
container is mapped to first:
.. code-block:: bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
5e24362f27f6 eg_postgresql:latest /usr/lib/postgresql/ About an hour ago Up About an hour 0.0.0.0:49153->5432/tcp pg_test
$ psql -h localhost -p 49153 -d docker -U docker --password
Testing the database
^^^^^^^^^^^^^^^^^^^^
Once you have authenticated and have a ``docker=#`` prompt, you can
create a table and populate it.
.. code-block:: bash
psql (9.3.1)
Type "help" for help.
docker=# CREATE TABLE cities (
docker(# name varchar(80),
docker(# location point
docker(# );
CREATE TABLE
docker=# INSERT INTO cities VALUES ('San Francisco', '(-194.0, 53.0)');
INSERT 0 1
docker=# select * from cities;
name | location
---------------+-----------
San Francisco | (-194,53)
(1 row)
Using the container volumes
^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can use the defined volumes to inspect the PostgreSQL log files and to backup your
configuration and data:
.. code-block:: bash
docker run --rm --volumes-from pg_test -t -i busybox sh
/ # ls
bin etc lib linuxrc mnt proc run sys usr
dev home lib64 media opt root sbin tmp var
/ # ls /etc/postgresql/9.3/main/
environment pg_hba.conf postgresql.conf
pg_ctl.conf pg_ident.conf start.conf
/tmp # ls /var/log
ldconfig postgresql
View file
@ -1,145 +0,0 @@
:title: Python Web app example
:description: Building your own python web app using docker
:keywords: docker, example, python, web app
.. _python_web_app:
Python Web App
==============
.. include:: example_header.inc
While using Dockerfiles is the preferred way to create maintainable
and repeatable images, it's useful to know how you can try things out
and then commit your live changes to an image.
The goal of this example is to show you how you can modify your own
Docker images by making changes to a running
container, and then saving the results as a new image. We will do
that by making a simple 'hello world' Flask web application image.
Download the initial image
--------------------------
Download the ``shykes/pybuilder`` Docker image from the ``http://index.docker.io``
registry.
This image contains a ``buildapp`` script to download the web app and then ``pip install``
any required modules, and a ``runapp`` script that finds the ``app.py`` and runs it.
.. _`shykes/pybuilder`: https://github.com/shykes/pybuilder
.. code-block:: bash
$ sudo docker pull shykes/pybuilder
.. note:: This container was built with a very old version of docker
(May 2013 - see `shykes/pybuilder`_), when the ``Dockerfile`` format was different,
but the image can still be used now.
Interactively make some modifications
-------------------------------------
We then start a new container running interactively using the image.
First, we set a ``URL`` variable that points to a tarball of a simple
helloflask web app, and then we run a command contained in the image called
``buildapp``, passing it the ``$URL`` variable. The container is
given a name ``pybuilder_run`` which we will use in the next steps.
While this example is simple, you could run any number of interactive commands,
try things out, and then exit when you're done.
.. code-block:: bash
$ sudo docker run -i -t --name pybuilder_run shykes/pybuilder bash
$$ URL=http://github.com/shykes/helloflask/archive/master.tar.gz
$$ /usr/local/bin/buildapp $URL
[...]
$$ exit
Commit the container to create a new image
------------------------------------------
Save the changes we just made in the container to a new image called
``/builds/github.com/shykes/helloflask/master``. You now have 3 different
ways to refer to the container: name ``pybuilder_run``, short-id ``c8b2e8228f11``, or
long-id ``c8b2e8228f11b8b3e492cbf9a49923ae66496230056d61e07880dc74c5f495f9``.
.. code-block:: bash
$ sudo docker commit pybuilder_run /builds/github.com/shykes/helloflask/master
c8b2e8228f11b8b3e492cbf9a49923ae66496230056d61e07880dc74c5f495f9
Run the new image to start the web worker
-----------------------------------------
Use the new image to create a new container with
network port 5000 mapped to a local port
.. code-block:: bash
$ sudo docker run -d -p 5000 --name web_worker /builds/github.com/shykes/helloflask/master /usr/local/bin/runapp
- **"docker run -d"** runs a command in a new container. We pass "-d"
  so it runs as a daemon.
- **"-p 5000"** the web app is going to listen on this port, so it
must be mapped from the container to the host system.
- **/usr/local/bin/runapp** is the command which starts the web app.
View the container logs
-----------------------
View the logs for the new ``web_worker`` container and
if everything worked as planned you should see the line ``Running on
http://0.0.0.0:5000/`` in the log output.
To exit the view without stopping the container, hit Ctrl-C, or open another
terminal and continue with the example while watching the result in the logs.
.. code-block:: bash
$ sudo docker logs -f web_worker
* Running on http://0.0.0.0:5000/
See the webapp output
---------------------
Look up the public-facing port which is NAT-ed. Find the private port
used by the container and store it inside of the ``WEB_PORT`` variable.
Access the web app using the ``curl`` binary. If everything worked as planned you
should see the line ``Hello world!`` inside of your console.
.. code-block:: bash
$ WEB_PORT=$(sudo docker port web_worker 5000 | awk -F: '{ print $2 }')
# install curl if necessary, then ...
$ curl http://127.0.0.1:$WEB_PORT
Hello world!
Clean up example containers and images
--------------------------------------
.. code-block:: bash
$ sudo docker ps --all
List ``--all`` the Docker containers. If this container had already finished
running, it will still be listed here with a status of 'Exit 0'.
.. code-block:: bash
$ sudo docker stop web_worker
$ sudo docker rm web_worker pybuilder_run
$ sudo docker rmi /builds/github.com/shykes/helloflask/master shykes/pybuilder:latest
And now stop the running web worker, and delete the containers, so that we can
then delete the images that we used.
View file
@ -1,101 +0,0 @@
:title: Running a Redis service
:description: Installing and running a Redis service
:keywords: docker, example, package installation, networking, redis
.. _running_redis_service:
Redis Service
=============
.. include:: example_header.inc
Very simple, no frills, Redis service attached to a web application using a link.
Create a docker container for Redis
-----------------------------------
Firstly, we create a ``Dockerfile`` for our new Redis image.
.. code-block:: bash
FROM debian:jessie
RUN apt-get update && apt-get install -y redis-server
EXPOSE 6379
ENTRYPOINT ["/usr/bin/redis-server"]
CMD ["--bind", "0.0.0.0"]
Next we build an image from our ``Dockerfile``. Replace ``<your username>``
with your own user name.
.. code-block:: bash
sudo docker build -t <your username>/redis .
Run the service
---------------
Use the image we've just created and name your container ``redis``.
Running the service with ``-d`` runs the container in detached mode, leaving the
container running in the background.
Importantly, we're not exposing any ports on our container. Instead we're going to
use a container link to provide access to our Redis database.
.. code-block:: bash
sudo docker run --name redis -d <your username>/redis
Create your web application container
-------------------------------------
Next we can create a container for our application. We're going to use the ``--link``
flag to create a link to the ``redis`` container we've just created with an alias of
``db``. This will create a secure tunnel to the ``redis`` container and expose the
Redis instance running inside that container to only this container.
.. code-block:: bash
sudo docker run --link redis:db -i -t ubuntu:12.10 /bin/bash
Once inside our freshly created container we need to install Redis to get the
``redis-cli`` binary to test our connection.
.. code-block:: bash
apt-get update
apt-get -y install redis-server
service redis-server stop
As we've used the ``--link redis:db`` option, Docker has created some environment
variables in our web application container.
.. code-block:: bash
env | grep DB_
# Should return something similar to this with your values
DB_NAME=/violet_wolf/db
DB_PORT_6379_TCP_PORT=6379
DB_PORT=tcp://172.17.0.33:6379
DB_PORT_6379_TCP=tcp://172.17.0.33:6379
DB_PORT_6379_TCP_ADDR=172.17.0.33
DB_PORT_6379_TCP_PROTO=tcp
We can see that we've got a small list of environment variables prefixed with ``DB``.
The ``DB`` comes from the link alias specified when we launched the container. Let's use
the ``DB_PORT_6379_TCP_ADDR`` variable to connect to our Redis container.
.. code-block:: bash
redis-cli -h $DB_PORT_6379_TCP_ADDR
redis 172.17.0.33:6379>
redis 172.17.0.33:6379> set docker awesome
OK
redis 172.17.0.33:6379> get docker
"awesome"
redis 172.17.0.33:6379> exit
We could easily use this or other environment variables in our web application to make a
connection to our ``redis`` container.
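For instance, still inside the web application container, you could reach the database with one-liners built from those variables (a sketch):

.. code-block:: bash

    # store and read back a value on the linked Redis instance
    redis-cli -h $DB_PORT_6379_TCP_ADDR -p $DB_PORT_6379_TCP_PORT set docker awesome
    redis-cli -h $DB_PORT_6379_TCP_ADDR -p $DB_PORT_6379_TCP_PORT get docker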
View file
@ -1,151 +0,0 @@
:title: Running a Riak service
:description: Build a Docker image with Riak pre-installed
:keywords: docker, example, package installation, networking, riak
Riak Service
==============================
.. include:: example_header.inc
The goal of this example is to show you how to build a Docker image with Riak
pre-installed.
Creating a ``Dockerfile``
+++++++++++++++++++++++++
Create an empty file called ``Dockerfile``:
.. code-block:: bash
touch Dockerfile
Next, define the parent image you want to use to build your image on top of.
Well use `Ubuntu <https://index.docker.io/_/ubuntu/>`_ (tag: ``latest``),
which is available on the `docker index <http://index.docker.io>`_:
.. code-block:: bash
# Riak
#
# VERSION 0.1.0
# Use the Ubuntu base image provided by dotCloud
FROM ubuntu:latest
MAINTAINER Hector Castro hector@basho.com
Next, we update the APT cache and apply any updates:
.. code-block:: bash
# Update the APT cache
RUN sed -i.bak 's/main$/main universe/' /etc/apt/sources.list
RUN apt-get update
RUN apt-get upgrade -y
After that, we install and setup a few dependencies:
- ``curl`` is used to download Basho's APT repository key
- ``lsb-release`` helps us derive the Ubuntu release codename
- ``openssh-server`` allows us to login to containers remotely and join Riak
nodes to form a cluster
- ``supervisor`` is used to manage the OpenSSH and Riak processes
.. code-block:: bash
# Install and setup project dependencies
RUN apt-get install -y curl lsb-release supervisor openssh-server
RUN mkdir -p /var/run/sshd
RUN mkdir -p /var/log/supervisor
RUN locale-gen en_US en_US.UTF-8
ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf
RUN echo 'root:basho' | chpasswd
Next, we add Basho's APT repository:
.. code-block:: bash
RUN curl -s http://apt.basho.com/gpg/basho.apt.key | apt-key add -
RUN echo "deb http://apt.basho.com $(lsb_release -cs) main" > /etc/apt/sources.list.d/basho.list
RUN apt-get update
After that, we install Riak and alter a few defaults:
.. code-block:: bash
# Install Riak and prepare it to run
RUN apt-get install -y riak
RUN sed -i.bak 's/127.0.0.1/0.0.0.0/' /etc/riak/app.config
RUN echo "ulimit -n 4096" >> /etc/default/riak
Almost there. Next, we add a hack to get us past the lack of ``initctl``:
.. code-block:: bash
# Hack for initctl
# See: https://github.com/dotcloud/docker/issues/1024
RUN dpkg-divert --local --rename --add /sbin/initctl
RUN ln -sf /bin/true /sbin/initctl
Then, we expose the Riak Protocol Buffers and HTTP interfaces, along with SSH:
.. code-block:: bash
# Expose Riak Protocol Buffers and HTTP interfaces, along with SSH
EXPOSE 8087 8098 22
Finally, run ``supervisord`` so that Riak and OpenSSH are started:
.. code-block:: bash
CMD ["/usr/bin/supervisord"]
Create a ``supervisord`` configuration file
+++++++++++++++++++++++++++++++++++++++++++
Create an empty file called ``supervisord.conf``. Make sure it's at the same
directory level as your ``Dockerfile``:
.. code-block:: bash
touch supervisord.conf
Populate it with the following program definitions:
.. code-block:: bash
[supervisord]
nodaemon=true
[program:sshd]
command=/usr/sbin/sshd -D
stdout_logfile=/var/log/supervisor/%(program_name)s.log
stderr_logfile=/var/log/supervisor/%(program_name)s.log
autorestart=true
[program:riak]
command=bash -c ". /etc/default/riak && /usr/sbin/riak console"
pidfile=/var/log/riak/riak.pid
stdout_logfile=/var/log/supervisor/%(program_name)s.log
stderr_logfile=/var/log/supervisor/%(program_name)s.log
Build the Docker image for Riak
+++++++++++++++++++++++++++++++
Now you should be able to build a Docker image for Riak:
.. code-block:: bash
docker build -t "<yourname>/riak" .
Next steps
++++++++++
Riak is a distributed database. Many production deployments consist of
`at least five nodes <http://basho.com/why-your-riak-cluster-should-have-at-least-five-nodes/>`_.
See the `docker-riak <https://github.com/hectcastro/docker-riak>`_ project
for details on how to deploy a Riak cluster using Docker and Pipework.

View file

@ -1,49 +0,0 @@
:title: Running an SSH service
:description: Installing and running an sshd service
:keywords: docker, example, package installation, networking
.. _running_ssh_service:
SSH Daemon Service
==================
.. include:: example_header.inc
The following Dockerfile sets up an sshd service in a container that you can use
to connect to and inspect other containers' volumes, or to get quick access to a
test container.
.. literalinclude:: running_ssh_service.Dockerfile
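The ``literalinclude`` above pulls in a separate file that is not part of this hunk. A minimal sketch of what such a ``Dockerfile`` could contain (only the ``screencast`` password is confirmed by the login step below; the rest is an assumption):
.. code-block:: bash

    # Assumed contents of running_ssh_service.Dockerfile (illustrative)
    FROM ubuntu:12.04
    RUN apt-get update && apt-get install -y openssh-server
    RUN mkdir /var/run/sshd
    RUN echo 'root:screencast' | chpasswd
    EXPOSE 22
    CMD ["/usr/sbin/sshd", "-D"]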
Build the image using:
.. code-block:: bash
$ sudo docker build -t eg_sshd .
Then run it. You can use ``docker port`` to find out what host port the container's
port 22 is mapped to:
.. code-block:: bash
$ sudo docker run -d -P --name test_sshd eg_sshd
$ sudo docker port test_sshd 22
0.0.0.0:49154
And now you can ssh to port ``49154`` on the Docker daemon's host IP address
(``ip address`` or ``ifconfig`` can tell you that):
.. code-block:: bash
$ ssh root@192.168.1.2 -p 49154
# The password is ``screencast``.
$$
Finally, clean up after your test by stopping and removing the container, and
then removing the image.
.. code-block:: bash
$ sudo docker stop test_sshd
$ sudo docker rm test_sshd
$ sudo docker rmi eg_sshd

View file

@ -1,128 +0,0 @@
:title: Using Supervisor with Docker
:description: How to use Supervisor process management with Docker
:keywords: docker, supervisor, process management
.. _using_supervisord:
Using Supervisor with Docker
============================
.. include:: example_header.inc
Traditionally a Docker container runs a single process when it is launched, for
example an Apache daemon or an SSH server daemon. Often, though, you want to run
more than one process in a container. There are a number of ways you can
achieve this ranging from using a simple Bash script as the value of your
container's ``CMD`` instruction to installing a process management tool.
In this example we're going to make use of the process management tool,
`Supervisor <http://supervisord.org/>`_, to manage multiple processes in our
container. Using Supervisor allows us to better control, manage, and restart the
processes we want to run. To demonstrate this we're going to install and manage both an
SSH daemon and an Apache daemon.
Creating a Dockerfile
---------------------
Let's start by creating a basic ``Dockerfile`` for our new image.
.. code-block:: bash
FROM ubuntu:latest
MAINTAINER examples@docker.io
RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
RUN apt-get update
RUN apt-get upgrade -y
Installing Supervisor
---------------------
We can now install our SSH and Apache daemons as well as Supervisor in our container.
.. code-block:: bash
RUN apt-get install -y openssh-server apache2 supervisor
RUN mkdir -p /var/run/sshd
RUN mkdir -p /var/log/supervisor
Here we're installing the ``openssh-server``, ``apache2`` and ``supervisor``
(which provides the Supervisor daemon) packages. We're also creating two new
directories that are needed to run our SSH daemon and Supervisor.
Adding Supervisor's configuration file
--------------------------------------
Now let's add a configuration file for Supervisor. The default file is called
``supervisord.conf`` and is located in ``/etc/supervisor/conf.d/``.
.. code-block:: bash
ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf
Let's see what is inside our ``supervisord.conf`` file.
.. code-block:: bash
[supervisord]
nodaemon=true
[program:sshd]
command=/usr/sbin/sshd -D
[program:apache2]
command=/bin/bash -c "source /etc/apache2/envvars && exec /usr/sbin/apache2 -DFOREGROUND"
The ``supervisord.conf`` configuration file contains directives that configure
Supervisor and the processes it manages. The first block ``[supervisord]``
provides configuration for Supervisor itself. We're using one directive,
``nodaemon``, which tells Supervisor to run interactively rather than daemonize.
The next two blocks manage the services we wish to control. Each block controls
a separate process. The blocks contain a single directive, ``command``, which
specifies what command to run to start each process.
Exposing ports and running Supervisor
-------------------------------------
Now let's finish our ``Dockerfile`` by exposing some required ports and
specifying the ``CMD`` instruction to start Supervisor when our container
launches.
.. code-block:: bash
EXPOSE 22 80
CMD ["/usr/bin/supervisord"]
Here we've exposed ports 22 and 80 on the container and we're running the
``/usr/bin/supervisord`` binary when the container launches.
Building our container
----------------------
We can now build our new container.
.. code-block:: bash
sudo docker build -t <yourname>/supervisord .
Running our Supervisor container
--------------------------------
Once we've got a built image we can launch a container from it.
.. code-block:: bash
sudo docker run -p 22 -p 80 -t -i <yourname>/supervisord
2013-11-25 18:53:22,312 CRIT Supervisor running as root (no user in config file)
2013-11-25 18:53:22,312 WARN Included extra file "/etc/supervisor/conf.d/supervisord.conf" during parsing
2013-11-25 18:53:22,342 INFO supervisord started with pid 1
2013-11-25 18:53:23,346 INFO spawned: 'sshd' with pid 6
2013-11-25 18:53:23,349 INFO spawned: 'apache2' with pid 7
. . .
We've launched a new container interactively using the ``docker run`` command.
That container has run Supervisor and launched the SSH and Apache daemons with
it. We've specified the ``-p`` flag to expose ports 22 and 80. From here we can
now identify the exposed ports and connect to one or both of the SSH and Apache
daemons.
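For example, a quick sketch of identifying a mapped port with ``docker port`` and checking the Apache daemon (the container ID and port number are illustrative):
.. code-block:: bash

    # Find the host port mapped to the container's port 80
    sudo docker port <container-id> 80
    # 0.0.0.0:49155  (example output)

    # Check that Apache responds on the mapped port
    curl http://localhost:49155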

View file

@ -1,224 +0,0 @@
:title: FAQ
:description: Most frequently asked questions.
:keywords: faq, questions, documentation, docker
FAQ
===
Most frequently asked questions.
--------------------------------
How much does Docker cost?
..........................
Docker is 100% free; it is open source, so you can use it without paying.
What open source license are you using?
.......................................
We are using the Apache License Version 2.0, see it here:
https://github.com/dotcloud/docker/blob/master/LICENSE
Does Docker run on Mac OS X or Windows?
.......................................
Not at this time; Docker currently only runs on Linux, but you can
use VirtualBox to run Docker in a virtual machine on your box, and
get the best of both worlds. Check out the :ref:`macosx` and
:ref:`windows` installation guides. The small Linux distribution boot2docker
can be run inside virtual machines on these two operating systems.
How do containers compare to virtual machines?
..............................................
They are complementary. VMs are best used to allocate chunks of
hardware resources. Containers operate at the process level, which
makes them very lightweight and perfect as a unit of software
delivery.
What does Docker add to just plain LXC?
.......................................
Docker is not a replacement for LXC. "LXC" refers to capabilities
of the Linux kernel (specifically namespaces and control groups)
which allow sandboxing processes from one another, and controlling
their resource allocations. On top of this low-level foundation of
kernel features, Docker offers a high-level tool with several
powerful functionalities:
* *Portable deployment across machines.*
Docker defines a format for bundling an application and all its
dependencies into a single object which can be transferred to
any Docker-enabled machine, and executed there with the
guarantee that the execution environment exposed to the
application will be the same. LXC implements process sandboxing,
which is an important prerequisite for portable deployment, but
that alone is not enough for portable deployment. If you sent me
a copy of your application installed in a custom LXC
configuration, it would almost certainly not run on my machine
the way it does on yours, because it is tied to your machine's
specific configuration: networking, storage, logging, distro,
etc. Docker defines an abstraction for these machine-specific
settings, so that the exact same Docker container can run -
unchanged - on many different machines, with many different
configurations.
* *Application-centric.*
Docker is optimized for the deployment of applications, as
opposed to machines. This is reflected in its API, user
interface, design philosophy and documentation. By contrast, the
``lxc`` helper scripts focus on containers as lightweight
machines - basically servers that boot faster and need less
RAM. We think there's more to containers than just that.
* *Automatic build.*
Docker includes :ref:`a tool for developers to automatically
assemble a container from their source code <dockerbuilder>`,
with full control over application dependencies, build tools,
packaging etc. They are free to use ``make``, ``maven``, ``chef``, ``puppet``,
``salt``, Debian packages, RPMs, source tarballs, or any
combination of the above, regardless of the configuration of the
machines.
* *Versioning.*
Docker includes git-like capabilities for tracking successive
versions of a container, inspecting the diff between versions,
committing new versions, rolling back etc. The history also
includes how a container was assembled and by whom, so you get
full traceability from the production server all the way back to
the upstream developer. Docker also implements incremental
uploads and downloads, similar to ``git pull``, so new versions
of a container can be transferred by only sending diffs.
* *Component re-use.*
Any container can be used as a :ref:`"base image"
<base_image_def>` to create more specialized components. This
can be done manually or as part of an automated build. For
example you can prepare the ideal Python environment, and use it
as a base for 10 different applications. Your ideal Postgresql
setup can be re-used for all your future projects. And so on.
* *Sharing.*
Docker has access to a `public registry
<http://index.docker.io>`_ where thousands of people have
uploaded useful containers: anything from Redis, CouchDB,
Postgres to IRC bouncers to Rails app servers to Hadoop to base
images for various Linux distros. The :ref:`registry
<registryindexspec>` also includes an official "standard
library" of useful containers maintained by the Docker team. The
registry itself is open-source, so anyone can deploy their own
registry to store and transfer private containers, for internal
server deployments for example.
* *Tool ecosystem.*
Docker defines an API for automating and customizing the
creation and deployment of containers. There are a huge number
of tools integrating with Docker to extend its
capabilities. PaaS-like deployment (Dokku, Deis, Flynn),
multi-node orchestration (Maestro, Salt, Mesos, Openstack Nova),
management dashboards (docker-ui, Openstack Horizon, Shipyard),
configuration management (Chef, Puppet), continuous integration
(Jenkins, Strider, Travis), etc. Docker is rapidly establishing
itself as the standard for container-based tooling.
What is different between a Docker container and a VM?
......................................................
There's a great StackOverflow answer `showing the differences <http://stackoverflow.com/questions/16047306/how-is-docker-io-different-from-a-normal-virtual-machine>`_.
Do I lose my data when the container exits?
...........................................
Not at all! Any data that your application writes to disk gets preserved
in its container until you explicitly delete the container. The file
system for the container persists even after the container halts.
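A quick sketch that demonstrates this (the image and container names are illustrative):
.. code-block:: bash

    # Write a file inside a container, then let the container exit
    sudo docker run --name data_test ubuntu /bin/sh -c 'echo hello > /data.txt'

    # The stopped container's filesystem still holds the file
    sudo docker diff data_test
    # A /data.txt  (example output)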
How far do Docker containers scale?
...................................
Some of the largest server farms in the world today are based on containers.
Large web deployments like Google and Twitter, and platform providers such as
Heroku and dotCloud all run on container technology, at a scale of hundreds of
thousands or even millions of containers running in parallel.
How do I connect Docker containers?
...................................
Currently the recommended way to link containers is via the `link` primitive.
You can see details of how to `work with links here
<http://docs.docker.io/en/latest/use/working_with_links_names/>`_.
Also useful for enabling more flexible service portability is the
`Ambassador linking pattern
<http://docs.docker.io/en/latest/use/ambassador_pattern_linking/>`_.
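For example, a minimal sketch of the `link` primitive (the image names are illustrative):
.. code-block:: bash

    # Start a named server container, then link a client container to it
    sudo docker run -d --name redis_server crosbymichael/redis
    sudo docker run -i -t --link redis_server:db ubuntu /bin/bash
    # Inside the second container, DB_* environment variables now point
    # at the redis_server container.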
How do I run more than one process in a Docker container?
.........................................................
Any capable process supervisor such as http://supervisord.org/, runit, s6, or
daemontools can do the trick. Docker will start up the process management
daemon, which will then fork to run additional processes. As long as the
process manager daemon continues to run, the container will continue to run as
well. You can see a more substantial example `that uses supervisord here
<http://docs.docker.io/en/latest/examples/using_supervisord/>`_.
What platforms does Docker run on?
..................................
Linux:
- Ubuntu 12.04, 13.04 et al
- Fedora 19/20+
- RHEL 6.5+
- CentOS 6+
- Gentoo
- ArchLinux
- openSUSE 12.3+
- CRUX 3.0+
Cloud:
- Amazon EC2
- Google Compute Engine
- Rackspace
How do I report a security issue with Docker?
.............................................
You can learn about the project's security policy `here <http://www.docker.io/security/>`_
and report security issues to this `mailbox <mailto:security@docker.com>`_.
Why do I need to sign my commits to Docker with the DCO?
........................................................
Please read `our blog post <http://blog.docker.io/2014/01/docker-code-contributions-require-developer-certificate-of-origin/>`_ on the introduction of the DCO.
Can I help by adding some questions and answers?
................................................
Definitely! You can fork `the repo`_ and edit the documentation sources.
Where can I find more answers?
..............................
You can find more answers on:
* `Docker user mailinglist`_
* `Docker developer mailinglist`_
* `IRC, docker on freenode`_
* `GitHub`_
* `Ask questions on Stackoverflow`_
* `Join the conversation on Twitter`_
.. _Docker user mailinglist: https://groups.google.com/d/forum/docker-user
.. _Docker developer mailinglist: https://groups.google.com/d/forum/docker-dev
.. _the repo: http://www.github.com/dotcloud/docker
.. _IRC, docker on freenode: irc://chat.freenode.net#docker
.. _GitHub: http://www.github.com/dotcloud/docker
.. _Ask questions on Stackoverflow: http://stackoverflow.com/search?q=docker
.. _Join the conversation on Twitter: http://twitter.com/docker
Looking for something else to read? Check out the :ref:`hello_world` example.

View file

@ -1,107 +0,0 @@
:title: Installation on Amazon EC2
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: amazon ec2, virtualization, cloud, docker, documentation, installation
Amazon EC2
==========
.. include:: install_header.inc
There are several ways to install Docker on AWS EC2:
* :ref:`amazonquickstart_new` or
* :ref:`amazonquickstart` or
* :ref:`amazonstandard`
**You'll need an** `AWS account <http://aws.amazon.com/>`_ **first, of course.**
.. _amazonquickstart:
Amazon QuickStart
-----------------
1. **Choose an image:**
* Launch the `Create Instance Wizard
<https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:>`_ menu
on your AWS Console.
* Click the ``Select`` button for a 64-bit Ubuntu image. For example: Ubuntu Server 12.04.3 LTS
* For testing you can use the default (possibly free)
``t1.micro`` instance (more info on `pricing
<http://aws.amazon.com/en/ec2/pricing/>`_).
* Click the ``Next: Configure Instance Details`` button at the bottom right.
2. **Tell CloudInit to install Docker:**
* When you're on the "Configure Instance Details" step, expand the "Advanced
Details" section.
* Under "User data", select "As text".
* Enter ``#include https://get.docker.io`` into the instance *User Data*.
`CloudInit <https://help.ubuntu.com/community/CloudInit>`_ is part of the
Ubuntu image you chose; it will bootstrap Docker by running the shell
script located at this URL.
3. After a few more standard choices where defaults are probably ok, your AWS
Ubuntu instance with Docker should be running!
**If this is your first AWS instance, you may need to set up your
Security Group to allow SSH.** By default all incoming ports to your
new instance will be blocked by the AWS Security Group, so you might
just get timeouts when you try to connect.
Installing with ``get.docker.io`` (as above) will create a service named
``lxc-docker``. It will also set up a :ref:`docker group <dockergroup>` and you
may want to add the *ubuntu* user to it so that you don't have to use ``sudo``
for every Docker command.
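For example (a sketch; the group itself is created by the installer as noted above):
.. code-block:: bash

    # Add the default *ubuntu* user to the docker group; log out and
    # back in for the change to take effect
    sudo usermod -a -G docker ubuntu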
Once you've got Docker installed, you're ready to try it out -- head
on over to the :doc:`../use/basics` or :doc:`../examples/index` section.
.. _amazonquickstart_new:
Amazon QuickStart (Release Candidate - March 2014)
--------------------------------------------------
Amazon just published new Docker-ready AMIs (2014.03 Release Candidate). Docker packages
can now be installed from Amazon's provided Software Repository.
1. **Choose an image:**
* Launch the `Create Instance Wizard
<https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:>`_ menu
on your AWS Console.
* Click the ``Community AMI`` menu option on the left side
* Search for '2014.03' and select one of the Amazon-provided AMIs, for example ``amzn-ami-pv-2014.03.rc-0.x86_64-ebs``
* For testing you can use the default (possibly free)
``t1.micro`` instance (more info on `pricing
<http://aws.amazon.com/en/ec2/pricing/>`_).
* Click the ``Next: Configure Instance Details`` button at the bottom right.
2. After a few more standard choices where defaults are probably ok, your Amazon
Linux instance should be running!
3. SSH to your instance to install Docker: ``ssh -i <path to your private key> ec2-user@<your public IP address>``
4. Once connected to the instance, type ``sudo yum install -y docker ; sudo service docker start`` to install and start Docker.
.. _amazonstandard:
Standard Ubuntu Installation
----------------------------
If you want a more hands-on installation, then you can follow the
:ref:`ubuntu_linux` instructions installing Docker on any EC2 instance
running Ubuntu. Just follow Step 1 from :ref:`amazonquickstart` to
pick an image (or use one of your own) and skip the step with the
*User Data*. Then continue with the :ref:`ubuntu_linux` instructions.
Continue with the :ref:`hello_world` example.

View file

@ -1,73 +0,0 @@
:title: Installation on Arch Linux
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: arch linux, virtualization, docker, documentation, installation
.. _arch_linux:
Arch Linux
==========
.. include:: install_header.inc
.. include:: install_unofficial.inc
Installing on Arch Linux can be handled via the package in community:
* `docker <https://www.archlinux.org/packages/community/x86_64/docker/>`_
or the following AUR package:
* `docker-git <https://aur.archlinux.org/packages/docker-git/>`_
The docker package will install the latest tagged version of docker.
The docker-git package will build from the current master branch.
Dependencies
------------
Docker depends on several packages which are specified as dependencies in
the packages. The core dependencies are:
* bridge-utils
* device-mapper
* iproute2
* lxc
* sqlite
Installation
------------
For the normal package a simple
::
pacman -S docker
is all that is needed.
For the AUR package execute:
::
yaourt -S docker-git
The instructions here assume **yaourt** is installed. See
`Arch User Repository <https://wiki.archlinux.org/index.php/Arch_User_Repository#Installing_packages>`_
for information on building and installing packages from the AUR if you have not
done so before.
Starting Docker
---------------
There is a systemd service unit created for docker. To start the docker service:
::
sudo systemctl start docker
To start on system boot:
::
sudo systemctl enable docker
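Once the service is running, you can verify the installation the same way as on other distributions (the image name is illustrative):
.. code-block:: bash

    sudo docker run -i -t ubuntu /bin/bash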

View file

@ -1,123 +0,0 @@
:title: Installation from Binaries
:description: This instruction set is meant for hackers who want to try out Docker on a variety of environments.
:keywords: binaries, installation, docker, documentation, linux
.. _binaries:
Binaries
========
.. include:: install_header.inc
**This instruction set is meant for hackers who want to try out Docker
on a variety of environments.**
Before following these directions, you should really check if a
packaged version of Docker is already available for your distribution.
We have packages for many distributions, and more keep showing up all
the time!
Check runtime dependencies
--------------------------
.. DOC COMMENT: this should be kept in sync with
https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md#runtime-dependencies
To run properly, docker needs the following software to be installed at runtime:
- iptables version 1.4 or later
- Git version 1.7 or later
- procps (or similar provider of a "ps" executable)
- XZ Utils 4.9 or later
- a `properly mounted
<https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount>`_
cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point `is
<https://github.com/dotcloud/docker/issues/2683>`_ `not
<https://github.com/dotcloud/docker/issues/3485>`_ `sufficient
<https://github.com/dotcloud/docker/issues/4568>`_)
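A quick way to sanity-check most of these dependencies (a sketch; version flags vary between distributions):
.. code-block:: bash

    iptables --version        # expect 1.4 or later
    git --version             # expect 1.7 or later
    ps --version              # any provider of "ps" will do
    xz --version              # XZ Utils 4.9 or later
    grep cgroup /proc/mounts  # check that a cgroupfs hierarchy is mounted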
Check kernel dependencies
-------------------------
Docker in daemon mode has specific kernel requirements. For details,
check your distribution in :ref:`installation_list`.
In general, a 3.8 Linux kernel (or higher) is preferred, as some of the
prior versions have known issues that are triggered by Docker.
Note that Docker also has a client mode, which can run on virtually
any Linux kernel (it even builds on OSX!).
Get the docker binary:
----------------------
.. code-block:: bash
wget https://get.docker.io/builds/Linux/x86_64/docker-latest -O docker
chmod +x docker
.. note::
If you have trouble downloading the binary, you can also get the smaller
compressed release file: https://get.docker.io/builds/Linux/x86_64/docker-latest.tgz
Run the docker daemon
---------------------
.. code-block:: bash
# start the docker in daemon mode from the directory you unpacked
sudo ./docker -d &
.. _dockergroup:
Giving non-root access
----------------------
The ``docker`` daemon always runs as the root user, and since Docker
version 0.5.2, the ``docker`` daemon binds to a Unix socket instead of
a TCP port. By default that Unix socket is owned by the user *root*,
and so, by default, you can access it with ``sudo``.
Starting in version 0.5.3, if you (or your Docker installer) create a
Unix group called *docker* and add users to it, then the ``docker``
daemon will make the ownership of the Unix socket read/writable by the
*docker* group when the daemon starts. The ``docker`` daemon must
always run as the root user, but if you run the ``docker`` client as a
user in the *docker* group then you don't need to add ``sudo`` to all
the client commands.
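A sketch of setting that up (this mirrors the Ubuntu instructions later in this document):
.. code-block:: bash

    # Create the docker group and add your user to it; log out and back
    # in afterwards for the group change to take effect
    sudo groupadd docker
    sudo gpasswd -a ${USER} docker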
.. warning:: The *docker* group (or the group specified with ``-G``) is
root-equivalent; see :ref:`dockersecurity_daemon` for details.
Upgrades
--------
To upgrade your manual installation of Docker, first kill the docker
daemon:
.. code-block:: bash
killall docker
Then follow the regular installation steps.
Run your first container!
-------------------------
.. code-block:: bash
# check your docker version
sudo ./docker version
# run a container and open an interactive shell in the container
sudo ./docker run -i -t ubuntu /bin/bash
Continue with the :ref:`hello_world` example.

View file

@ -1,98 +0,0 @@
:title: Installation on CRUX Linux
:description: Docker installation on CRUX Linux.
:keywords: crux linux, virtualization, Docker, documentation, installation
.. _crux_linux:
CRUX Linux
==========
.. include:: install_header.inc
.. include:: install_unofficial.inc
Installing on CRUX Linux can be handled via the ports from `James Mills <http://prologic.shortcircuit.net.au/>`_:
* `docker <https://bitbucket.org/prologic/ports/src/tip/docker/>`_
* `docker-bin <https://bitbucket.org/prologic/ports/src/tip/docker-bin/>`_
* `docker-git <https://bitbucket.org/prologic/ports/src/tip/docker-git/>`_
The ``docker`` port will install the latest tagged version of Docker.
The ``docker-bin`` port will install the latest tagged version of Docker from upstream built binaries.
The ``docker-git`` package will build from the current master branch.
Installation
------------
For the time being (*until the CRUX Docker port(s) get into the official contrib repository*) you will need to install
`James Mills' <https://bitbucket.org/prologic/ports>`_ ports repository. You can do so via:
Download the ``httpup`` file to ``/etc/ports/``:
::
curl -q -o - "http://crux.nu/portdb/?a=getup&q=prologic" > /etc/ports/prologic.httpup
Add ``prtdir /usr/ports/prologic`` to ``/etc/prt-get.conf``:
::
vim /etc/prt-get.conf
# or:
echo "prtdir /usr/ports/prologic" >> /etc/prt-get.conf
Update ports and prt-get cache:
::
ports -u
prt-get cache
To install Docker (*and its dependencies*):
::
prt-get depinst docker
Use ``docker-bin`` for the upstream binary, or ``docker-git`` to build and install from the current master branch in git.
Kernel Requirements
-------------------
To have a working **CRUX+Docker** host, you must ensure your kernel
has the necessary modules enabled for LXC containers to function
correctly and for the Docker daemon to work properly.
Please read the ``README.rst``:
::
prt-get readme docker
There is a ``test_kernel_config.sh`` script in the above ports which you can use to test your Kernel configuration:
::
cd /usr/ports/prologic/docker
./test_kernel_config.sh /usr/src/linux/.config
Starting Docker
---------------
There is an rc script created for Docker. To start the Docker service:
::
sudo su -
/etc/rc.d/docker start
To start on system boot:
- Edit ``/etc/rc.conf``
- Put ``docker`` into the ``SERVICES=(...)`` array after ``net``.

View file

@ -1,75 +0,0 @@
:title: Installation on Fedora
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Docker, Docker documentation, Fedora, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux
.. _fedora:
Fedora
======
.. include:: install_header.inc
.. include:: install_unofficial.inc
Docker is available in **Fedora 19 and later**. Please note that due to the
current Docker limitations Docker is able to run only on the **64 bit**
architecture.
Installation
------------
The ``docker-io`` package provides Docker on Fedora.
If you have the (unrelated) ``docker`` package installed already, it will
conflict with ``docker-io``. There's a `bug report`_ filed for it.
To proceed with ``docker-io`` installation on Fedora 19 or Fedora 20, please
remove ``docker`` first.
.. code-block:: bash
sudo yum -y remove docker
For Fedora 21 and later, the ``wmdocker`` package will provide the same
functionality as the old ``docker`` and will also not conflict with ``docker-io``.
.. code-block:: bash
sudo yum -y install wmdocker
sudo yum -y remove docker
Install the ``docker-io`` package which will install Docker on our host.
.. code-block:: bash
sudo yum -y install docker-io
To update the ``docker-io`` package:
.. code-block:: bash
sudo yum -y update docker-io
Now that it's installed, let's start the Docker daemon.
.. code-block:: bash
sudo systemctl start docker
If we want Docker to start at boot, we should also:
.. code-block:: bash
sudo systemctl enable docker
Now let's verify that Docker is working.
.. code-block:: bash
sudo docker run -i -t fedora /bin/bash
**Done!** Now continue with the :ref:`hello_world` example.
.. _bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1043676

View file

@ -1,62 +0,0 @@
:title: Installation on FrugalWare
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: frugalware linux, virtualization, docker, documentation, installation
.. _frugalware:
FrugalWare
==========
.. include:: install_header.inc
.. include:: install_unofficial.inc
Installing on FrugalWare is handled via the official packages:
* `lxc-docker i686 <http://www.frugalware.org/packages/200141>`_
* `lxc-docker x86_64 <http://www.frugalware.org/packages/200130>`_
The `lxc-docker` package will install the latest tagged version of Docker.
Dependencies
------------
Docker depends on several packages which are specified as dependencies in
the packages. The core dependencies are:
* systemd
* lvm2
* sqlite3
* libguestfs
* lxc
* iproute2
* bridge-utils
Installation
------------
A simple
::
pacman -S lxc-docker
is all that is needed.
Starting Docker
---------------
There is a systemd service unit created for Docker. To start Docker as a service:
::
sudo systemctl start lxc-docker
To start on system boot:
::
sudo systemctl enable lxc-docker

View file

@ -1,84 +0,0 @@
:title: Installation on Gentoo
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: gentoo linux, virtualization, docker, documentation, installation
.. _gentoo_linux:
Gentoo
======
.. include:: install_header.inc
.. include:: install_unofficial.inc
Installing Docker on Gentoo Linux can be accomplished using one of two methods.
The first and best way if you're looking for a stable experience is to use the
official `app-emulation/docker` package directly in the portage tree.
If you're looking for a ``-bin`` ebuild, a live ebuild, or bleeding edge
ebuild changes/fixes, the second installation method is to use the overlay
provided at https://github.com/tianon/docker-overlay which can be added using
``app-portage/layman``. The most accurate and up-to-date documentation for
properly installing and using the overlay can be found in `the overlay README
<https://github.com/tianon/docker-overlay/blob/master/README.md#using-this-overlay>`_.
Note that sometimes there is a disparity between the latest version and what's
in the overlay, and between the latest version in the overlay and what's in the
portage tree. Please be patient, and the latest version should propagate
shortly.
Installation
^^^^^^^^^^^^
The package should properly pull in all the necessary dependencies and prompt
for all necessary kernel options. The ebuilds for 0.7+ include use flags to
pull in the proper dependencies of the major storage drivers, with the
"device-mapper" use flag being enabled by default, since that is the simplest
installation path.
.. code-block:: bash
sudo emerge -av app-emulation/docker
If any issues arise from this ebuild or the resulting binary, including and
especially missing kernel configuration flags and/or dependencies, `open an
issue on the docker-overlay repository
<https://github.com/tianon/docker-overlay/issues>`_ or ping tianon directly in
the #docker IRC channel on the freenode network.
Starting Docker
^^^^^^^^^^^^^^^
Ensure that you are running a kernel that includes all the necessary modules
and/or configuration for LXC (and optionally for device-mapper and/or AUFS,
depending on the storage driver you've decided to use).
OpenRC
------
To start the docker daemon:
.. code-block:: bash
sudo /etc/init.d/docker start
To start on system boot:
.. code-block:: bash
sudo rc-update add docker default
systemd
-------
To start the docker daemon:
.. code-block:: bash
sudo systemctl start docker.service
To start on system boot:
.. code-block:: bash
sudo systemctl enable docker.service

View file

@ -1,58 +0,0 @@
:title: Installation on Google Cloud Platform
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Docker, Docker documentation, installation, google, Google Compute Engine, Google Cloud Platform
`Google Cloud Platform <https://cloud.google.com/>`_
====================================================
.. include:: install_header.inc
.. _googlequickstart:
`Compute Engine <https://developers.google.com/compute>`_ QuickStart for `Debian <https://www.debian.org>`_
-----------------------------------------------------------------------------------------------------------
1. Go to `Google Cloud Console <https://cloud.google.com/console>`_ and create a new Cloud Project with `Compute Engine enabled <https://developers.google.com/compute/docs/signup>`_.
2. Download and configure the `Google Cloud SDK <https://developers.google.com/cloud/sdk/>`_ to use your project with the following commands:
.. code-block:: bash
$ curl https://dl.google.com/dl/cloudsdk/release/install_google_cloud_sdk.bash | bash
$ gcloud auth login
Enter a cloud project id (or leave blank to not set): <google-cloud-project-id>
3. Start a new instance, select a zone close to you and the desired instance size:
.. code-block:: bash
$ gcutil addinstance docker-playground --image=backports-debian-7
1: europe-west1-a
...
4: us-central1-b
>>> <zone-index>
1: machineTypes/n1-standard-1
...
12: machineTypes/g1-small
>>> <machine-type-index>
4. Connect to the instance using SSH:
.. code-block:: bash
$ gcutil ssh docker-playground
docker-playground:~$
5. Install the latest Docker release and configure it to start when the instance boots:
.. code-block:: bash
docker-playground:~$ curl get.docker.io | bash
docker-playground:~$ sudo update-rc.d docker defaults
6. Start a new container:
.. code-block:: bash
docker-playground:~$ sudo docker run busybox echo 'docker on GCE \o/'
docker on GCE \o/

View file

@ -1,34 +0,0 @@
:title: Docker Installation
:description: many ways to install Docker
:keywords: docker, installation
.. _installation_list:
Installation
============
There are a number of ways to install Docker, depending on where you
want to run the daemon. The :ref:`ubuntu_linux` installation is the
officially-tested version. The community adds more techniques for
installing Docker all the time.
Contents:
.. toctree::
:maxdepth: 1
ubuntulinux
rhel
fedora
archlinux
cruxlinux
gentoolinux
openSUSE
frugalware
mac
windows
amazon
rackspace
google
softlayer
binaries

View file

@ -1,212 +0,0 @@
:title: Installation on Mac OS X 10.6 Snow Leopard
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Docker, Docker documentation, requirements, virtualbox, ssh, linux, os x, osx, mac
.. _macosx:
========
Mac OS X
========
.. note::
These instructions are available with the new release of Docker
(version 0.8). However, they are subject to change.
.. include:: install_header.inc
Docker is supported on Mac OS X 10.6 "Snow Leopard" or newer.
How To Install Docker On Mac OS X
=================================
VirtualBox
----------
Docker on OS X needs VirtualBox to run. To begin with, head over to
`VirtualBox Download Page`_ and get the tool for ``OS X hosts x86/amd64``.
.. _VirtualBox Download Page: https://www.virtualbox.org/wiki/Downloads
Once the download is complete, open the disk image, run the set up file
(i.e. ``VirtualBox.pkg``) and install VirtualBox. Do not simply copy the
package without running the installer.
boot2docker
-----------
`boot2docker`_ provides a handy script to easily manage the VM running the
``docker`` daemon. It also takes care of the installation for the OS image
that is used for the job.
.. _GitHub page: https://github.com/boot2docker/boot2docker
With Homebrew
~~~~~~~~~~~~~
If you are using Homebrew on your machine, simply run the following command to install ``boot2docker``:
.. code-block:: bash
brew install boot2docker
Manual installation
~~~~~~~~~~~~~~~~~~~
Open up a new terminal window, if you have not already.
Run the following commands to get boot2docker:
.. code-block:: bash
# Enter the installation directory
cd ~/bin
# Get the file
curl https://raw.github.com/boot2docker/boot2docker/master/boot2docker > boot2docker
# Mark it executable
chmod +x boot2docker
Docker OS X Client
------------------
The ``docker`` daemon is accessed using the ``docker`` client.
With Homebrew
~~~~~~~~~~~~~
Run the following command to install the ``docker`` client:
.. code-block:: bash
brew install docker
Manual installation
~~~~~~~~~~~~~~~~~~~
Run the following commands to get it downloaded and set up:
.. code-block:: bash
# Get the docker client file
DIR=$(mktemp -d ${TMPDIR:-/tmp}/dockerdl.XXXXXXX) && \
curl -f -o $DIR/ld.tgz https://get.docker.io/builds/Darwin/x86_64/docker-latest.tgz && \
gunzip $DIR/ld.tgz && \
tar xvf $DIR/ld.tar -C $DIR/ && \
cp $DIR/usr/local/bin/docker ./docker
# Set the environment variable for the docker daemon
export DOCKER_HOST=tcp://127.0.0.1:4243
# Copy the executable file
sudo cp docker /usr/local/bin/
And that's it! Let's check out how to use it.
How To Use Docker On Mac OS X
=============================
The ``docker`` daemon (via boot2docker)
---------------------------------------
Inside the ``~/bin`` directory, run the following commands:
.. code-block:: bash
# Initiate the VM
./boot2docker init
# Run the VM (the docker daemon)
./boot2docker up
# To see all available commands:
./boot2docker
# Usage ./boot2docker {init|start|up|pause|stop|restart|status|info|delete|ssh|download}
The ``docker`` client
---------------------
Once the VM with the ``docker`` daemon is up, you can use the ``docker``
client just like any other application.
.. code-block:: bash
docker version
# Client version: 0.7.6
# Go version (client): go1.2
# Git commit (client): bc3b2ec
# Server version: 0.7.5
# Git commit (server): c348c04
# Go version (server): go1.2
Forwarding VM Port Range to Host
--------------------------------
If we take the port range that docker uses by default with the -P option
(49000-49900), and forward the same range from the host to the VM, we'll be able to interact
with our containers as if they were running locally:
.. code-block:: bash
# vm must be powered off
for i in {49000..49900}; do
VBoxManage modifyvm "boot2docker-vm" --natpf1 "tcp-port$i,tcp,,$i,,$i";
VBoxManage modifyvm "boot2docker-vm" --natpf1 "udp-port$i,udp,,$i,,$i";
done
SSH-ing The VM
--------------
If you feel the need to connect to the VM, you can simply run:
.. code-block:: bash
./boot2docker ssh
# User: docker
# Pwd: tcuser
You can now continue with the :ref:`hello_world` example.
Learn More
==========
boot2docker:
------------
See the GitHub page for `boot2docker`_.
.. _boot2docker: https://github.com/boot2docker/boot2docker
If SSH complains about keys:
----------------------------
.. code-block:: bash
ssh-keygen -R '[localhost]:2022'
Upgrading to a newer release of boot2docker
-------------------------------------------
To upgrade an initialised VM, you can use the following 3 commands. Your persistent
disk will not be changed, so you won't lose your images and containers:
.. code-block:: bash
./boot2docker stop
./boot2docker download
./boot2docker start
About the way Docker works on Mac OS X:
---------------------------------------
Docker has two key components: the ``docker`` daemon and the ``docker``
client. The tool works by the client sending commands to the daemon. In order to
work and do its magic, the daemon makes use of some Linux kernel
features (e.g. LXC, namespaces, etc.), which are not supported by OS X.
Therefore, the solution of getting Docker to run on OS X consists of
running it inside a lightweight virtual machine. In order to simplify
things, Docker comes with a bash script to make this whole process as
easy as possible (i.e. boot2docker).

View file

@ -1,73 +0,0 @@
:title: Installation on openSUSE
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: openSUSE, virtualbox, docker, documentation, installation
.. _openSUSE:
openSUSE
========
.. include:: install_header.inc
.. include:: install_unofficial.inc
Docker is available in **openSUSE 12.3 and later**. Please note that due to the
current Docker limitations Docker is able to run only on the **64 bit**
architecture.
Installation
------------
The ``docker`` package from the `Virtualization project`_ on `OBS`_ provides
Docker on openSUSE.
To proceed with Docker installation please add the right Virtualization
repository.
.. code-block:: bash
# openSUSE 12.3
sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_12.3/ Virtualization
# openSUSE 13.1
sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_13.1/ Virtualization
Install the Docker package.
.. code-block:: bash
sudo zypper in docker
It's also possible to install Docker using openSUSE's 1-click install. Just
visit `this`_ page, select your openSUSE version and click on the installation
link. This will add the right repository to your system and it will
also install the `docker` package.
Now that it's installed, let's start the Docker daemon.
.. code-block:: bash
sudo systemctl start docker
If we want Docker to start at boot, we should also:
.. code-block:: bash
sudo systemctl enable docker
The `docker` package creates a new group named `docker`. Users other than
the `root` user need to be part of this group in order to interact with the
Docker daemon.
.. code-block:: bash
sudo usermod -G docker <username>
**Done!** Now continue with the :ref:`hello_world` example.
.. _Virtualization project: https://build.opensuse.org/project/show/Virtualization
.. _OBS: https://build.opensuse.org/
.. _this: http://software.opensuse.org/package/docker

View file

@ -1,97 +0,0 @@
:title: Installation on Rackspace Cloud
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Rackspace Cloud, installation, docker, linux, ubuntu
Rackspace Cloud
===============
.. include:: install_unofficial.inc
Installing Docker on Ubuntu provided by Rackspace is pretty
straightforward, and you should mostly be able to follow the
:ref:`ubuntu_linux` installation guide.
**However, there is one caveat:**
If you are using any Linux not already shipping with the 3.8 kernel
you will need to install it. And this is a little more difficult on
Rackspace.
Rackspace boots their servers using grub's ``menu.lst`` and does not
like non-'virtual' (e.g. Xen-compatible) kernels there,
although they do work. This results in ``update-grub`` not having the
expected result, and you will need to set the kernel manually.
**Do not attempt this on a production machine!**
.. code-block:: bash
# update apt
apt-get update
# install the new kernel
apt-get install linux-generic-lts-raring
Great, now you have the kernel installed in ``/boot/``. Next you need to make it
boot next time.
.. code-block:: bash
# find the exact names
find /boot/ -name '*3.8*'
# this should return some results
Now you need to manually edit ``/boot/grub/menu.lst``. You will find a
section at the bottom with the existing options. Copy the top one and
substitute the new kernel into that. Make sure the new kernel is on
top, and take special care to double-check that the kernel and initrd
lines point to the right files.
.. code-block:: bash
# now edit /boot/grub/menu.lst
vi /boot/grub/menu.lst
It will probably look something like this:
::
## ## End Default Options ##
title Ubuntu 12.04.2 LTS, kernel 3.8.x generic
root (hd0)
kernel /boot/vmlinuz-3.8.0-19-generic root=/dev/xvda1 ro quiet splash console=hvc0
initrd /boot/initrd.img-3.8.0-19-generic
title Ubuntu 12.04.2 LTS, kernel 3.2.0-38-virtual
root (hd0)
kernel /boot/vmlinuz-3.2.0-38-virtual root=/dev/xvda1 ro quiet splash console=hvc0
initrd /boot/initrd.img-3.2.0-38-virtual
title Ubuntu 12.04.2 LTS, kernel 3.2.0-38-virtual (recovery mode)
root (hd0)
kernel /boot/vmlinuz-3.2.0-38-virtual root=/dev/xvda1 ro quiet splash single
initrd /boot/initrd.img-3.2.0-38-virtual
Reboot the server (either via command line or console)
.. code-block:: bash
# reboot
Verify the kernel was updated
.. code-block:: bash
uname -a
# Linux docker-12-04 3.8.0-19-generic #30~precise1-Ubuntu SMP Wed May 1 22:26:36 UTC 2013 x86_64 x86_64 x86_64 GNU/Linux
# nice! 3.8.
Now you can finish with the :ref:`ubuntu_linux` instructions.

View file

@ -1,85 +0,0 @@
:title: Installation on Red Hat Enterprise Linux
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Docker, Docker documentation, requirements, linux, rhel, centos
.. _rhel:
Red Hat Enterprise Linux
========================
.. include:: install_header.inc
.. include:: install_unofficial.inc
Docker is available for **RHEL** on EPEL. These instructions should work for
both RHEL and CentOS. They will likely work for other binary compatible EL6
distributions as well, but they haven't been tested.
Please note that this package is part of `Extra Packages for Enterprise
Linux (EPEL)`_, a community effort to create and maintain additional packages
for the RHEL distribution.
Also note that due to the current Docker limitations, Docker is able to run
only on the **64 bit** architecture.
You will need `RHEL 6.5`_ or higher, with a RHEL 6 kernel version 2.6.32-431 or higher
as this has specific kernel fixes to allow Docker to work.
Installation
------------
Firstly, you need to install the EPEL repository. Please follow the `EPEL installation instructions`_.
The ``docker-io`` package provides Docker on EPEL.
If you already have the (unrelated) ``docker`` package installed, it will
conflict with ``docker-io``. There's a `bug report`_ filed for it.
To proceed with ``docker-io`` installation, please remove
``docker`` first.
Next, let's install the ``docker-io`` package which will install Docker on our host.
.. code-block:: bash
sudo yum -y install docker-io
To update the ``docker-io`` package
.. code-block:: bash
sudo yum -y update docker-io
Now that it's installed, let's start the Docker daemon.
.. code-block:: bash
sudo service docker start
If we want Docker to start at boot, we should also:
.. code-block:: bash
sudo chkconfig docker on
Now let's verify that Docker is working.
.. code-block:: bash
sudo docker run -i -t fedora /bin/bash
**Done!** Now continue with the :ref:`hello_world` example.
Issues?
-------
If you have any issues - please report them directly in the `Red Hat Bugzilla for docker-io component`_.
.. _Extra Packages for Enterprise Linux (EPEL): https://fedoraproject.org/wiki/EPEL
.. _EPEL installation instructions: https://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F
.. _Red Hat Bugzilla for docker-io component : https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora%20EPEL&component=docker-io
.. _bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1043676
.. _RHEL 6.5: https://access.redhat.com/site/articles/3078#RHEL6

View file

@ -1,25 +0,0 @@
:title: Installation on IBM SoftLayer
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: IBM SoftLayer, virtualization, cloud, docker, documentation, installation
IBM SoftLayer
=============
.. include:: install_header.inc
IBM SoftLayer QuickStart
-------------------------
1. Create an `IBM SoftLayer account <https://www.softlayer.com/cloudlayer/>`_.
2. Log in to the `SoftLayer Console <https://control.softlayer.com/devices/>`_.
3. Go to `Order Hourly Computing Instance Wizard <https://manage.softlayer.com/Sales/orderHourlyComputingInstance>`_ on your SoftLayer Console.
4. Create a new *CloudLayer Computing Instance* (CCI) using the default values for all the fields and choose:
- *First Available* as ``Datacenter`` and
- *Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (64 bit)* as ``Operating System``.
5. Click the *Continue Your Order* button at the bottom right and select *Go to checkout*.
6. Insert the required *User Metadata* and place the order.
7. Then continue with the :ref:`ubuntu_linux` instructions.
Continue with the :ref:`hello_world` example.

View file

@ -1,380 +0,0 @@
:title: Installation on Ubuntu
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Docker, Docker documentation, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux
.. _ubuntu_linux:
Ubuntu
======
.. warning::
These instructions have changed for 0.6. If you are upgrading from
an earlier version, you will need to follow them again.
.. include:: install_header.inc
Docker is supported on the following versions of Ubuntu:
- :ref:`ubuntu_precise`
- :ref:`ubuntu_raring_saucy`
Please read :ref:`ufw`, if you plan to use `UFW (Uncomplicated
Firewall) <https://help.ubuntu.com/community/UFW>`_
.. _ubuntu_precise:
Ubuntu Precise 12.04 (LTS) (64-bit)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This installation path should work at all times.
Dependencies
------------
**Linux kernel 3.8**
Due to a bug in LXC, Docker works best on the 3.8 kernel. Precise
comes with a 3.2 kernel, so we need to upgrade it. The kernel you'll
install when following these steps comes with AUFS built in. We also
include the generic headers to enable packages that depend on them,
like ZFS and the VirtualBox guest additions. If you didn't install the
headers for your "precise" kernel, then you can skip these headers for
the "raring" kernel. But it is safer to include them if you're not
sure.
.. code-block:: bash
# install the backported kernel
sudo apt-get update
sudo apt-get install linux-image-generic-lts-raring linux-headers-generic-lts-raring
# reboot
sudo reboot
Installation
------------
.. warning::
These instructions have changed for 0.6. If you are upgrading from
an earlier version, you will need to follow them again.
Docker is available as a Debian package, which makes installation
easy. **See the** :ref:`installmirrors` **section below if you are not in
the United States.** Other sources of the Debian packages may be
faster for you to install.
First, check that your APT system can deal with ``https`` URLs:
the file ``/usr/lib/apt/methods/https`` should exist. If it doesn't,
you need to install the package ``apt-transport-https``.
.. code-block:: bash
[ -e /usr/lib/apt/methods/https ] || {
apt-get update
apt-get install apt-transport-https
}
Then, add the Docker repository key to your local keychain.
.. code-block:: bash
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
Add the Docker repository to your apt sources list, update and install the
``lxc-docker`` package.
*You may receive a warning that the package isn't trusted. Answer yes to
continue installation.*
.. code-block:: bash
sudo sh -c "echo deb https://get.docker.io/ubuntu docker main\
> /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install lxc-docker
.. note::
There is also a simple ``curl`` script available to help with this process.
.. code-block:: bash
curl -s https://get.docker.io/ubuntu/ | sudo sh
Now verify that the installation has worked by downloading the ``ubuntu`` image
and launching a container.
.. code-block:: bash
sudo docker run -i -t ubuntu /bin/bash
Type ``exit`` to exit
**Done!** Now continue with the :ref:`hello_world` example.
.. _ubuntu_raring_saucy:
Ubuntu Raring 13.04 and Saucy 13.10 (64 bit)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
These instructions cover both Ubuntu Raring 13.04 and Saucy 13.10.
Dependencies
------------
**Optional AUFS filesystem support**
Ubuntu Raring already comes with the 3.8 kernel, so we don't need to install it. However, not all systems
have AUFS filesystem support enabled. AUFS support is optional as of version 0.7, but it's still available as
a driver and we recommend using it if you can.
To make sure AUFS is installed, run the following commands:
.. code-block:: bash
sudo apt-get update
sudo apt-get install linux-image-extra-`uname -r`
Installation
------------
Docker is available as a Debian package, which makes installation easy.
.. warning::
Please note that these instructions have changed for 0.6. If you are upgrading from an earlier version, you will need
to follow them again.
First add the Docker repository key to your local keychain.
.. code-block:: bash
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
Add the Docker repository to your apt sources list, update and install the
``lxc-docker`` package.
.. code-block:: bash
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\
> /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install lxc-docker
Now verify that the installation has worked by downloading the ``ubuntu`` image
and launching a container.
.. code-block:: bash
sudo docker run -i -t ubuntu /bin/bash
Type ``exit`` to exit
**Done!** Now continue with the :ref:`hello_world` example.
Giving non-root access
----------------------
The ``docker`` daemon always runs as the root user, and since Docker version
0.5.2, the ``docker`` daemon binds to a Unix socket instead of a TCP port. By
default that Unix socket is owned by the user *root*, and so, by default, you
can access it with ``sudo``.
Starting in version 0.5.3, if you (or your Docker installer) create a
Unix group called *docker* and add users to it, then the ``docker``
daemon will make the ownership of the Unix socket read/writable by the
*docker* group when the daemon starts. The ``docker`` daemon must
always run as the root user, but if you run the ``docker`` client as a user in
the *docker* group then you don't need to add ``sudo`` to all the
client commands. As of 0.9.0, you can specify that a group other than ``docker``
should own the Unix socket with the ``-G`` option.
.. warning:: The *docker* group (or the group specified with ``-G``) is
root-equivalent; see :ref:`dockersecurity_daemon` for details.
**Example:**
.. code-block:: bash
# Add the docker group if it doesn't already exist.
sudo groupadd docker
# Add the connected user "${USER}" to the docker group.
# Change the user name to match your preferred user.
# You may have to logout and log back in again for
# this to take effect.
sudo gpasswd -a ${USER} docker
# Restart the Docker daemon.
sudo service docker restart
Upgrade
--------
To install the latest version of docker, use the standard ``apt-get`` method:
.. code-block:: bash
# update your sources list
sudo apt-get update
# install the latest
sudo apt-get install lxc-docker
Memory and Swap Accounting
^^^^^^^^^^^^^^^^^^^^^^^^^^
If you want to enable memory and swap accounting, you must add the following
command-line parameters to your kernel::
cgroup_enable=memory swapaccount=1
On systems using GRUB (which is the default for Ubuntu), you can add those
parameters by editing ``/etc/default/grub`` and extending
``GRUB_CMDLINE_LINUX``. Look for the following line::
GRUB_CMDLINE_LINUX=""
And replace it with the following one::
GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
Then run ``sudo update-grub``, and reboot.
These parameters will help you get rid of the following warnings::
WARNING: Your kernel does not support cgroup swap limit.
WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.
Troubleshooting
^^^^^^^^^^^^^^^
On Linux Mint, the ``cgroup-lite`` package is not installed by default.
Before Docker will work correctly, you will need to install this via:
.. code-block:: bash
sudo apt-get update && sudo apt-get install cgroup-lite
.. _ufw:
Docker and UFW
^^^^^^^^^^^^^^
Docker uses a bridge to manage container networking. By default, UFW drops all
`forwarding` traffic. As a result you will need to enable UFW forwarding:
.. code-block:: bash
sudo nano /etc/default/ufw
----
# Change:
# DEFAULT_FORWARD_POLICY="DROP"
# to
DEFAULT_FORWARD_POLICY="ACCEPT"
Then reload UFW:
.. code-block:: bash
sudo ufw reload
UFW's default set of rules denies all `incoming` traffic. If you want to be
able to reach your containers from another host then you should allow
incoming connections on the Docker port (default 4243):
.. code-block:: bash
sudo ufw allow 4243/tcp
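As a sketch, assuming the daemon on the remote host was started with
``-H tcp://0.0.0.0:4243``, you can then verify that the port is reachable
(replace ``<remote-host>`` with the host's address):
.. code-block:: bash
docker -H tcp://<remote-host>:4243 info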
Docker and local DNS server warnings
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Systems which are running Ubuntu or an Ubuntu derivative on the desktop will
use `127.0.0.1` as the default nameserver in `/etc/resolv.conf`. NetworkManager
sets up dnsmasq to use the real DNS servers of the connection and sets up
`nameserver 127.0.0.1` in `/etc/resolv.conf`.
When starting containers on these desktop machines, users will see a warning:
.. code-block:: bash
WARNING: Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : [8.8.8.8 8.8.4.4]
This warning is shown because the containers can't use the local DNS nameserver
and Docker will default to using an external nameserver.
This can be worked around by specifying a DNS server to be used by the Docker
daemon for the containers:
.. code-block:: bash
sudo nano /etc/default/docker
---
# Add:
DOCKER_OPTS="--dns 8.8.8.8"
# 8.8.8.8 could be replaced with a local DNS server, such as 192.168.1.1
# multiple DNS servers can be specified: --dns 8.8.8.8 --dns 192.168.1.1
The Docker daemon has to be restarted:
.. code-block:: bash
sudo restart docker
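To confirm that containers can now resolve names, one possible check is a
lookup inside a throwaway container; a sketch using the small ``busybox`` image:
.. code-block:: bash
sudo docker run busybox nslookup docker.io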
.. warning:: If you're doing this on a laptop which connects to various networks, make sure to choose a public DNS server.
An alternative solution involves disabling dnsmasq in NetworkManager by
following these steps:
.. code-block:: bash
sudo nano /etc/NetworkManager/NetworkManager.conf
----
# Change:
dns=dnsmasq
# to
#dns=dnsmasq
NetworkManager and Docker need to be restarted afterwards:
.. code-block:: bash
sudo restart network-manager
sudo restart docker
.. warning:: This might make DNS resolution slower on some networks.
.. _installmirrors:
Mirrors
^^^^^^^
You should ``ping get.docker.io`` and compare the latency to the
following mirrors, and pick whichever one is best for you.
Yandex
------
`Yandex <http://yandex.ru/>`_ in Russia is mirroring the Docker Debian
packages, updating every 6 hours. Substitute
``http://mirror.yandex.ru/mirrors/docker/`` for
``http://get.docker.io/ubuntu`` in the instructions above. For example:
.. code-block:: bash
sudo sh -c "echo deb http://mirror.yandex.ru/mirrors/docker/ docker main\
> /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install lxc-docker

View file

@ -1,72 +0,0 @@
:title: Installation on Windows
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Docker, Docker documentation, Windows, requirements, virtualbox, boot2docker
.. _windows:
Microsoft Windows
=================
Docker can run on Windows using a virtualization platform like VirtualBox. A Linux
distribution is run inside a virtual machine and that's where Docker will run.
Installation
------------
.. include:: install_header.inc
1. Install VirtualBox from https://www.virtualbox.org - or follow this `tutorial <http://www.slideshare.net/julienbarbier42/install-virtualbox-on-windows-7>`_.
2. Download the latest boot2docker.iso from https://github.com/boot2docker/boot2docker/releases.
3. Start VirtualBox.
4. Create a new Virtual machine with the following settings:
- `Name: boot2docker`
- `Type: Linux`
- `Version: Linux 2.6 (64 bit)`
- `Memory size: 1024 MB`
- `Hard drive: Do not add a virtual hard drive`
5. Open the settings of the virtual machine:
5.1. Go to Storage.
5.2. Click the empty slot below `Controller: IDE`.
5.3. Click the disc icon on the right of `IDE Secondary Master`.
5.4. Click `Choose a virtual CD/DVD disk file`.
6. Browse to the path where you've saved `boot2docker.iso`, select it, and click Open.
7. Click OK on the Settings dialog to save the changes and close the window.
8. Start the virtual machine by clicking the green start button.
9. The boot2docker virtual machine should boot now.
Running Docker
--------------
boot2docker will log you in automatically so you can start using Docker right
away.
Let's try the “hello world” example. Run
.. code-block:: bash
docker run busybox echo hello world
This will download the small busybox image and print hello world.
Observations
------------
Persistent storage
``````````````````
The virtual machine created above lacks any persistent data storage. All images
and containers will be lost when shutting down or rebooting the VM.

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -1,306 +0,0 @@
:title: docker.io Accounts API
:description: API Documentation for docker.io accounts.
:keywords: API, Docker, accounts, REST, documentation
======================
docker.io Accounts API
======================
1. Endpoints
============
1.1 Get a single user
^^^^^^^^^^^^^^^^^^^^^
.. http:get:: /api/v1.1/users/:username/
Get profile info for the specified user.
:param username: username of the user whose profile info is being requested.
:reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token.
:statuscode 200: success, user data returned.
:statuscode 401: authentication error.
:statuscode 403: permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have ``profile_read`` scope.
:statuscode 404: the specified username does not exist.
**Example request**:
.. sourcecode:: http
GET /api/v1.1/users/janedoe/ HTTP/1.1
Host: www.docker.io
Accept: application/json
Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ=
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"id": 2,
"username": "janedoe",
"url": "https://www.docker.io/api/v1.1/users/janedoe/",
"date_joined": "2014-02-12T17:58:01.431312Z",
"type": "User",
"full_name": "Jane Doe",
"location": "San Francisco, CA",
"company": "Success, Inc.",
"profile_url": "https://docker.io/",
"gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm"
"email": "jane.doe@example.com",
"is_active": true
}
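For illustration only, the same request could be issued with ``curl`` and
HTTP Basic authentication (the credentials below are placeholders):
.. code-block:: bash
curl -u janedoe:password -H "Accept: application/json" \
 https://www.docker.io/api/v1.1/users/janedoe/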
1.2 Update a single user
^^^^^^^^^^^^^^^^^^^^^^^^
.. http:patch:: /api/v1.1/users/:username/
Update profile info for the specified user.
:param username: username of the user whose profile info is being updated.
:jsonparam string full_name: (optional) the new name of the user.
:jsonparam string location: (optional) the new location.
:jsonparam string company: (optional) the new company of the user.
:jsonparam string profile_url: (optional) the new profile url.
:jsonparam string gravatar_email: (optional) the new Gravatar email address.
:reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token.
:reqheader Content-Type: MIME Type of post data. JSON, url-encoded form data, etc.
:statuscode 200: success, user data updated.
:statuscode 400: post data validation error.
:statuscode 401: authentication error.
:statuscode 403: permission error, authenticated user must be the user whose data is being updated, OAuth access tokens must have ``profile_write`` scope.
:statuscode 404: the specified username does not exist.
**Example request**:
.. sourcecode:: http
PATCH /api/v1.1/users/janedoe/ HTTP/1.1
Host: www.docker.io
Accept: application/json
Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ=
{
"location": "Private Island",
"profile_url": "http://janedoe.com/",
"company": "Retired",
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"id": 2,
"username": "janedoe",
"url": "https://www.docker.io/api/v1.1/users/janedoe/",
"date_joined": "2014-02-12T17:58:01.431312Z",
"type": "User",
"full_name": "Jane Doe",
"location": "Private Island",
"company": "Retired",
"profile_url": "http://janedoe.com/",
"gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm"
"email": "jane.doe@example.com",
"is_active": true
}
1.3 List email addresses for a user
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. http:get:: /api/v1.1/users/:username/emails/
List email info for the specified user.
:param username: username of the user whose email info is being requested.
:reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token.
:statuscode 200: success, email list returned.
:statuscode 401: authentication error.
:statuscode 403: permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have ``email_read`` scope.
:statuscode 404: the specified username does not exist.
**Example request**:
.. sourcecode:: http
GET /api/v1.1/users/janedoe/emails/ HTTP/1.1
Host: www.docker.io
Accept: application/json
Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
[
{
"email": "jane.doe@example.com",
"verified": true,
"primary": true
}
]
1.4 Add email address for a user
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. http:post:: /api/v1.1/users/:username/emails/
Add a new email address to the specified user's account. The email address
must be verified separately; a confirmation email is not automatically sent.
:jsonparam string email: email address to be added.
:reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token.
:reqheader Content-Type: MIME Type of post data. JSON, url-encoded form data, etc.
:statuscode 201: success, new email added.
:statuscode 400: data validation error.
:statuscode 401: authentication error.
:statuscode 403: permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have ``email_write`` scope.
:statuscode 404: the specified username does not exist.
**Example request**:
.. sourcecode:: http
POST /api/v1.1/users/janedoe/emails/ HTTP/1.1
Host: www.docker.io
Accept: application/json
Content-Type: application/json
Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM
{
"email": "jane.doe+other@example.com"
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 201 Created
Content-Type: application/json
{
"email": "jane.doe+other@example.com",
"verified": false,
"primary": false
}
1.5 Update an email address for a user
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. http:patch:: /api/v1.1/users/:username/emails/
Update an email address for the specified user to either verify an email
address or set it as the primary email for the user. You cannot use this
endpoint to un-verify an email address, nor to unset the primary email; you
can only set another email as the primary.
:param username: username of the user whose email info is being updated.
:jsonparam string email: the email address to be updated.
:jsonparam boolean verified: (optional) whether the email address is verified, must be ``true`` or absent.
:jsonparam boolean primary: (optional) whether to set the email address as the primary email, must be ``true`` or absent.
:reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token.
:reqheader Content-Type: MIME Type of post data. JSON, url-encoded form data, etc.
:statuscode 200: success, user's email updated.
:statuscode 400: data validation error.
:statuscode 401: authentication error.
:statuscode 403: permission error, authenticated user must be the user whose data is being updated, OAuth access tokens must have ``email_write`` scope.
:statuscode 404: the specified username or email address does not exist.
**Example request**:
Once you have independently verified an email address:
.. sourcecode:: http
PATCH /api/v1.1/users/janedoe/emails/ HTTP/1.1
Host: www.docker.io
Accept: application/json
Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ=
{
"email": "jane.doe+other@example.com",
"verified": true,
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"email": "jane.doe+other@example.com",
"verified": true,
"primary": false
}
1.6 Delete email address for a user
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. http:delete:: /api/v1.1/users/:username/emails/
Delete an email address from the specified user's account. You cannot
delete a user's primary email address.
:jsonparam string email: email address to be deleted.
:reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token.
:reqheader Content-Type: MIME Type of post data. JSON, url-encoded form data, etc.
:statuscode 204: success, email address removed.
:statuscode 400: validation error.
:statuscode 401: authentication error.
:statuscode 403: permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have ``email_write`` scope.
:statuscode 404: the specified username or email address does not exist.
**Example request**:
.. sourcecode:: http
DELETE /api/v1.1/users/janedoe/emails/ HTTP/1.1
Host: www.docker.io
Accept: application/json
Content-Type: application/json
Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM
{
"email": "jane.doe+other@example.com"
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 204 NO CONTENT
Content-Length: 0

View file

@ -1,251 +0,0 @@
:title: docker.io OAuth API
:description: API Documentation for docker.io's OAuth flow.
:keywords: API, Docker, oauth, REST, documentation
===================
docker.io OAuth API
===================
1. Brief introduction
=====================
Some docker.io API requests will require an access token to authenticate. To
get an access token for a user, that user must first grant your application
access to their docker.io account. In order for them to grant your application
access you must first register your application.
Before continuing, we encourage you to familiarize yourself with
`The OAuth 2.0 Authorization Framework <http://tools.ietf.org/html/rfc6749>`_.
*Also note that all OAuth interactions must take place over https connections*
2. Register Your Application
============================
You will need to register your application with docker.io before users will
be able to grant your application access to their account information. We
are currently approving applications selectively. To request registration
of your application, send an email to support-accounts@docker.com with the
following information:
- The name of your application
- A description of your application and the service it will provide
to docker.io users.
- A callback URI that we will use for redirecting authorization requests to
your application. These are used in the step of getting an Authorization
Code. The domain name of the callback URI will be visible to the user when
they are requested to authorize your application.
When your application is approved you will receive a response from the
docker.io team with your ``client_id`` and ``client_secret`` which your
application will use in the steps of getting an Authorization Code and getting
an Access Token.
3. Endpoints
============
3.1 Get an Authorization Code
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Once you have registered, you are ready to start integrating docker.io accounts
into your application! The process is usually started by a user following a
link in your application to an OAuth Authorization endpoint.
.. http:get:: /api/v1.1/o/authorize/
Request that a docker.io user authorize your application. If the user is
not already logged in, they will be prompted to login. The user is then
presented with a form to authorize your application for the requested
access scope. On submission, the user will be redirected to the specified
``redirect_uri`` with an Authorization Code.
:query client_id: The ``client_id`` given to your application at
registration.
:query response_type: MUST be set to ``code``. This specifies that you
would like an Authorization Code returned.
:query redirect_uri: The URI to redirect back to after the user has
authorized your application. If omitted, the first of your registered
``response_uris`` is used. If included, it must be one of the URIs
which were submitted when registering your application.
:query scope: The extent of access permissions you are requesting.
Currently, the scope options are ``profile_read``, ``profile_write``,
``email_read``, and ``email_write``. Scopes must be separated by a
space. If omitted, the default scopes ``profile_read email_read`` are
used.
:query state: (Recommended) Used by your application to maintain state
between the authorization request and callback to protect against CSRF
attacks.
**Example Request**
Asking the user for authorization.
.. sourcecode:: http
GET /api/v1.1/o/authorize/?client_id=TestClientID&response_type=code&redirect_uri=https%3A//my.app/auth_complete/&scope=profile_read%20email_read&state=abc123 HTTP/1.1
Host: www.docker.io
**Authorization Page**
When the user follows a link, making the above GET request, they will be
asked to login to their docker.io account if they are not already and then
be presented with the following authorization prompt which asks the user
to authorize your application with a description of the requested scopes.
.. image:: _static/io_oauth_authorization_page.png
Once the user allows or denies your Authorization Request, they will be
redirected back to your application. Included in that request will be the
following query parameters:
``code``
The Authorization code generated by the docker.io authorization server.
Present it again to request an Access Token. This code expires in 60
seconds.
``state``
If the ``state`` parameter was present in the authorization request this
will be the exact value received from that request.
``error``
An error message in the event of the user denying the authorization or
some other kind of error with the request.
3.2 Get an Access Token
^^^^^^^^^^^^^^^^^^^^^^^
Once the user has authorized your application, a request will be made to your
application's specified ``redirect_uri`` which includes a ``code`` parameter
that you must then use to get an Access Token.
.. http:post:: /api/v1.1/o/token/
Submit your newly granted Authorization Code and your application's
credentials to receive an Access Token and Refresh Token. The code is valid
for 60 seconds and cannot be used more than once.
:reqheader Authorization: HTTP basic authentication using your
application's ``client_id`` and ``client_secret``
:form grant_type: MUST be set to ``authorization_code``
:form code: The authorization code received from the user's redirect
request.
:form redirect_uri: The same ``redirect_uri`` used in the authentication
request.
**Example Request**
Using an authorization code to get an access token.
.. sourcecode:: http
POST /api/v1.1/o/token/ HTTP/1.1
Host: www.docker.io
Authorization: Basic VGVzdENsaWVudElEOlRlc3RDbGllbnRTZWNyZXQ=
Accept: application/json
Content-Type: application/json
{
"grant_type": "code",
"code": "YXV0aG9yaXphdGlvbl9jb2Rl",
"redirect_uri": "https://my.app/auth_complete/"
}
**Example Response**
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json;charset=UTF-8
{
"username": "janedoe",
"user_id": 42,
"access_token": "t6k2BqgRw59hphQBsbBoPPWLqu6FmS",
"expires_in": 15552000,
"token_type": "Bearer",
"scope": "profile_read email_read",
"refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc"
}
In the case of an error, there will be a non-200 HTTP status code and data
detailing the error.
3.3 Refresh a Token
^^^^^^^^^^^^^^^^^^^
Once the Access Token expires you can use your ``refresh_token`` to have
docker.io issue your application a new Access Token, if the user has not
revoked access from your application.
.. http:post:: /api/v1.1/o/token/
Submit your ``refresh_token`` and application's credentials to receive a
new Access Token and Refresh Token. The ``refresh_token`` can be used
only once.
:reqheader Authorization: HTTP basic authentication using your
application's ``client_id`` and ``client_secret``
:form grant_type: MUST be set to ``refresh_token``
:form refresh_token: The ``refresh_token`` which was issued to your
application.
:form scope: (optional) The scope of the access token to be returned.
Must not include any scope not originally granted by the user and if
omitted is treated as equal to the scope originally granted.
**Example Request**
Refreshing an access token.
.. sourcecode:: http
POST /api/v1.1/o/token/ HTTP/1.1
Host: www.docker.io
Authorization: Basic VGVzdENsaWVudElEOlRlc3RDbGllbnRTZWNyZXQ=
Accept: application/json
Content-Type: application/json
{
"grant_type": "refresh_token",
"refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc",
}
**Example Response**
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json;charset=UTF-8
{
"username": "janedoe",
"user_id": 42,
"access_token": "t6k2BqgRw59hphQBsbBoPPWLqu6FmS",
"expires_in": 15552000,
"token_type": "Bearer",
"scope": "profile_read email_read",
"refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc"
}
In the case of an error, there will be a non-200 HTTP status code and data
detailing the error.
4. Use an Access Token with the API
===================================
Many of the docker.io API requests will require an Authorization request header
field. Simply ensure you add this header with "Bearer <``access_token``>":
.. sourcecode:: http
GET /api/v1.1/resource HTTP/1.1
Host: docker.io
Authorization: Bearer 2YotnFZFEjr1zCsicMWpAA
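As a sketch, the equivalent request with ``curl`` (the token is the
placeholder value from the example above):
.. code-block:: bash
curl -H "Authorization: Bearer 2YotnFZFEjr1zCsicMWpAA" \
 https://docker.io/api/v1.1/resource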

View file

@ -1,404 +0,0 @@
:title: Remote API
:description: API Documentation for Docker
:keywords: API, Docker, rcli, REST, documentation
.. COMMENT use https://pythonhosted.org/sphinxcontrib-httpdomain/ to
.. document the REST API.
=================
Docker Remote API
=================
1. Brief introduction
=====================
- The Remote API is replacing rcli
- By default the Docker daemon listens on unix:///var/run/docker.sock and the client must have root access to interact with the daemon
- If a group named *docker* exists on your system, docker will apply ownership of the socket to the group
- The API tends to be REST, but for some complex commands, like attach
or pull, the HTTP connection is hijacked to transport stdout stdin
and stderr
- Since API version 1.2, the auth configuration is now handled client
side, so the client has to send the authConfig as POST in
/images/(name)/push
- authConfig, set as the ``X-Registry-Auth`` header, is currently a Base64 encoded (json) string with credentials:
``{'username': string, 'password': string, 'email': string, 'serveraddress' : string}``
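As an illustrative sketch, such a header value can be produced in the shell
(placeholder credentials; ``-w 0`` is the GNU ``base64`` flag that disables
line wrapping):
.. code-block:: bash
echo -n '{"username": "janedoe", "password": "secret", "email": "jane.doe@example.com", "serveraddress": "https://index.docker.io/v1/"}' | base64 -w 0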
2. Versions
===========
The current version of the API is 1.11
Calling /images/<name>/insert is the same as calling
/v1.11/images/<name>/insert.
You can still call an older version of the API by using its version prefix, e.g.
/v1.10/images/<name>/insert.
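For instance, assuming the daemon also listens on a TCP port (e.g. started
with ``-H tcp://127.0.0.1:4243``), a sketch of unversioned and versioned calls:
.. code-block:: bash
# Unversioned: handled by the current API version (1.11).
curl http://127.0.0.1:4243/version
# Pinned to an older API version.
curl http://127.0.0.1:4243/v1.10/version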
v1.11
*****
Full Documentation
------------------
:doc:`docker_remote_api_v1.11`
What's new
----------
.. http:get:: /events
**New!** You can now use the ``until`` query parameter to close the connection after a given timestamp.
v1.10
*****
Full Documentation
------------------
:doc:`docker_remote_api_v1.10`
What's new
----------
.. http:delete:: /images/(name)
**New!** You can now use the force parameter to force the deletion of an image, even if it's
tagged in multiple repositories.
**New!** You can now use the noprune parameter to prevent the deletion of parent images.
.. http:delete:: /containers/(id)
**New!** You can now use the force parameter to force-delete a container, even if
it is currently running.
v1.9
****
Full Documentation
------------------
:doc:`docker_remote_api_v1.9`
What's new
----------
.. http:post:: /build
**New!** This endpoint now takes a serialized ConfigFile which it uses to
resolve the proper registry auth credentials for pulling the base image.
Clients which previously implemented the version accepting an AuthConfig
object must be updated.
v1.8
****
Full Documentation
------------------
What's new
----------
.. http:post:: /build
**New!** This endpoint now returns build status as json stream. In case
of a build error, it returns the exit status of the failed command.
.. http:get:: /containers/(id)/json
**New!** This endpoint now returns the host config for the container.
.. http:post:: /images/create
.. http:post:: /images/(name)/insert
.. http:post:: /images/(name)/push
**New!** progressDetail object was added in the JSON. It's now possible
to get the current value and the total of the progress without having to
parse the string.
v1.7
****
Full Documentation
------------------
What's new
----------
.. http:get:: /images/json
The format of the JSON returned from this URI changed. Instead of an entry
for each repo/tag on an image, each image is only represented once, with a
nested attribute indicating the repo/tags that apply to that image.
Instead of:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
[
{
"VirtualSize": 131506275,
"Size": 131506275,
"Created": 1365714795,
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Tag": "12.04",
"Repository": "ubuntu"
},
{
"VirtualSize": 131506275,
"Size": 131506275,
"Created": 1365714795,
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Tag": "latest",
"Repository": "ubuntu"
},
{
"VirtualSize": 131506275,
"Size": 131506275,
"Created": 1365714795,
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Tag": "precise",
"Repository": "ubuntu"
},
{
"VirtualSize": 180116135,
"Size": 24653,
"Created": 1364102658,
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"Tag": "12.10",
"Repository": "ubuntu"
},
{
"VirtualSize": 180116135,
"Size": 24653,
"Created": 1364102658,
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"Tag": "quantal",
"Repository": "ubuntu"
}
]
The returned json looks like this:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
[
{
"RepoTags": [
"ubuntu:12.04",
"ubuntu:precise",
"ubuntu:latest"
],
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Created": 1365714795,
"Size": 131506275,
"VirtualSize": 131506275
},
{
"RepoTags": [
"ubuntu:12.10",
"ubuntu:quantal"
],
"ParentId": "27cf784147099545",
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"Created": 1364102658,
"Size": 24653,
"VirtualSize": 180116135
}
]
.. http:get:: /images/viz
This URI no longer exists. The ``images --viz`` output is now generated in
the client, using the ``/images/json`` data.
v1.6
****
Full Documentation
------------------
What's new
----------
.. http:post:: /containers/(id)/attach
**New!** You can now split stderr from stdout. This is done by prefixing
a header to each transmission. See :http:post:`/containers/(id)/attach`.
The WebSocket attach is unchanged.
Note that attach calls on the previous API version didn't change. Stdout and
stderr are merged.
v1.5
****
Full Documentation
------------------
What's new
----------
.. http:post:: /images/create
**New!** You can now pass registry credentials (via an AuthConfig object)
through the `X-Registry-Auth` header
.. http:post:: /images/(name)/push
**New!** The AuthConfig object now needs to be passed through
the `X-Registry-Auth` header
.. http:get:: /containers/json
**New!** The format of the `Ports` entry has been changed to a list of
dicts each containing `PublicPort`, `PrivatePort` and `Type` describing a
port mapping.
v1.4
****
Full Documentation
------------------
What's new
----------
.. http:post:: /images/create
**New!** When pulling a repo, all images are now downloaded in parallel.
.. http:get:: /containers/(id)/top
**New!** You can now use ps args with docker top, like `docker top <container_id> aux`
.. http:get:: /events
**New!** Image's name added in the events
v1.3
****
docker v0.5.0 51f6c4a_
Full Documentation
------------------
What's new
----------
.. http:get:: /containers/(id)/top
List the processes running inside a container.
.. http:get:: /events
**New!** Monitor docker's events via streaming or via polling
Builder (/build):
- Simplify the upload of the build context
- Simply stream a tarball instead of multipart upload with 4
intermediary buffers
- Simpler, less memory usage, less disk usage and faster
.. Warning::
The /build improvements are not reverse-compatible. Pre 1.3 clients
will break on /build.
List containers (/containers/json):
- You can use size=1 to get the size of the containers
Start containers (/containers/<id>/start):
- You can now pass host-specific configuration (e.g. bind mounts) in
the POST body for start calls
v1.2
****
docker v0.4.2 2e7649b_
Full Documentation
------------------
What's new
----------
The auth configuration is now handled by the client.
The client should send its authConfig as POST on each call of
/images/(name)/push
.. http:get:: /auth
**Deprecated.**
.. http:post:: /auth
Only checks the configuration but doesn't store it on the server
Deleting an image is now improved: it will only untag the image if it
has children, and will remove all untagged parents if it has any.
.. http:post:: /images/<name>/delete
Now returns a JSON structure with the list of images
deleted/untagged.
v1.1
****
docker v0.4.0 a8ae398_
Full Documentation
------------------
What's new
----------
.. http:post:: /images/create
.. http:post:: /images/(name)/insert
.. http:post:: /images/(name)/push
Uses a JSON stream instead of HTTP hijack; it looks like this:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{"status":"Pushing..."}
{"status":"Pushing", "progress":"1/? (n/a)"}
{"error":"Invalid..."}
...
v1.0
****
docker v0.3.4 8d73740_
Full Documentation
------------------
What's new
----------
Initial version
.. _a8ae398: https://github.com/dotcloud/docker/commit/a8ae398bf52e97148ee7bd0d5868de2e15bd297f
.. _8d73740: https://github.com/dotcloud/docker/commit/8d73740343778651c09160cde9661f5f387b36f4
.. _2e7649b: https://github.com/dotcloud/docker/commit/2e7649beda7c820793bd46766cbc2cfeace7b168
.. _51f6c4a: https://github.com/dotcloud/docker/commit/51f6c4a7372450d164c61e0054daf0223ddbd909

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -1,20 +0,0 @@
:title: API Documentation
:description: docker documentation
:keywords: docker, api, documentation
APIs
====
Your programs and scripts can access Docker's functionality via these interfaces:
.. toctree::
:maxdepth: 3
registry_index_spec
registry_api
index_api
docker_remote_api
remote_api_client_libraries
docker_io_oauth_api
docker_io_accounts_api

View file

@ -1,556 +0,0 @@
:title: Index API
:description: API Documentation for Docker Index
:keywords: API, Docker, index, REST, documentation
=================
Docker Index API
=================
1. Brief introduction
=====================
- This is the REST API for the Docker index
- Authorization is done with basic auth over SSL
- Not all commands require authentication, only those noted as such.
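As a quick illustration of the basic-auth style used throughout, checking
your login (see *User Login* below) might look like this sketch
(placeholder credentials):
.. code-block:: bash
curl -u foobar:toto42 https://index.docker.io/v1/users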
2. Endpoints
============
2.1 Repository
^^^^^^^^^^^^^^
Repositories
*************
User Repo
~~~~~~~~~
.. http:put:: /v1/repositories/(namespace)/(repo_name)/
Create a user repository with the given ``namespace`` and ``repo_name``.
**Example Request**:
.. sourcecode:: http
PUT /v1/repositories/foo/bar/ HTTP/1.1
Host: index.docker.io
Accept: application/json
Content-Type: application/json
Authorization: Basic akmklmasadalkm==
X-Docker-Token: true
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"}]
:parameter namespace: the namespace for the repo
:parameter repo_name: the name for the repo
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
WWW-Authenticate: Token signature=123abc,repository="foo/bar",access=write
X-Docker-Token: signature=123abc,repository="foo/bar",access=write
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
""
:statuscode 200: Created
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
:statuscode 401: Unauthorized
:statuscode 403: Account is not Active
.. http:delete:: /v1/repositories/(namespace)/(repo_name)/
Delete a user repository with the given ``namespace`` and ``repo_name``.
**Example Request**:
.. sourcecode:: http
DELETE /v1/repositories/foo/bar/ HTTP/1.1
Host: index.docker.io
Accept: application/json
Content-Type: application/json
Authorization: Basic akmklmasadalkm==
X-Docker-Token: true
""
:parameter namespace: the namespace for the repo
:parameter repo_name: the name for the repo
**Example Response**:
.. sourcecode:: http
HTTP/1.1 202
Vary: Accept
Content-Type: application/json
WWW-Authenticate: Token signature=123abc,repository="foo/bar",access=delete
X-Docker-Token: signature=123abc,repository="foo/bar",access=delete
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
""
:statuscode 200: Deleted
:statuscode 202: Accepted
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
:statuscode 401: Unauthorized
:statuscode 403: Account is not Active
Library Repo
~~~~~~~~~~~~
.. http:put:: /v1/repositories/(repo_name)/
Create a library repository with the given ``repo_name``.
This is a restricted feature only available to docker admins.
When the namespace is missing, it is assumed to be ``library``.
**Example Request**:
.. sourcecode:: http
PUT /v1/repositories/foobar/ HTTP/1.1
Host: index.docker.io
Accept: application/json
Content-Type: application/json
Authorization: Basic akmklmasadalkm==
X-Docker-Token: true
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"}]
:parameter repo_name: the library name for the repo
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
WWW-Authenticate: Token signature=123abc,repository="library/foobar",access=write
X-Docker-Token: signature=123abc,repository="foo/bar",access=write
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
""
:statuscode 200: Created
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
:statuscode 401: Unauthorized
:statuscode 403: Account is not Active
.. http:delete:: /v1/repositories/(repo_name)/
Delete a library repository with the given ``repo_name``.
This is a restricted feature only available to docker admins.
When the namespace is missing, it is assumed to be ``library``.
**Example Request**:
.. sourcecode:: http
DELETE /v1/repositories/foobar/ HTTP/1.1
Host: index.docker.io
Accept: application/json
Content-Type: application/json
Authorization: Basic akmklmasadalkm==
X-Docker-Token: true
""
:parameter repo_name: the library name for the repo
**Example Response**:
.. sourcecode:: http
HTTP/1.1 202
Vary: Accept
Content-Type: application/json
WWW-Authenticate: Token signature=123abc,repository="library/foobar",access=delete
X-Docker-Token: signature=123abc,repository="foo/bar",access=delete
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
""
:statuscode 200: Deleted
:statuscode 202: Accepted
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
:statuscode 401: Unauthorized
:statuscode 403: Account is not Active
Repository Images
*****************
User Repo Images
~~~~~~~~~~~~~~~~
.. http:put:: /v1/repositories/(namespace)/(repo_name)/images
Update the images for a user repo.
**Example Request**:
.. sourcecode:: http
PUT /v1/repositories/foo/bar/images HTTP/1.1
Host: index.docker.io
Accept: application/json
Content-Type: application/json
Authorization: Basic akmklmasadalkm==
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
"checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}]
:parameter namespace: the namespace for the repo
:parameter repo_name: the name for the repo
**Example Response**:
.. sourcecode:: http
HTTP/1.1 204
Vary: Accept
Content-Type: application/json
""
:statuscode 204: Created
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
:statuscode 401: Unauthorized
:statuscode 403: Account is not Active or permission denied
.. http:get:: /v1/repositories/(namespace)/(repo_name)/images
get the images for a user repo.
**Example Request**:
.. sourcecode:: http
GET /v1/repositories/foo/bar/images HTTP/1.1
Host: index.docker.io
Accept: application/json
:parameter namespace: the namespace for the repo
:parameter repo_name: the name for the repo
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
"checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"},
{"id": "ertwetewtwe38722009fe6857087b486531f9a779a0c1dfddgfgsdgdsgds",
"checksum": "34t23f23fc17e3ed29dae8f12c4f9e89cc6f0bsdfgfsdgdsgdsgerwgew"}]
:statuscode 200: OK
:statuscode 404: Not found
Library Repo Images
~~~~~~~~~~~~~~~~~~~
.. http:put:: /v1/repositories/(repo_name)/images
Update the images for a library repo.
**Example Request**:
.. sourcecode:: http
PUT /v1/repositories/foobar/images HTTP/1.1
Host: index.docker.io
Accept: application/json
Content-Type: application/json
Authorization: Basic akmklmasadalkm==
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
"checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}]
:parameter repo_name: the library name for the repo
**Example Response**:
.. sourcecode:: http
HTTP/1.1 204
Vary: Accept
Content-Type: application/json
""
:statuscode 204: Created
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
:statuscode 401: Unauthorized
:statuscode 403: Account is not Active or permission denied
.. http:get:: /v1/repositories/(repo_name)/images
get the images for a library repo.
**Example Request**:
.. sourcecode:: http
GET /v1/repositories/foobar/images HTTP/1.1
Host: index.docker.io
Accept: application/json
:parameter repo_name: the library name for the repo
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
"checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"},
{"id": "ertwetewtwe38722009fe6857087b486531f9a779a0c1dfddgfgsdgdsgds",
"checksum": "34t23f23fc17e3ed29dae8f12c4f9e89cc6f0bsdfgfsdgdsgdsgerwgew"}]
:statuscode 200: OK
:statuscode 404: Not found
Repository Authorization
************************
Library Repo
~~~~~~~~~~~~
.. http:put:: /v1/repositories/(repo_name)/auth
authorize a token for a library repo
**Example Request**:
.. sourcecode:: http
PUT /v1/repositories/foobar/auth HTTP/1.1
Host: index.docker.io
Accept: application/json
Authorization: Token signature=123abc,repository="library/foobar",access=write
:parameter repo_name: the library name for the repo
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
"OK"
:statuscode 200: OK
:statuscode 403: Permission denied
:statuscode 404: Not found
User Repo
~~~~~~~~~
.. http:put:: /v1/repositories/(namespace)/(repo_name)/auth
authorize a token for a user repo
**Example Request**:
.. sourcecode:: http
PUT /v1/repositories/foo/bar/auth HTTP/1.1
Host: index.docker.io
Accept: application/json
Authorization: Token signature=123abc,repository="foo/bar",access=write
:parameter namespace: the namespace for the repo
:parameter repo_name: the name for the repo
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
"OK"
:statuscode 200: OK
:statuscode 403: Permission denied
:statuscode 404: Not found
2.2 Users
^^^^^^^^^
User Login
**********
.. http:get:: /v1/users
If you want to check your login, you can try this endpoint:
**Example Request**:
.. sourcecode:: http
GET /v1/users HTTP/1.1
Host: index.docker.io
Accept: application/json
Authorization: Basic akmklmasadalkm==
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
OK
:statuscode 200: no error
:statuscode 401: Unauthorized
:statuscode 403: Account is not Active
User Register
*************
.. http:post:: /v1/users
Registering a new account.
**Example request**:
.. sourcecode:: http
POST /v1/users HTTP/1.1
Host: index.docker.io
Accept: application/json
Content-Type: application/json
{"email": "sam@dotcloud.com",
"password": "toto42",
"username": "foobar"'}
:jsonparameter email: valid email address that needs to be confirmed.
:jsonparameter username: min 4 characters, max 30 characters, must match the regular expression [a-z0-9\_].
:jsonparameter password: min 5 characters
**Example Response**:
.. sourcecode:: http
HTTP/1.1 201 OK
Vary: Accept
Content-Type: application/json
"User Created"
:statuscode 201: User Created
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
Update User
***********
.. http:put:: /v1/users/(username)/
Change a password or email address for the given user. If you pass in an
email, it will be added to your account; it will not remove the old one.
Passwords will be updated.
It is up to the client to verify that the password sent is the one the user
intended. A common approach is to have them type it twice.
**Example Request**:
.. sourcecode:: http
PUT /v1/users/fakeuser/ HTTP/1.1
Host: index.docker.io
Accept: application/json
Content-Type: application/json
Authorization: Basic akmklmasadalkm==
{"email": "sam@dotcloud.com",
"password": "toto42"}
:parameter username: username for the person you want to update
**Example Response**:
.. sourcecode:: http
HTTP/1.1 204
Vary: Accept
Content-Type: application/json
""
:statuscode 204: User Updated
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
:statuscode 401: Unauthorized
:statuscode 403: Account is not Active
:statuscode 404: User not found
2.3 Search
^^^^^^^^^^
If you need to search the index, this is the endpoint you would use.
Search
******
.. http:get:: /v1/search
Search the Index given a search term. It accepts :http:method:`get` only.
**Example request**:
.. sourcecode:: http
GET /v1/search?q=search_term HTTP/1.1
Host: example.com
Accept: application/json
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{"query":"search_term",
"num_results": 3,
"results" : [
{"name": "ubuntu", "description": "An ubuntu image..."},
{"name": "centos", "description": "A centos image..."},
{"name": "fedora", "description": "A fedora image..."}
]
}
:query q: what you want to search for
:statuscode 200: no error
:statuscode 500: server error

View file

@ -1,504 +0,0 @@
:title: Registry API
:description: API Documentation for Docker Registry
:keywords: API, Docker, index, registry, REST, documentation
===================
Docker Registry API
===================
1. Brief introduction
=====================
- This is the REST API for the Docker Registry
- It stores the images and the graph for a set of repositories
- It does not have user accounts data
- It has no notion of user accounts or authorization
- It delegates authentication and authorization to the Index Auth service using tokens
- It supports different storage backends (S3, cloud files, local FS)
- It doesn't have a local database
- It will be open-sourced at some point
We expect that there will be multiple registries out there. To help to grasp
the context, here are some examples of registries:
- **sponsor registry**: such a registry is provided by a third-party hosting infrastructure as a convenience for their customers and the docker community as a whole. Its costs are supported by the third party, but the management and operation of the registry are supported by dotCloud. It features read/write access, and delegates authentication and authorization to the Index.
- **mirror registry**: such a registry is provided by a third-party hosting infrastructure but is targeted at their customers only. Some mechanism (unspecified to date) ensures that public images are pulled from a sponsor registry to the mirror registry, to make sure that the customers of the third-party provider can “docker pull” those images locally.
- **vendor registry**: such a registry is provided by a software vendor, who wants to distribute docker images. It would be operated and managed by the vendor. Only users authorized by the vendor would be able to get write access. Some images would be public (accessible for anyone), others private (accessible only for authorized users). Authentication and authorization would be delegated to the Index. The goal of vendor registries is to let someone do “docker pull basho/riak1.3” and automatically pull from the vendor registry (instead of a sponsor registry); i.e. get all the convenience of a sponsor registry, while retaining control on the asset distribution.
- **private registry**: such a registry is located behind a firewall, or protected by an additional security layer (HTTP authorization, SSL client-side certificates, IP address authorization...). The registry is operated by a private entity, outside of dotCloud's control. It can optionally delegate additional authorization to the Index, but it is not mandatory.
.. note::
Mirror registries and private registries which do not use the Index don't even need to run the registry code. They can be implemented by any kind of transport implementing HTTP GET and PUT. Read-only registries can be powered by a simple static HTTP server.
.. note::
The latter implies that while HTTP is the protocol of choice for a registry, multiple schemes are possible (and in some cases, trivial):
- HTTP with GET (and PUT for read-write registries);
- local mount point;
- remote docker addressed through SSH.
The latter would only require two new commands in docker, e.g. ``registryget``
and ``registryput``, wrapping access to the local filesystem (and optionally
doing consistency checks). Authentication and authorization are then delegated
to SSH (e.g. with public keys).
2. Endpoints
============
2.1 Images
----------
Layer
*****
.. http:get:: /v1/images/(image_id)/layer
get image layer for a given ``image_id``
**Example Request**:
.. sourcecode:: http
GET /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/layer HTTP/1.1
Host: registry-1.docker.io
Accept: application/json
Content-Type: application/json
Authorization: Token signature=123abc,repository="foo/bar",access=read
:parameter image_id: the id for the layer you want to get
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
X-Docker-Registry-Version: 0.6.0
Cookie: (Cookie provided by the Registry)
{layer binary data stream}
:statuscode 200: OK
:statuscode 401: Requires authorization
:statuscode 404: Image not found
.. http:put:: /v1/images/(image_id)/layer
put image layer for a given ``image_id``
**Example Request**:
.. sourcecode:: http
PUT /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/layer HTTP/1.1
Host: registry-1.docker.io
Transfer-Encoding: chunked
Authorization: Token signature=123abc,repository="foo/bar",access=write
{layer binary data stream}
:parameter image_id: the id for the layer you want to get
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
X-Docker-Registry-Version: 0.6.0
""
:statuscode 200: OK
:statuscode 401: Requires authorization
:statuscode 404: Image not found
Image
*****
.. http:put:: /v1/images/(image_id)/json
put image for a given ``image_id``
**Example Request**:
.. sourcecode:: http
PUT /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/json HTTP/1.1
Host: registry-1.docker.io
Accept: application/json
Content-Type: application/json
Cookie: (Cookie provided by the Registry)
{
id: "088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c",
parent: "aeee6396d62273d180a49c96c62e45438d87c7da4a5cf5d2be6bee4e21bc226f",
created: "2013-04-30T17:46:10.843673+03:00",
container: "8305672a76cc5e3d168f97221106ced35a76ec7ddbb03209b0f0d96bf74f6ef7",
container_config: {
Hostname: "host-test",
User: "",
Memory: 0,
MemorySwap: 0,
AttachStdin: false,
AttachStdout: false,
AttachStderr: false,
PortSpecs: null,
Tty: false,
OpenStdin: false,
StdinOnce: false,
Env: null,
Cmd: [
"/bin/bash",
"-c",
"apt-get -q -yy -f install libevent-dev"
],
Dns: null,
Image: "imagename/blah",
Volumes: { },
VolumesFrom: ""
},
docker_version: "0.1.7"
}
:parameter image_id: the id for the layer you want to get
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
X-Docker-Registry-Version: 0.6.0
""
:statuscode 200: OK
:statuscode 401: Requires authorization
.. http:get:: /v1/images/(image_id)/json
get image for a given ``image_id``
**Example Request**:
.. sourcecode:: http
GET /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/json HTTP/1.1
Host: registry-1.docker.io
Accept: application/json
Content-Type: application/json
Cookie: (Cookie provided by the Registry)
:parameter image_id: the id for the layer you want to get
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
X-Docker-Registry-Version: 0.6.0
X-Docker-Size: 456789
X-Docker-Checksum: b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087
{
id: "088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c",
parent: "aeee6396d62273d180a49c96c62e45438d87c7da4a5cf5d2be6bee4e21bc226f",
created: "2013-04-30T17:46:10.843673+03:00",
container: "8305672a76cc5e3d168f97221106ced35a76ec7ddbb03209b0f0d96bf74f6ef7",
container_config: {
Hostname: "host-test",
User: "",
Memory: 0,
MemorySwap: 0,
AttachStdin: false,
AttachStdout: false,
AttachStderr: false,
PortSpecs: null,
Tty: false,
OpenStdin: false,
StdinOnce: false,
Env: null,
Cmd: [
"/bin/bash",
"-c",
"apt-get -q -yy -f install libevent-dev"
],
Dns: null,
Image: "imagename/blah",
Volumes: { },
VolumesFrom: ""
},
docker_version: "0.1.7"
}
:statuscode 200: OK
:statuscode 401: Requires authorization
:statuscode 404: Image not found
Ancestry
********
.. http:get:: /v1/images/(image_id)/ancestry
get ancestry for an image given an ``image_id``
**Example Request**:
.. sourcecode:: http
GET /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/ancestry HTTP/1.1
Host: registry-1.docker.io
Accept: application/json
Content-Type: application/json
Cookie: (Cookie provided by the Registry)
:parameter image_id: the id for the layer you want to get
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
X-Docker-Registry-Version: 0.6.0
["088b4502f51920fbd9b7c503e87c7a2c05aa3adc3d35e79c031fa126b403200f",
"aeee63968d87c7da4a5cf5d2be6bee4e21bc226fd62273d180a49c96c62e4543",
"bfa4c5326bc764280b0863b46a4b20d940bc1897ef9c1dfec060604bdc383280",
"6ab5893c6927c15a15665191f2c6cf751f5056d8b95ceee32e43c5e8a3648544"]
:statuscode 200: OK
:statuscode 401: Requires authorization
:statuscode 404: Image not found
2.2 Tags
--------
.. http:get:: /v1/repositories/(namespace)/(repository)/tags
get all of the tags for the given repo.
**Example Request**:
.. sourcecode:: http
GET /v1/repositories/foo/bar/tags HTTP/1.1
Host: registry-1.docker.io
Accept: application/json
Content-Type: application/json
X-Docker-Registry-Version: 0.6.0
Cookie: (Cookie provided by the Registry)
:parameter namespace: namespace for the repo
:parameter repository: name for the repo
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
X-Docker-Registry-Version: 0.6.0
{
"latest": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
"0.1.1": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"
}
:statuscode 200: OK
:statuscode 401: Requires authorization
:statuscode 404: Repository not found
.. http:get:: /v1/repositories/(namespace)/(repository)/tags/(tag)
get a tag for the given repo.
**Example Request**:
.. sourcecode:: http
GET /v1/repositories/foo/bar/tags/latest HTTP/1.1
Host: registry-1.docker.io
Accept: application/json
Content-Type: application/json
X-Docker-Registry-Version: 0.6.0
Cookie: (Cookie provided by the Registry)
:parameter namespace: namespace for the repo
:parameter repository: name for the repo
:parameter tag: name of tag you want to get
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
X-Docker-Registry-Version: 0.6.0
"9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"
:statuscode 200: OK
:statuscode 401: Requires authorization
:statuscode 404: Tag not found
.. http:delete:: /v1/repositories/(namespace)/(repository)/tags/(tag)
delete the tag for the repo
**Example Request**:
.. sourcecode:: http
DELETE /v1/repositories/foo/bar/tags/latest HTTP/1.1
Host: registry-1.docker.io
Accept: application/json
Content-Type: application/json
Cookie: (Cookie provided by the Registry)
:parameter namespace: namespace for the repo
:parameter repository: name for the repo
:parameter tag: name of tag you want to delete
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
X-Docker-Registry-Version: 0.6.0
""
:statuscode 200: OK
:statuscode 401: Requires authorization
:statuscode 404: Tag not found
.. http:put:: /v1/repositories/(namespace)/(repository)/tags/(tag)
put a tag for the given repo.
**Example Request**:
.. sourcecode:: http
PUT /v1/repositories/foo/bar/tags/latest HTTP/1.1
Host: registry-1.docker.io
Accept: application/json
Content-Type: application/json
Cookie: (Cookie provided by the Registry)
"9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"
:parameter namespace: namespace for the repo
:parameter repository: name for the repo
:parameter tag: name of tag you want to add
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
X-Docker-Registry-Version: 0.6.0
""
:statuscode 200: OK
:statuscode 400: Invalid data
:statuscode 401: Requires authorization
:statuscode 404: Image not found
2.3 Repositories
----------------
.. http:delete:: /v1/repositories/(namespace)/(repository)/
delete a repository
**Example Request**:
.. sourcecode:: http
DELETE /v1/repositories/foo/bar/ HTTP/1.1
Host: registry-1.docker.io
Accept: application/json
Content-Type: application/json
Cookie: (Cookie provided by the Registry)
""
:parameter namespace: namespace for the repo
:parameter repository: name for the repo
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
X-Docker-Registry-Version: 0.6.0
""
:statuscode 200: OK
:statuscode 401: Requires authorization
:statuscode 404: Repository not found
2.4 Status
----------
.. http:get:: /v1/_ping
Check status of the registry. This endpoint is also used to determine if
the registry supports SSL.
**Example Request**:
.. sourcecode:: http
GET /v1/_ping HTTP/1.1
Host: registry-1.docker.io
Accept: application/json
Content-Type: application/json
""
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200
Vary: Accept
Content-Type: application/json
X-Docker-Registry-Version: 0.6.0
""
:statuscode 200: OK
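A sketch of checking the registry status from the shell (``-i`` prints the
response headers, including ``X-Docker-Registry-Version``):
.. code-block:: bash
curl -i https://registry-1.docker.io/v1/_ping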
3 Authorization
===============
This is where we describe the authorization process, including the tokens and cookies.
TODO: add more info.

View file

@ -1,622 +0,0 @@
:title: Registry Documentation
:description: Documentation for docker Registry and Registry API
:keywords: docker, registry, api, index
.. _registryindexspec:
=====================
Registry & Index Spec
=====================
1. The 3 roles
===============
1.1 Index
---------
The Index is responsible for centralizing information about:
- User accounts
- Checksums of the images
- Public namespaces
The Index has different components:
- Web UI
- Meta-data store (comments, stars, list public repositories)
- Authentication service
- Tokenization
The Index is authoritative for this information.
We expect that there will be only one instance of the index, run and managed by Docker Inc.
1.2 Registry
------------
- It stores the images and the graph for a set of repositories
- It does not have user accounts data
- It has no notion of user accounts or authorization
- It delegates authentication and authorization to the Index Auth service using tokens
- It supports different storage backends (S3, cloud files, local FS)
- It doesn't have a local database
- `Source Code <https://github.com/dotcloud/docker-registry>`_
We expect that there will be multiple registries out there. To help to grasp the context, here are some examples of registries:
- **sponsor registry**: such a registry is provided by a third-party hosting infrastructure as a convenience for their customers and the docker community as a whole. Its costs are supported by the third party, but the management and operation of the registry are supported by dotCloud. It features read/write access, and delegates authentication and authorization to the Index.
- **mirror registry**: such a registry is provided by a third-party hosting infrastructure but is targeted at their customers only. Some mechanism (unspecified to date) ensures that public images are pulled from a sponsor registry to the mirror registry, to make sure that the customers of the third-party provider can “docker pull” those images locally.
- **vendor registry**: such a registry is provided by a software vendor, who wants to distribute docker images. It would be operated and managed by the vendor. Only users authorized by the vendor would be able to get write access. Some images would be public (accessible for anyone), others private (accessible only for authorized users). Authentication and authorization would be delegated to the Index. The goal of vendor registries is to let someone do “docker pull basho/riak1.3” and automatically pull from the vendor registry (instead of a sponsor registry); i.e. get all the convenience of a sponsor registry, while retaining control over the asset distribution.
- **private registry**: such a registry is located behind a firewall, or protected by an additional security layer (HTTP authorization, SSL client-side certificates, IP address authorization...). The registry is operated by a private entity, outside of dotCloud's control. It can optionally delegate additional authorization to the Index, but it is not mandatory.
.. note::
The latter implies that while HTTP is the protocol of choice for a registry, multiple schemes are possible (and in some cases, trivial):
- HTTP with GET (and PUT for read-write registries);
- local mount point;
- remote docker addressed through SSH.
The latter would only require two new commands in docker, e.g. ``registryget``
and ``registryput``, wrapping access to the local filesystem (and optionally
doing consistency checks). Authentication and authorization are then delegated
to SSH (e.g. with public keys).
1.3 Docker
----------
On top of being a runtime for LXC, Docker is the Registry client. It supports:
- Push / Pull on the registry
- Client authentication on the Index
2. Workflow
===========
2.1 Pull
--------
.. image:: /static_files/docker_pull_chart.png
1. Contact the Index to know where I should download “samalba/busybox”
2. Index replies:
a. ``samalba/busybox`` is on Registry A
b. here are the checksums for ``samalba/busybox`` (for all layers)
c. token
3. Contact Registry A to receive the layers for ``samalba/busybox`` (all of them, down to the base image). Registry A is authoritative for “samalba/busybox” but keeps a copy of all inherited layers and serves them all from the same location.
4. The Registry contacts the Index to verify whether the token/user is allowed to download the images
5. The Index returns true/false, letting the Registry know whether it should proceed or error out
6. Get the payload for all layers
It's possible to run:
.. code-block:: bash
docker pull https://<registry>/repositories/samalba/busybox
In this case, Docker bypasses the Index. However, security is not guaranteed
(in case Registry A is corrupted) because there won't be any checksum checks.
Currently the Registry redirects to S3 URLs for downloads; going forward, all
downloads need to be streamed through the Registry. The Registry will then
abstract the calls to S3 behind a top-level class which implements sub-classes
for S3 and local storage.
A token is only returned when the ``X-Docker-Token`` header is sent with the request.
Basic Auth is required to pull private repos. Basic Auth isn't required for
pulling public repos, but if credentials are provided, they need to be valid and
for an active account.
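As a hedged sketch, step 1 of this handshake could be reproduced with ``curl`` as follows (the credentials are placeholders, and ``index.docker.io`` is assumed here as the Index host):

.. code-block:: bash

    # Ask the Index for the image list and request a pull token;
    # the token and registry endpoints come back in response headers.
    curl -i \
         -u "myuser:mypassword" \
         -H "X-Docker-Token: true" \
         https://index.docker.io/v1/repositories/foo/bar/images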
API (pulling repository foo/bar):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1. (Docker -> Index) GET /v1/repositories/foo/bar/images
**Headers**:
Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
X-Docker-Token: true
**Action**:
(Look up foo/bar in the database and get the images and checksums for that repo: all of them if no tag is specified; if a tag is given, only the checksums for those tags. See part 4.4.1.)
2. (Index -> Docker) HTTP 200 OK
**Headers**:
- Authorization: Token signature=123abc,repository="foo/bar",access=read
- X-Docker-Endpoints: registry.docker.io [, registry2.docker.io]
**Body**:
Jsonified checksums (see part 4.4.1)
3. (Docker -> Registry) GET /v1/repositories/foo/bar/tags/latest
**Headers**:
Authorization: Token signature=123abc,repository="foo/bar",access=read
4. (Registry -> Index) GET /v1/repositories/foo/bar/images
**Headers**:
Authorization: Token signature=123abc,repository="foo/bar",access=read
**Body**:
<ids and checksums in payload>
**Action**:
(Look up the token and see if they have access to pull.)
If good:
HTTP 200 OK
Index will invalidate the token
If bad:
HTTP 401 Unauthorized
5. (Docker -> Registry) GET /v1/images/928374982374/ancestry
**Action**:
(for each image id returned in the registry, fetch /json + /layer)
.. note::
If someone makes a second request, we will always issue a new token; tokens are never reused.
2.2 Push
--------
.. image:: /static_files/docker_push_chart.png
1. Contact the Index to allocate the repository name “samalba/busybox” (authentication required with user credentials)
2. If authentication works and the namespace is available, “samalba/busybox” is allocated and a temporary token is returned (the namespace is marked as initialized in the Index)
3. Push the image to the Registry (along with the token)
4. Registry A contacts the Index to verify the token (the token must correspond to the repository name)
5. The Index validates the token. Registry A starts reading the stream pushed by Docker and stores the repository (with its images)
6. Docker contacts the Index to give the checksums for the uploaded images
.. note::
**It's possible not to use the Index at all!** In this case, a standalone Registry is deployed to store and serve images. Those images are not authenticated and security is not guaranteed.
.. note::
**The Index can be replaced!** For a privately deployed Registry, a custom Index can be used to serve and validate tokens according to different policies.
Docker computes the checksums and submits them to the Index at the end of the
push. When a repository name does not have checksums on the Index, it means
that the push is in progress (since checksums are submitted at the end).
API (pushing repos foo/bar):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1. (Docker -> Index) PUT /v1/repositories/foo/bar/
**Headers**:
Authorization: Basic sdkjfskdjfhsdkjfh==
X-Docker-Token: true
**Action**:
- In the Index, we allocate a new repository and set it to initialized
**Body**:
(The body contains the list of images that are going to be pushed, with empty checksums. The checksums will be set at the end of the push)::
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"}]
2. (Index -> Docker) 200 OK
**Headers**:
- WWW-Authenticate: Token signature=123abc,repository="foo/bar",access=write
- X-Docker-Endpoints: registry.docker.io [, registry2.docker.io]
3. (Docker -> Registry) PUT /v1/images/98765432_parent/json
**Headers**:
Authorization: Token signature=123abc,repository="foo/bar",access=write
4. (Registry->Index) GET /v1/repositories/foo/bar/images
**Headers**:
Authorization: Token signature=123abc,repository="foo/bar",access=write
**Action**:
- Index:
will invalidate the token.
- Registry:
grants a session (if the token is approved) and fetches the image ids
5. (Docker -> Registry) PUT /v1/images/98765432_parent/json
**Headers**:
- Authorization: Token signature=123abc,repository="foo/bar",access=write
- Cookie: (Cookie provided by the Registry)
6. (Docker -> Registry) PUT /v1/images/98765432/json
**Headers**:
Cookie: (Cookie provided by the Registry)
7. (Docker -> Registry) PUT /v1/images/98765432_parent/layer
**Headers**:
Cookie: (Cookie provided by the Registry)
8. (Docker -> Registry) PUT /v1/images/98765432/layer
**Headers**:
X-Docker-Checksum: sha256:436745873465fdjkhdfjkgh
9. (Docker -> Registry) PUT /v1/repositories/foo/bar/tags/latest
**Headers**:
Cookie: (Cookie provided by the Registry)
**Body**:
"98765432"
10. (Docker -> Index) PUT /v1/repositories/foo/bar/images
**Headers**:
Authorization: Basic 123oislifjsldfj==
X-Docker-Endpoints: registry1.docker.io (no validation on this right now)
**Body**:
(The image ids, tags, and checksums)
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
"checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}]
**Return** HTTP 204
.. note::
If a push fails and needs to be started again, what happens in the Index? There will already be a record for the namespace/name, but it will be marked as initialized. Should we allow the retry, or mark the name as already used? One edge case could be someone pushing the same thing at the same time from two different shells.
If it's a retry on the Registry, Docker has a cookie (provided by the Registry after token validation), so the Index won't have to provide a new token.
2.3 Delete
----------
If you need to delete something from the Index or Registry, there needs to be a
nice clean way to do it. Here is the workflow.
1. Docker contacts the index to request a delete of a repository ``samalba/busybox`` (authentication required with user credentials)
2. If authentication works and repository is valid, ``samalba/busybox`` is marked as deleted and a temporary token is returned
3. Send a delete request to the registry for the repository (along with the token)
4. Registry A contacts the Index to verify the token (the token must correspond to the repository name)
5. The Index validates the token. Registry A deletes the repository and everything associated with it.
6. Docker contacts the Index to let it know the repository was removed from the Registry; the Index then removes all records from the database.
.. note::
The Docker client should present an "Are you sure?" prompt to confirm the deletion before starting the process. Once it starts it can't be undone.
API (deleting repository foo/bar):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1. (Docker -> Index) DELETE /v1/repositories/foo/bar/
**Headers**:
Authorization: Basic sdkjfskdjfhsdkjfh==
X-Docker-Token: true
**Action**:
- In the Index, we make sure it is a valid repository and mark it as deleted (logically)
**Body**:
Empty
2. (Index -> Docker) 202 Accepted
**Headers**:
- WWW-Authenticate: Token signature=123abc,repository="foo/bar",access=delete
- X-Docker-Endpoints: registry.docker.io [, registry2.docker.io] # list of endpoints where this repo lives.
3. (Docker -> Registry) DELETE /v1/repositories/foo/bar/
**Headers**:
Authorization: Token signature=123abc,repository="foo/bar",access=delete
4. (Registry->Index) PUT /v1/repositories/foo/bar/auth
**Headers**:
Authorization: Token signature=123abc,repository="foo/bar",access=delete
**Action**::
- Index:
will invalidate the token.
- Registry:
deletes the repository (if token is approved)
5. (Registry -> Docker)
200 if success
403 if forbidden
400 if bad request
404 if repository isn't found
6. (Docker -> Index) DELETE /v1/repositories/foo/bar/
**Headers**:
Authorization: Basic 123oislifjsldfj==
X-Docker-Endpoints: registry-1.docker.io (no validation on this right now)
**Body**:
Empty
**Return** HTTP 200
3. How to use the Registry in standalone mode
=============================================
The Index has two main purposes (along with its fancy social features):
- Resolve short names (to avoid passing absolute URLs all the time)
- username/projectname -> \https://registry.docker.io/users/<username>/repositories/<projectname>/
- team/projectname -> \https://registry.docker.io/team/<team>/repositories/<projectname>/
- Authenticate a user as a repos owner (for a central referenced repository)
3.1 Without an Index
--------------------
Using the Registry without the Index can be useful to store the images on a
private network without having to rely on an external entity controlled by
Docker Inc.
In this case, the Registry will be launched in a special mode (``--standalone``?
``--no-index``?). In this mode, the only thing that changes is that the Registry
will never contact the Index to verify a token. It will be the Registry owner's
responsibility to authenticate the user who pushes (or even pulls) an image
using any mechanism (HTTP auth, IP-based, etc.).
In this scenario, the Registry is responsible for the security in case of data
corruption since the checksums are not delivered by a trusted entity.
As hinted previously, a standalone registry can also be implemented by any HTTP
server handling GET/PUT requests (or even only GET requests if no write access
is necessary).
3.2 With an Index
-----------------
The Index data needed by the Registry is simple:
- Serve the checksums
- Provide and authorize a Token
In the scenario of a Registry running on a private network with the need for
centralizing and authorizing, it's easy to use a custom Index.
The only challenge will be to tell Docker to contact (and trust) this custom
Index. Docker will be configurable at some point to use a specific Index; it'll
be the private entity's responsibility (basically the organization that uses
Docker in a private environment) to maintain the Index and the Docker
configuration among its consumers.
4. The API
==========
The first version of the API is available here: https://github.com/jpetazzo/docker/blob/acd51ecea8f5d3c02b00a08176171c59442df8b3/docs/images-repositories-push-pull.md
4.1 Images
----------
The format returned in the images is not defined here (for layer and JSON),
basically because Registry stores exactly the same kind of information as
Docker uses to manage them.
The format of ancestry is a line-separated list of image ids, in age order,
i.e. the image's parent is on the last line, the parent of the parent on the
next-to-last line, etc.; if the image has no parent, the file is empty.
.. code-block:: bash
GET /v1/images/<image_id>/layer
PUT /v1/images/<image_id>/layer
GET /v1/images/<image_id>/json
PUT /v1/images/<image_id>/json
GET /v1/images/<image_id>/ancestry
PUT /v1/images/<image_id>/ancestry
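For example, fetching an image's ancestry is a plain GET (a sketch; the token/cookie handling described in section 6 is omitted, and the image id is a placeholder):

.. code-block:: bash

    # Fetch the ancestry list for an image
    curl https://registry-1.docker.io/v1/images/<image_id>/ancestry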
4.2 Users
---------
4.2.1 Create a user (Index)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
POST /v1/users
**Body**:
{"email": "sam@dotcloud.com", "password": "toto42", "username": "foobar"'}
**Validation**:
- **username**: min 4 characters, max 30 characters, must match the regular
expression [a-z0-9\_].
- **password**: min 5 characters
**Valid**: return HTTP 200
Errors: HTTP 400 (we should create error codes for possible errors)
- invalid json
- missing field
- wrong format (username, password, email, etc)
- forbidden name
- name already exists
.. note::
A user account will be valid only if the email has been validated (a validation link is sent to the email address).
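A minimal sketch of the account-creation call with ``curl`` (the field values are the examples from above; the Index host is assumed):

.. code-block:: bash

    curl -X POST \
         -H "Content-Type: application/json" \
         -d '{"email": "sam@dotcloud.com", "password": "toto42", "username": "foobar"}' \
         https://index.docker.io/v1/users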
4.2.2 Update a user (Index)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
PUT /v1/users/<username>
**Body**:
{"password": "toto"}
.. note::
We can also update the email address; if the user does, they will need to re-verify the new email address.
4.2.3 Login (Index)
^^^^^^^^^^^^^^^^^^^
Does nothing but ask for user authentication. Can be used to validate
credentials. HTTP Basic Auth for now; this may change in the future.
GET /v1/users
**Return**:
- Valid: HTTP 200
- Invalid login: HTTP 401
- Account inactive: HTTP 403 Account is not Active
4.3 Tags (Registry)
-------------------
The Registry does not know anything about users. Even though repositories are
under usernames, it's just a namespace for the Registry, allowing us to
implement organizations or different namespaces per user later without
modifying the Registry's API.
The following naming restrictions apply:
- Namespaces must match the same regular expression as usernames (See 4.2.1.)
- Repository names must match the regular expression [a-zA-Z0-9-_.]
4.3.1 Get all tags
^^^^^^^^^^^^^^^^^^
GET /v1/repositories/<namespace>/<repository_name>/tags
**Return**: HTTP 200
{
"latest": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
"0.1.1": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"
}
4.3.2 Read the content of a tag (resolve the image id)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
GET /v1/repositories/<namespace>/<repo_name>/tags/<tag>
**Return**:
"9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"
4.3.3 Delete a tag (registry)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
DELETE /v1/repositories/<namespace>/<repo_name>/tags/<tag>
4.4 Images (Index)
------------------
For the Index to “resolve” the repository name to a Registry location, it uses
the ``X-Docker-Endpoints`` header. In other words, these requests always include
an ``X-Docker-Endpoints`` header to indicate the location of the Registry which
hosts this repository.
4.4.1 Get the images
^^^^^^^^^^^^^^^^^^^^^
GET /v1/repositories/<namespace>/<repo_name>/images
**Return**: HTTP 200
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", "checksum": "md5:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}]
4.4.2 Add/update the images
^^^^^^^^^^^^^^^^^^^^^^^^^^^
You always add images, you never remove them.
PUT /v1/repositories/<namespace>/<repo_name>/images
**Body**:
[ {"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", "checksum": "sha256:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"} ]
**Return** 204
4.5 Repositories
----------------
4.5.1 Remove a Repository (Registry)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
DELETE /v1/repositories/<namespace>/<repo_name>
Return 200 OK
4.5.2 Remove a Repository (Index)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This starts the delete process. see 2.3 for more details.
DELETE /v1/repositories/<namespace>/<repo_name>
Return 202 Accepted
5. Chaining Registries
======================
It's possible to chain Registry servers for several reasons:
- Load balancing
- Delegate the next request to another server
When a Registry is a reference for a repository, it should host the entire
image chain in order to avoid breaking the chain during the download.
The Index and Registry use this mechanism to redirect on one or the other.
Example with an image download:
On every request, a special header can be returned::
X-Docker-Endpoints: server1,server2
On the next request, the client will always pick a server from this list.
6. Authentication & Authorization
=================================
6.1 On the Index
-----------------
The Index supports both “Basic” and “Token” challenges. Usually when there is a
``401 Unauthorized``, the Index replies this::
401 Unauthorized
WWW-Authenticate: Basic realm="auth required",Token
You have 3 options:
1. Provide user credentials and ask for a token
**Header**:
- Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
- X-Docker-Token: true
In this case, along with the 200 response, you'll get a new token (if user auth is ok).
If authorization isn't correct, you get a 401 response.
If the account isn't active, you will get a 403 response.
**Response**:
- 200 OK
- X-Docker-Token: Token signature=123abc,repository="foo/bar",access=read
2. Provide user credentials only
**Header**:
Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
3. Provide Token
**Header**:
Authorization: Token signature=123abc,repository="foo/bar",access=read
6.2 On the Registry
-------------------
The Registry only supports the Token challenge::
401 Unauthorized
WWW-Authenticate: Token
The only way is to provide a token on ``401 Unauthorized`` responses::
Authorization: Token signature=123abc,repository="foo/bar",access=read
Usually, the Registry provides a Cookie when a Token verification succeeds.
Every time the Registry passes a Cookie, you have to pass back the same
Cookie::
200 OK
Set-Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=&timestamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4="; Path=/; HttpOnly
Next request::
GET /(...)
Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=&timestamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4="
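``curl`` can model this Cookie round-trip with a cookie jar; a sketch, assuming the token below was obtained from the Index as described in 6.1:

.. code-block:: bash

    # First request: present the token and store the Registry's cookie
    curl -c cookies.txt \
         -H 'Authorization: Token signature=123abc,repository="foo/bar",access=read' \
         https://registry-1.docker.io/v1/repositories/foo/bar/tags
    # Later requests: replay the stored cookie
    curl -b cookies.txt \
         https://registry-1.docker.io/v1/repositories/foo/bar/tags/latest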
7 Document Version
====================
- 1.0 : May 6th 2013 : initial release
- 1.1 : June 1st 2013 : Added Delete Repository and way to handle new source namespace.
@@ -1,55 +0,0 @@
:title: Remote API Client Libraries
:description: Various client libraries available to use with the Docker remote API
:keywords: API, Docker, index, registry, REST, documentation, clients, Python, Ruby, JavaScript, Erlang, Go
==================================
Docker Remote API Client Libraries
==================================
These libraries have not been tested by the Docker Maintainers for
compatibility. Please file issues with the library owners. If you
find more library implementations, please list them in Docker doc bugs
and we will add the libraries here.
+----------------------+----------------+--------------------------------------------+----------+
| Language/Framework | Name | Repository | Status |
+======================+================+============================================+==========+
| Python | docker-py | https://github.com/dotcloud/docker-py | Active |
+----------------------+----------------+--------------------------------------------+----------+
| Ruby | docker-client | https://github.com/geku/docker-client | Outdated |
+----------------------+----------------+--------------------------------------------+----------+
| Ruby | docker-api | https://github.com/swipely/docker-api | Active |
+----------------------+----------------+--------------------------------------------+----------+
| JavaScript (NodeJS) | dockerode | https://github.com/apocas/dockerode | Active |
| | | Install via NPM: `npm install dockerode` | |
+----------------------+----------------+--------------------------------------------+----------+
| JavaScript (NodeJS) | docker.io | https://github.com/appersonlabs/docker.io | Active |
| | | Install via NPM: `npm install docker.io` | |
+----------------------+----------------+--------------------------------------------+----------+
| JavaScript | docker-js | https://github.com/dgoujard/docker-js | Outdated |
+----------------------+----------------+--------------------------------------------+----------+
| JavaScript (Angular) | docker-cp | https://github.com/13W/docker-cp | Active |
| **WebUI** | | | |
+----------------------+----------------+--------------------------------------------+----------+
| JavaScript (Angular) | dockerui | https://github.com/crosbymichael/dockerui | Active |
| **WebUI** | | | |
+----------------------+----------------+--------------------------------------------+----------+
| Java | docker-java | https://github.com/kpelykh/docker-java | Active |
+----------------------+----------------+--------------------------------------------+----------+
| Erlang | erldocker | https://github.com/proger/erldocker | Active |
+----------------------+----------------+--------------------------------------------+----------+
| Go | go-dockerclient| https://github.com/fsouza/go-dockerclient | Active |
+----------------------+----------------+--------------------------------------------+----------+
| Go | dockerclient | https://github.com/samalba/dockerclient | Active |
+----------------------+----------------+--------------------------------------------+----------+
| PHP | Alvine | http://pear.alvine.io/ (alpha) | Active |
+----------------------+----------------+--------------------------------------------+----------+
| PHP | Docker-PHP | http://stage1.github.io/docker-php/ | Active |
+----------------------+----------------+--------------------------------------------+----------+
| Perl | Net::Docker | https://metacpan.org/pod/Net::Docker | Active |
+----------------------+----------------+--------------------------------------------+----------+
| Perl | Eixo::Docker | https://github.com/alambike/eixo-docker | Active |
+----------------------+----------------+--------------------------------------------+----------+
| Scala | reactive-docker| https://github.com/almoehi/reactive-docker | Active |
+----------------------+----------------+--------------------------------------------+----------+
@@ -1,532 +0,0 @@
:title: Dockerfile Reference
:description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image.
:keywords: builder, docker, Dockerfile, automation, image creation
.. _dockerbuilder:
====================
Dockerfile Reference
====================
**Docker can act as a builder** and read instructions from a text
``Dockerfile`` to automate the steps you would otherwise take manually
to create an image. Executing ``docker build`` will run your steps and
commit them along the way, giving you a final image.
.. _dockerfile_usage:
Usage
=====
To :ref:`build <cli_build>` an image from a source repository, create
a description file called ``Dockerfile`` at the root of your
repository. This file will describe the steps to assemble the image.
Then call ``docker build`` with the path of your source repository as
argument (for example, ``.``):
``sudo docker build .``
The path to the source repository defines where to find the *context*
of the build. The build is run by the Docker daemon, not by the CLI,
so the whole context must be transferred to the daemon. The Docker CLI
reports "Uploading context" when the context is sent to the daemon.
You can specify a repository and tag at which to save the new image if the
build succeeds:
``sudo docker build -t shykes/myapp .``
The Docker daemon will run your steps one-by-one, committing the
result to a new image if necessary, before finally outputting the
ID of your new image. The Docker daemon will automatically clean
up the context you sent.
Note that each instruction is run independently, and causes a new image
to be created - so ``RUN cd /tmp`` will not have any effect on the next
instructions.
Whenever possible, Docker will re-use the intermediate images,
accelerating ``docker build`` significantly (indicated by ``Using cache``):
.. code-block:: bash
$ docker build -t SvenDowideit/ambassador .
Uploading context 10.24 kB
Uploading context
Step 1 : FROM docker-ut
---> cbba202fe96b
Step 2 : MAINTAINER SvenDowideit@home.org.au
---> Using cache
---> 51182097be13
Step 3 : CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh && top
---> Using cache
---> 1a5ffc17324d
Successfully built 1a5ffc17324d
When you're done with your build, you're ready to look into
:ref:`image_push`.
.. _dockerfile_format:
Format
======
Here is the format of the Dockerfile:
::
# Comment
INSTRUCTION arguments
Instructions are not case-sensitive; however, convention is for them to be
UPPERCASE in order to distinguish them from arguments more easily.
Docker evaluates the instructions in a Dockerfile in order. **The
first instruction must be `FROM`** in order to specify the
:ref:`base_image_def` from which you are building.
Docker will treat lines that *begin* with ``#`` as a comment. A ``#``
marker anywhere else in the line will be treated as an argument. This
allows statements like:
::
# Comment
RUN echo 'we are running some # of cool things'
.. _dockerfile_instructions:
Here is the set of instructions you can use in a ``Dockerfile`` for
building images.
.. _dockerfile_from:
``FROM``
========
``FROM <image>``
Or
``FROM <image>:<tag>``
The ``FROM`` instruction sets the :ref:`base_image_def` for subsequent
instructions. As such, a valid Dockerfile must have ``FROM`` as its
first instruction. The image can be any valid image -- it is
especially easy to start by **pulling an image** from the
:ref:`using_public_repositories`.
``FROM`` must be the first non-comment instruction in the
``Dockerfile``.
``FROM`` can appear multiple times within a single Dockerfile in order
to create multiple images. Simply make a note of the last image id
output by the commit before each new ``FROM`` command.
If no ``tag`` is given to the ``FROM`` instruction, ``latest`` is
assumed. If the given tag does not exist, an error will be returned.
.. _dockerfile_maintainer:
``MAINTAINER``
==============
``MAINTAINER <name>``
The ``MAINTAINER`` instruction allows you to set the *Author* field of
the generated images.
.. _dockerfile_run:
``RUN``
=======
RUN has 2 forms:
* ``RUN <command>`` (the command is run in a shell - ``/bin/sh -c``)
* ``RUN ["executable", "param1", "param2"]`` (*exec* form)
The ``RUN`` instruction will execute any commands in a new layer on top
of the current image and commit the results. The resulting committed image
will be used for the next step in the Dockerfile.
Layering ``RUN`` instructions and generating commits conforms to the
core concepts of Docker where commits are cheap and containers can be
created from any point in an image's history, much like source
control.
The *exec* form makes it possible to avoid shell string munging, and to ``RUN``
commands using a base image that does not contain ``/bin/sh``.
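For instance, the two forms side by side (an illustrative fragment, not a complete ``Dockerfile``):

.. code-block:: bash

    # Shell form: run through /bin/sh -c, so shell features like && work
    RUN apt-get update && apt-get install -y curl
    # Exec form: no shell involved, arguments are passed as-is
    RUN ["/usr/bin/make", "install"]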
Known Issues (RUN)
..................
* :issue:`783` is about file permissions problems that can occur when
using the AUFS file system. You might notice it during an attempt to
``rm`` a file, for example. The issue describes a workaround.
* :issue:`2424` Locale will not be set automatically.
.. _dockerfile_cmd:
``CMD``
=======
CMD has three forms:
* ``CMD ["executable","param1","param2"]`` (like an *exec*, preferred form)
* ``CMD ["param1","param2"]`` (as *default parameters to ENTRYPOINT*)
* ``CMD command param1 param2`` (as a *shell*)
There can only be one CMD in a Dockerfile. If you list more than one
CMD then only the last CMD will take effect.
**The main purpose of a CMD is to provide defaults for an executing
container.** These defaults can include an executable, or they can
omit the executable, in which case you must specify an ENTRYPOINT as
well.
When used in the shell or exec formats, the ``CMD`` instruction sets
the command to be executed when running the image.
If you use the *shell* form of the CMD, then the ``<command>`` will
execute in ``/bin/sh -c``:
.. code-block:: bash
FROM ubuntu
CMD echo "This is a test." | wc -
If you want to **run your** ``<command>`` **without a shell** then you
must express the command as a JSON array and give the full path to the
executable. **This array form is the preferred format of CMD.** Any
additional parameters must be individually expressed as strings in the
array:
.. code-block:: bash
FROM ubuntu
CMD ["/usr/bin/wc","--help"]
If you would like your container to run the same executable every
time, then you should consider using ``ENTRYPOINT`` in combination
with ``CMD``. See :ref:`dockerfile_entrypoint`.
If the user specifies arguments to ``docker run`` then they will
override the default specified in CMD.
.. note::
Don't confuse ``RUN`` with ``CMD``. ``RUN`` actually runs a
command and commits the result; ``CMD`` does not execute anything at
build time, but specifies the intended command for the image.
.. _dockerfile_expose:
``EXPOSE``
==========
``EXPOSE <port> [<port>...]``
The ``EXPOSE`` instruction informs Docker that the container will listen
on the specified network ports at runtime. Docker uses this information
to interconnect containers using links (see :ref:`links <working_with_links_names>`),
and to set up port redirection on the host system (see :ref:`port_redirection`).
.. _dockerfile_env:
``ENV``
=======
``ENV <key> <value>``
The ``ENV`` instruction sets the environment variable ``<key>`` to the
value ``<value>``. This value will be passed to all future ``RUN``
instructions. This is functionally equivalent to prefixing the command
with ``<key>=<value>``
The environment variables set using ``ENV`` will persist when a container is run
from the resulting image. You can view the values using ``docker inspect``, and change them using ``docker run --env <key>=<value>``.
.. note::
One example where this can cause unexpected consequences is setting
``ENV DEBIAN_FRONTEND noninteractive``, which will persist when the
container is run interactively; for example:
``docker run -t -i image bash``
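To see the persistence in action, compare the following (a sketch, assuming a hypothetical ``image`` built with ``ENV FOO bar``):

.. code-block:: bash

    # FOO=bar persists from the image...
    docker run --rm image /bin/sh -c 'echo $FOO'                 # prints "bar"
    # ...unless overridden at run time
    docker run --rm --env FOO=baz image /bin/sh -c 'echo $FOO'   # prints "baz"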
.. _dockerfile_add:
``ADD``
=======
``ADD <src> <dest>``
The ``ADD`` instruction will copy new files from ``<src>`` and add them to
the container's filesystem at path ``<dest>``.
``<src>`` must be the path to a file or directory relative to the
source directory being built (also called the *context* of the build) or
a remote file URL.
``<dest>`` is the absolute path to which the source will be copied inside the
destination container.
All new files and directories are created with mode 0755, uid and gid
0.
.. note::
If you build using STDIN (``docker build - < somefile``), there is no build
context, so the Dockerfile can only contain a URL-based ``ADD`` statement.
.. note::
If your URL files are protected using authentication, you will need to use
``RUN wget``, ``RUN curl``, or another tool from within the container, as
``ADD`` does not support authentication.
The copy obeys the following rules:
* The ``<src>`` path must be inside the *context* of the build; you cannot
``ADD ../something /something``, because the first step of a
``docker build`` is to send the context directory (and subdirectories) to
the docker daemon.
* If ``<src>`` is a URL and ``<dest>`` does not end with a trailing slash,
then a file is downloaded from the URL and copied to ``<dest>``.
* If ``<src>`` is a URL and ``<dest>`` does end with a trailing slash,
then the filename is inferred from the URL and the file is downloaded to
``<dest>/<filename>``. For instance, ``ADD http://example.com/foobar /``
would create the file ``/foobar``. The URL must have a nontrivial path
so that an appropriate filename can be discovered in this case
(``http://example.com`` will not work).
* If ``<src>`` is a directory, the entire directory is copied,
including filesystem metadata.
* If ``<src>`` is a *local* tar archive in a recognized compression
format (identity, gzip, bzip2 or xz) then it is unpacked as a
directory. Resources from *remote* URLs are **not** decompressed.
When a directory is copied or unpacked, it has the same behavior as
``tar -x``: the result is the union of
1. whatever existed at the destination path and
2. the contents of the source tree,
with conflicts resolved in favor of "2." on a file-by-file basis.
* If ``<src>`` is any other kind of file, it is copied individually
along with its metadata. In this case, if ``<dest>`` ends with a
trailing slash ``/``, it will be considered a directory and the
contents of ``<src>`` will be written at ``<dest>/base(<src>)``.
* If ``<dest>`` does not end with a trailing slash, it will be
considered a regular file and the contents of ``<src>`` will be
written at ``<dest>``.
* If ``<dest>`` doesn't exist, it is created along with all missing
directories in its path.
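A few of these rules, illustrated as ``Dockerfile`` fragments (the files and URLs are hypothetical):

.. code-block:: bash

    # A single file, copied to an explicit destination path
    ADD config.json /etc/app/config.json
    # A URL with a trailing-slash destination: saved as /tmp/foobar
    ADD http://example.com/foobar /tmp/
    # A local tar archive: unpacked as a directory under /
    ADD rootfs.tar.gz /
    # A directory: copied recursively, including filesystem metadata
    ADD src /app/src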
.. _dockerfile_entrypoint:
``ENTRYPOINT``
==============
ENTRYPOINT has two forms:
* ``ENTRYPOINT ["executable", "param1", "param2"]`` (like an *exec*,
preferred form)
* ``ENTRYPOINT command param1 param2`` (as a *shell*)
There can only be one ``ENTRYPOINT`` in a Dockerfile. If you have more
than one ``ENTRYPOINT``, then only the last one in the Dockerfile will
have an effect.
An ``ENTRYPOINT`` helps you to configure a container that you can run
as an executable. That is, when you specify an ``ENTRYPOINT``, the
whole container runs as if it were just that executable.
The ``ENTRYPOINT`` instruction adds an entry command that will **not**
be overwritten when arguments are passed to ``docker run``, unlike the
behavior of ``CMD``. This allows arguments to be passed to the
entrypoint. i.e. ``docker run <image> -d`` will pass the "-d"
argument to the ENTRYPOINT.
You can specify parameters either in the ENTRYPOINT JSON array (as in
"like an exec" above), or by using a CMD statement. Parameters in the
ENTRYPOINT will not be overridden by the ``docker run`` arguments, but
parameters specified via CMD will be overridden by ``docker run``
arguments.
Like a ``CMD``, you can specify a plain string for the ENTRYPOINT and
it will execute in ``/bin/sh -c``:
.. code-block:: bash
FROM ubuntu
ENTRYPOINT wc -l -
For example, that Dockerfile's image will *always* take stdin as input
("-") and print the number of lines ("-l"). If you wanted to make
this optional but default, you could use a CMD:
.. code-block:: bash
FROM ubuntu
CMD ["-l", "-"]
ENTRYPOINT ["/usr/bin/wc"]
.. _dockerfile_volume:
``VOLUME``
==========
``VOLUME ["/data"]``
The ``VOLUME`` instruction will create a mount point with the specified name and mark it
as holding externally mounted volumes from the native host or other containers. For more
information/examples and mounting instructions via the docker client, refer to the
:ref:`volume_def` documentation.
.. _dockerfile_user:
``USER``
========
``USER daemon``
The ``USER`` instruction sets the username or UID to use when running
the image.
.. _dockerfile_workdir:
``WORKDIR``
===========
``WORKDIR /path/to/workdir``
The ``WORKDIR`` instruction sets the working directory for the ``RUN``, ``CMD`` and
``ENTRYPOINT`` Dockerfile commands that follow it.
It can be used multiple times in the same Dockerfile. If a relative path is
provided, it will be relative to the path of the previous ``WORKDIR``
instruction. For example::
WORKDIR /a
WORKDIR b
WORKDIR c
RUN pwd
The output of the final ``pwd`` command in this Dockerfile would be ``/a/b/c``.
``ONBUILD``
===========
``ONBUILD [INSTRUCTION]``
The ``ONBUILD`` instruction adds to the image a "trigger" instruction to be
executed at a later time, when the image is used as the base for another build.
The trigger will be executed in the context of the downstream build, as if it
had been inserted immediately after the *FROM* instruction in the downstream
Dockerfile.
Any build instruction can be registered as a trigger.
This is useful if you are building an image which will be used as a base to build
other images, for example an application build environment or a daemon which may be
customized with user-specific configuration.
For example, if your image is a reusable python application builder, it will require
application source code to be added in a particular directory, and it might require
a build script to be called *after* that. You can't just call *ADD* and *RUN* now,
because you don't yet have access to the application source code, and it will be
different for each application build. You could simply provide application developers
with a boilerplate Dockerfile to copy-paste into their application, but that is
inefficient, error-prone and difficult to update because it mixes with
application-specific code.
The solution is to use *ONBUILD* to register in advance instructions to run later,
during the next build stage.
Here's how it works:
1. When it encounters an *ONBUILD* instruction, the builder adds a trigger to
the metadata of the image being built.
The instruction does not otherwise affect the current build.
2. At the end of the build, a list of all triggers is stored in the image manifest,
under the key *OnBuild*. They can be inspected with *docker inspect*.
3. Later the image may be used as a base for a new build, using the *FROM* instruction.
As part of processing the *FROM* instruction, the downstream builder looks for *ONBUILD*
triggers, and executes them in the same order they were registered. If any of the
triggers fail, the *FROM* instruction is aborted which in turn causes the build
to fail. If all triggers succeed, the FROM instruction completes and the build
continues as usual.
4. Triggers are cleared from the final image after being executed. In other words
they are not inherited by "grand-children" builds.
For example you might add something like this:
.. code-block:: bash
[...]
ONBUILD ADD . /app/src
ONBUILD RUN /usr/local/bin/python-build --dir /app/src
[...]
.. warning:: Chaining ONBUILD instructions using `ONBUILD ONBUILD` isn't allowed.
.. warning:: ONBUILD may not trigger FROM or MAINTAINER instructions.
.. _dockerfile_examples:
Dockerfile Examples
======================
.. code-block:: bash
# Nginx
#
# VERSION 0.0.1
FROM ubuntu
MAINTAINER Guillaume J. Charmes <guillaume@docker.com>
# make sure the package repository is up to date
RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
RUN apt-get update
RUN apt-get install -y inotify-tools nginx apache2 openssh-server
.. code-block:: bash
# Firefox over VNC
#
# VERSION 0.3
FROM ubuntu
# make sure the package repository is up to date
RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
RUN apt-get update
# Install vnc, xvfb in order to create a 'fake' display and firefox
RUN apt-get install -y x11vnc xvfb firefox
RUN mkdir /.vnc
# Setup a password
RUN x11vnc -storepasswd 1234 ~/.vnc/passwd
# Autostart firefox (might not be the best way, but it does the trick)
RUN bash -c 'echo "firefox" >> /.bashrc'
EXPOSE 5900
CMD ["x11vnc", "-forever", "-usepw", "-create"]
.. code-block:: bash
# Multiple images example
#
# VERSION 0.1
FROM ubuntu
RUN echo foo > bar
# Will output something like ===> 907ad6c2736f
FROM ubuntu
RUN echo moo > oink
# Will output something like ===> 695d7793cbe4
# You'll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with
# /oink.
File diff suppressed because it is too large

@@ -1,14 +0,0 @@
:title: Commands
:description: docker command line interface
:keywords: commands, command line, help, docker
Commands
========
Contents:
.. toctree::
:maxdepth: 1
cli
@@ -1,18 +0,0 @@
:title: Docker Reference Manual
:description: References
:keywords: docker, references, api, command line, commands
.. _references:
Reference Manual
================
Contents:
.. toctree::
:maxdepth: 1
commandline/index
builder
run
api/index
@@ -1,418 +0,0 @@
:title: Docker Run Reference
:description: Configure containers at runtime
:keywords: docker, run, configure, runtime
.. _run_docker:
====================
Docker Run Reference
====================
**Docker runs processes in isolated containers**. When an operator
executes ``docker run``, she starts a process with its own file
system, its own networking, and its own isolated process tree. The
:ref:`image_def` which starts the process may define defaults related
to the binary to run, the networking to expose, and more, but ``docker
run`` gives final control to the operator who starts the container
from the image. That's the main reason :ref:`cli_run` has more options
than any other ``docker`` command.
Every one of the :ref:`example_list` shows running containers, and so
here we try to give more in-depth guidance.
.. _run_running:
General Form
============
As you've seen in the :ref:`example_list`, the basic `run` command
takes this form::
docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...]
To learn how to interpret the types of ``[OPTIONS]``, see
:ref:`cli_options`.
The list of ``[OPTIONS]`` breaks down into two groups:
1. Settings exclusive to operators, including:
* Detached or Foreground running,
* Container Identification,
* Network settings,
* Runtime Constraints on CPU and Memory, and
* Privileges and LXC Configuration
2. Settings shared between operators and developers, where operators
can override defaults developers set in images at build time.
Together, the ``docker run [OPTIONS]`` give complete control over
runtime behavior to the operator, allowing them to override all
defaults set by the developer during ``docker build`` and nearly all
the defaults set by the Docker runtime itself.
Operator Exclusive Options
==========================
Only the operator (the person executing ``docker run``) can set the
following options.
.. contents::
:local:
Detached vs Foreground
----------------------
When starting a Docker container, you must first decide if you want to
run the container in the background in a "detached" mode or in the
default foreground mode::
-d=false: Detached mode: Run container in the background, print new container id
Detached (-d)
.............
In detached mode (``-d=true`` or just ``-d``), all I/O should be done
through network connections or shared volumes because the container is
no longer listening to the commandline where you executed ``docker
run``. You can reattach to a detached container with ``docker``
:ref:`cli_attach`. If you choose to run a container in the detached
mode, then you cannot use the ``--rm`` option.
Foreground
..........
In foreground mode (the default when ``-d`` is not specified),
``docker run`` can start the process in the container and attach the
console to the process's standard input, output, and standard
error. It can even pretend to be a TTY (this is what most commandline
executables expect) and pass along signals. All of that is
configurable::
-a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr``
-t=false : Allocate a pseudo-tty
--sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
-i=false : Keep STDIN open even if not attached
If you do not specify ``-a`` then Docker will `attach everything
(stdin,stdout,stderr)
<https://github.com/dotcloud/docker/blob/75a7f4d90cde0295bcfb7213004abce8d4779b75/commands.go#L1797>`_. You
can specify to which of the three standard streams (``stdin``, ``stdout``,
``stderr``) you'd like to connect instead, as in::
docker run -a stdin -a stdout -i -t ubuntu /bin/bash
For interactive processes (like a shell) you will typically want a tty
as well as persistent standard input (``stdin``), so you'll use ``-i
-t`` together in most interactive cases.
Container Identification
------------------------
Name (--name)
.............
The operator can identify a container in three ways:
* UUID long identifier ("f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778")
* UUID short identifier ("f78375b1c487")
* Name ("evil_ptolemy")
The UUID identifiers come from the Docker daemon, and if you do not
assign a name to the container with ``--name`` then the daemon will
also generate a random string name. The name can become a handy
way to add meaning to a container since you can use this name when
defining :ref:`links <working_with_links_names>` (or any other place
you need to identify a container). This works for both background and
foreground Docker containers.
PID Equivalent
..............
And finally, to help with automation, you can have Docker write the
container ID out to a file of your choosing. This is similar to how
some programs might write out their process ID to a file (you've seen
them as PID files)::
--cidfile="": Write the container ID to the file
Network Settings
----------------
::
-n=true : Enable networking for this container
--dns=[] : Set custom dns servers for the container
By default, all containers have networking enabled and they can make
any outgoing connections. The operator can completely disable
networking with ``docker run -n=false``, which disables all incoming and
outgoing networking. In cases like this, you would perform I/O through
files or STDIN/STDOUT only.
Your container will use the same DNS servers as the host by default,
but you can override this with ``--dns``.
Clean Up (--rm)
---------------
By default a container's file system persists even after the container
exits. This makes debugging a lot easier (since you can inspect the
final state) and you retain all your data by default. But if you are
running short-term **foreground** processes, these container file
systems can really pile up. If instead you'd like Docker to
**automatically clean up the container and remove the file system when
the container exits**, you can add the ``--rm`` flag::
--rm=false: Automatically remove the container when it exits (incompatible with -d)
Runtime Constraints on CPU and Memory
-------------------------------------
The operator can also adjust the performance parameters of the container::
-m="": Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
-c=0 : CPU shares (relative weight)
The operator can constrain the memory available to a container easily
with ``docker run -m``. If the host supports swap memory, then the
``-m`` memory setting can be larger than physical RAM.
Similarly the operator can increase the priority of this container
with the ``-c`` option. By default, all containers run at the same
priority and get the same proportion of CPU cycles, but you can tell
the kernel to give more shares of CPU time to one or more containers
when you start them via Docker.
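For example (the values here are chosen arbitrarily):

.. code-block:: bash

    # Cap memory at 512 MB and double the default CPU share weight (1024)
    docker run -m 512m -c 2048 ubuntu /bin/bash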
Runtime Privilege and LXC Configuration
---------------------------------------
::
--privileged=false: Give extended privileges to this container
--lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
By default, Docker containers are "unprivileged" and cannot, for
example, run a Docker daemon inside a Docker container. This is
because by default a container is not allowed to access any devices,
but a "privileged" container is given access to all devices (see
lxc-template.go_ and documentation on `cgroups devices
<https://www.kernel.org/doc/Documentation/cgroups/devices.txt>`_).
When the operator executes ``docker run --privileged``, Docker will
enable access to all devices on the host as well as set some
configuration in AppArmor to allow the container nearly all the same
access to the host as processes running outside containers on the
host. Additional information about running with ``--privileged`` is
available on the `Docker Blog
<http://blog.docker.io/2013/09/docker-can-now-run-within-docker/>`_.
If the Docker daemon was started using the ``lxc`` exec-driver
(``docker -d --exec-driver=lxc``) then the operator can also specify
LXC options using one or more ``--lxc-conf`` parameters. These can be
new parameters or override existing parameters from the lxc-template.go_.
Note that in the future, a given host's Docker daemon may not use LXC,
so this is an implementation-specific configuration meant for operators
already familiar with using LXC directly.
.. _lxc-template.go: https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go
Overriding ``Dockerfile`` Image Defaults
========================================
When a developer builds an image from a :ref:`Dockerfile
<dockerbuilder>` or when she commits it, the developer can set a
number of default parameters that take effect when the image starts up
as a container.
Four of the ``Dockerfile`` commands cannot be overridden at runtime:
``FROM, MAINTAINER, RUN``, and ``ADD``. Everything else has a
corresponding override in ``docker run``. We'll go through what the
developer might have set in each ``Dockerfile`` instruction and how the
operator can override that setting.
.. contents::
:local:
CMD (Default Command or Options)
--------------------------------
Recall the optional ``COMMAND`` in the Docker commandline::
docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...]
This command is optional because the person who created the ``IMAGE``
may have already provided a default ``COMMAND`` using the ``Dockerfile``
``CMD``. As the operator (the person running a container from the
image), you can override that ``CMD`` just by specifying a new
``COMMAND``.
If the image also specifies an ``ENTRYPOINT`` then the ``CMD`` or
``COMMAND`` get appended as arguments to the ``ENTRYPOINT``.
ENTRYPOINT (Default Command to Execute at Runtime)
--------------------------------------------------
::
--entrypoint="": Overwrite the default entrypoint set by the image
The ENTRYPOINT of an image is similar to a ``COMMAND`` because it
specifies what executable to run when the container starts, but it is
(purposely) more difficult to override. The ``ENTRYPOINT`` gives a
container its default nature or behavior, so that when you set an
``ENTRYPOINT`` you can run the container *as if it were that binary*,
complete with default options, and you can pass in more options via
the ``COMMAND``. But, sometimes an operator may want to run something else
inside the container, so you can override the default ``ENTRYPOINT`` at
runtime by using a string to specify the new ``ENTRYPOINT``. Here is an
example of how to run a shell in a container that has been set up to
automatically run something else (like ``/usr/bin/redis-server``)::
docker run -i -t --entrypoint /bin/bash example/redis
or two examples of how to pass more parameters to that ENTRYPOINT::
docker run -i -t --entrypoint /bin/bash example/redis -c ls -l
docker run -i -t --entrypoint /usr/bin/redis-cli example/redis --help
EXPOSE (Incoming Ports)
-----------------------
The ``Dockerfile`` doesn't give much control over networking, only
providing the ``EXPOSE`` instruction to give a hint to the operator
about what incoming ports might provide services. The following
options work with or override the ``Dockerfile``'s exposed defaults::
--expose=[]: Expose a port from the container
without publishing it to your host
-P=false : Publish all exposed ports to the host interfaces
-p=[] : Publish a container's port to the host (format:
ip:hostPort:containerPort | ip::containerPort |
hostPort:containerPort)
(use 'docker port' to see the actual mapping)
--link="" : Add link to another container (name:alias)
As mentioned previously, ``EXPOSE`` (and ``--expose``) make a port
available **in** a container for incoming connections. The port number
on the inside of the container (where the service listens) does not
need to be the same number as the port exposed on the outside of the
container (where clients connect), so inside the container you might
have an HTTP service listening on port 80 (and so you ``EXPOSE 80`` in
the ``Dockerfile``), but outside the container the port might be 42800.
To help a new client container reach the server container's internal
port ``--expose``'d by the operator or ``EXPOSE``'d by the developer,
the operator has three choices: start the server container with ``-P``
or ``-p``, or start the client container with ``--link``.
If the operator uses ``-P`` or ``-p`` then Docker will make the
exposed port accessible on the host and the ports will be available to
any client that can reach the host. To find the map between the host
ports and the exposed ports, use ``docker port``.
If the operator uses ``--link`` when starting the new client container,
then the client container can access the exposed port via a private
networking interface. Docker will set some environment variables in
the client container to help indicate which interface and port to use.
ENV (Environment Variables)
---------------------------
The operator can **set any environment variable** in the container by
using one or more ``-e`` flags, even overriding those already defined by the
developer with a Dockerfile ``ENV``::
$ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export
declare -x HOME="/"
declare -x HOSTNAME="85bc26a0e200"
declare -x OLDPWD
declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
declare -x PWD="/"
declare -x SHLVL="1"
declare -x container="lxc"
declare -x deep="purple"
Similarly the operator can set the **hostname** with ``-h``.
``--link name:alias`` also sets environment variables, using the
*alias* string to define environment variables within the container
that give the IP and PORT information for connecting to the service
container. Let's imagine we have a container running Redis::
# Start the service container, named redis-name
$ docker run -d --name redis-name dockerfiles/redis
4241164edf6f5aca5b0e9e4c9eccd899b0b8080c64c0cd26efe02166c73208f3
# The redis-name container exposed port 6379
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
4241164edf6f dockerfiles/redis:latest /redis-stable/src/re 5 seconds ago Up 4 seconds 6379/tcp redis-name
# Note that there are no public ports exposed since we didn't use -p or -P
$ docker port 4241164edf6f 6379
2014/01/25 00:55:38 Error: No public port '6379' published for 4241164edf6f
Yet we can get information about the Redis container's exposed ports
with ``--link``. Choose an alias that will form a valid environment
variable!
::
$ docker run --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c export
declare -x HOME="/"
declare -x HOSTNAME="acda7f7b1cdc"
declare -x OLDPWD
declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
declare -x PWD="/"
declare -x REDIS_ALIAS_NAME="/distracted_wright/redis"
declare -x REDIS_ALIAS_PORT="tcp://172.17.0.32:6379"
declare -x REDIS_ALIAS_PORT_6379_TCP="tcp://172.17.0.32:6379"
declare -x REDIS_ALIAS_PORT_6379_TCP_ADDR="172.17.0.32"
declare -x REDIS_ALIAS_PORT_6379_TCP_PORT="6379"
declare -x REDIS_ALIAS_PORT_6379_TCP_PROTO="tcp"
declare -x SHLVL="1"
declare -x container="lxc"
And we can use that information to connect from another container as a client::
$ docker run -i -t --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT'
172.17.0.32:6379>
VOLUME (Shared Filesystems)
---------------------------
::
-v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro].
If "container-dir" is missing, then docker creates a new volume.
--volumes-from="": Mount all volumes from the given container(s)
The volumes commands are complex enough to have their own
documentation in section :ref:`volume_def`. A developer can define one
or more ``VOLUME``\s associated with an image, but only the operator can
give access from one container to another (or from a container to a
volume mounted on the host).
USER
----
The default user within a container is ``root`` (id = 0), but if the
developer created additional users, those are accessible too. The
developer can set a default user to run the first process with the
``Dockerfile USER`` command, but the operator can override it ::
-u="": Username or UID
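For example, a minimal sketch (it assumes the ``www-data`` user exists in the image, as it does in the stock Ubuntu image)::

    $ docker run --rm -u www-data ubuntu whoami
    www-data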
WORKDIR
-------
The default working directory for running binaries within a container is the root directory (``/``), but the developer can set a different default with the ``Dockerfile WORKDIR`` command. The operator can override this with::
-w="": Working directory inside the container
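For example, a minimal sketch::

    # pwd runs in /tmp instead of the default /
    $ docker run --rm -w /tmp ubuntu pwd
    /tmp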

View file

@ -1,47 +0,0 @@
:title: Container
:description: Definitions of a container
:keywords: containers, lxc, concepts, explanation, image, container
.. _container_def:
Container
=========
.. image:: images/docker-filesystems-busyboxrw.png
Once you start a process in Docker from an :ref:`image_def`, Docker
fetches the image and its :ref:`parent_image_def`, and repeats the
process until it reaches the :ref:`base_image_def`. Then the
:ref:`ufs_def` adds a read-write layer on top. That read-write layer,
plus the information about its :ref:`parent_image_def` and some
additional information like its unique id, networking configuration,
and resource limits is called a **container**.
.. _container_state_def:
Container State
...............
Containers can change, and so they have state. A container may be
**running** or **exited**.
When a container is running, the idea of a "container" also includes a
tree of processes running on the CPU, isolated from the other
processes running on the host.
When the container is exited, the state of the file system and
its exit value is preserved. You can start, stop, and restart a
container. The processes restart from scratch (their memory state is
**not** preserved in a container), but the file system is just as it
was when the container was stopped.
You can promote a container to an :ref:`image_def` with ``docker
commit``. Once a container is an image, you can use it as a parent for
new containers.
Container IDs
.............
All containers are identified by a 64 hexadecimal digit string (internally a 256-bit
value). To simplify their use, a short ID of the first 12 characters can be used
on the command line. There is a small possibility of short ID collisions, so the
docker server will always return the long ID.
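For example, both of these commands refer to the same container (the ID is illustrative)::

    $ docker inspect 4241164edf6f
    $ docker inspect 4241164edf6f5aca5b0e9e4c9eccd899b0b8080c64c0cd26efe02166c73208f3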

View file

@ -1,38 +0,0 @@
:title: File Systems
:description: How Linux organizes its persistent storage
:keywords: containers, files, linux
.. _filesystem_def:
File System
===========
.. image:: images/docker-filesystems-generic.png
In order for a Linux system to run, it typically needs two `file
systems <http://en.wikipedia.org/wiki/Filesystem>`_:
1. boot file system (bootfs)
2. root file system (rootfs)
The **boot file system** contains the bootloader and the kernel. The
user never makes any changes to the boot file system. In fact, soon
after the boot process is complete, the entire kernel is in memory,
and the boot file system is unmounted to free up the RAM associated
with the initrd disk image.
The **root file system** includes the typical directory structure we
associate with Unix-like operating systems: ``/dev, /proc, /bin, /etc,
/lib, /usr,`` and ``/tmp`` plus all the configuration files, binaries
and libraries required to run user applications (like bash, ls, and so
forth).
While there can be important kernel differences between different
Linux distributions, the contents and organization of the root file
system are usually what make your software packages dependent on one
distribution versus another. Docker can help solve this problem by
running multiple distributions at the same time.
.. image:: images/docker-filesystems-multiroot.png

View file

@ -1,46 +0,0 @@
:title: Images
:description: Definition of an image
:keywords: containers, lxc, concepts, explanation, image, container
.. _image_def:
Image
=====
.. image:: images/docker-filesystems-debian.png
In Docker terminology, a read-only :ref:`layer_def` is called an
**image**. An image never changes.
Since Docker uses a :ref:`ufs_def`, the processes think the whole file
system is mounted read-write. But all the changes go to the top-most
writeable layer, and underneath, the original file in the read-only
image is unchanged. Since images don't change, images do not have state.
.. image:: images/docker-filesystems-debianrw.png
.. _parent_image_def:
Parent Image
............
.. image:: images/docker-filesystems-multilayer.png
Each image may depend on one other image, which forms the layer beneath
it. We sometimes say that the lower image is the **parent** of the
upper image.
.. _base_image_def:
Base Image
..........
An image that has no parent is a **base image**.
Image IDs
.........
All images are identified by a 64 hexadecimal digit string (internally a 256-bit
value). To simplify their use, a short ID of the first 12 characters can be used
on the command line. There is a small possibility of short ID collisions, so the
docker server will always return the long ID.

View file

@ -1,24 +0,0 @@
:title: Glossary
:description: Definitions of terms used in Docker documentation
:keywords: concepts, documentation, docker, containers
Glossary
========
Definitions of terms used in Docker documentation.
Contents:
.. toctree::
:maxdepth: 1
filesystem
layer
image
container
registry
repository

View file

@ -1,40 +0,0 @@
:title: Layers
:description: Organizing the Docker Root File System
:keywords: containers, lxc, concepts, explanation, image, container
Layers
======
In a traditional Linux boot, the kernel first mounts the root
:ref:`filesystem_def` as read-only, checks its integrity, and then
switches the whole rootfs volume to read-write mode.
.. _layer_def:
Layer
.....
When Docker mounts the rootfs, it starts read-only, as in a traditional
Linux boot, but then, instead of changing the file system to
read-write mode, it takes advantage of a `union mount
<http://en.wikipedia.org/wiki/Union_mount>`_ to add a read-write file
system *over* the read-only file system. In fact there may be multiple
read-only file systems stacked on top of each other. We think of each
one of these file systems as a **layer**.
.. image:: images/docker-filesystems-multilayer.png
At first, the top read-write layer has nothing in it, but any time a
process creates a file, this happens in the top layer. And if
something needs to update an existing file in a lower layer, then the
file gets copied to the upper layer and changes go into the copy. The
version of the file on the lower layer cannot be seen by the
applications anymore, but it is there, unchanged.
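You can observe this copy-up behaviour with ``docker diff``, which lists what was added (``A``) or changed (``C``) in a container's top layer. A minimal sketch (exact output will vary)::

    $ docker run --name demo ubuntu touch /tmp/demo-file
    $ docker diff demo
    C /tmp
    A /tmp/demo-file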
.. _ufs_def:
Union File System
.................
We call the union of the read-write layer and all the read-only layers
a **union file system**.

View file

@ -1,16 +0,0 @@
:title: Registry
:description: Definition of a Registry
:keywords: containers, lxc, concepts, explanation, image, repository, container
.. _registry_def:
Registry
==========
A Registry is a hosted service containing :ref:`repositories<repository_def>`
of :ref:`images<image_def>` which responds to the Registry API.
The default registry can be accessed using a browser at http://images.docker.io
or using the ``sudo docker search`` command.
For more information see :ref:`Working with Repositories<working_with_the_repository>`

View file

@ -1,30 +0,0 @@
:title: Repository
:description: Definition of a Repository
:keywords: containers, lxc, concepts, explanation, image, repository, container
.. _repository_def:
Repository
==========
A repository is a set of images either on your local Docker server, or
shared by pushing it to a :ref:`Registry<registry_def>` server.
Images can be associated with one or more repositories by giving them an image name
using one of three different commands:
1. At build time (e.g. ``sudo docker build -t IMAGENAME``),
2. When committing a container (e.g. ``sudo docker commit CONTAINERID IMAGENAME``) or
3. When tagging an image id with an image name (e.g. ``sudo docker tag IMAGEID IMAGENAME``).
A `Fully Qualified Image Name` (FQIN) can be made up of 3 parts:
``[registry_hostname[:port]/][user_name/](repository_name:version_tag)``
``user_name`` and ``registry_hostname`` default to an empty string.
When ``registry_hostname`` is an empty string, then ``docker push`` will push to ``index.docker.io:80``.
If you create a new repository which you want to share, you will need to set at least the
``user_name``, as the 'default' blank ``user_name`` prefix is reserved for official Docker images.
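For example, a sketch of tagging and pushing with a fully qualified name (the registry host, user name and image ID are illustrative)::

    $ sudo docker tag 0u812deadbeef my-registry.example.com:5000/myuser/myrepo:v1
    $ sudo docker push my-registry.example.com:5000/myuser/myrepo:v1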
For more information see :ref:`Working with Repositories<working_with_the_repository>`

View file

@ -1,21 +0,0 @@
:title: Documentation
:description: -- todo: change me
:keywords: todo, docker, documentation, installation, usage, examples, contributing, faq, command line, concepts
Documentation
=============
This documentation has the following resources:
.. toctree::
:maxdepth: 1
installation/index
use/index
examples/index
reference/index
contributing/index
terms/index
articles/index
faq

View file

@ -1,183 +0,0 @@
:title: Link via an Ambassador Container
:description: Using the Ambassador pattern to abstract (network) services
:keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming
.. _ambassador_pattern_linking:
Link via an Ambassador Container
================================
Rather than hardcoding network links between a service consumer and provider, Docker
encourages service portability.
e.g., instead of
.. code-block:: bash
(consumer) --> (redis)
requiring you to restart the ``consumer`` to attach it to a different ``redis`` service,
you can add ambassadors
.. code-block:: bash
(consumer) --> (redis-ambassador) --> (redis)
or

.. code-block:: bash

    (consumer) --> (redis-ambassador) ---network---> (redis-ambassador) --> (redis)
When you need to rewire your consumer to talk to a different redis server, you
can just restart the ``redis-ambassador`` container that the consumer is connected to.
This pattern also allows you to transparently move the redis server to a different
docker host from the consumer.
Using the ``svendowideit/ambassador`` container, the link wiring is controlled entirely
from the ``docker run`` parameters.
Two-host Example
----------------
Start actual redis server on one Docker host
.. code-block:: bash
big-server $ docker run -d --name redis crosbymichael/redis
Then add an ambassador linked to the redis server, mapping a port to the outside world
.. code-block:: bash
big-server $ docker run -d --link redis:redis --name redis_ambassador -p 6379:6379 svendowideit/ambassador
On the other host, you can set up another ambassador, setting environment variables for each remote port you want to proxy to ``big-server``
.. code-block:: bash
client-server $ docker run -d --name redis_ambassador --expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador
Then on the ``client-server`` host, you can use a redis client container to talk
to the remote redis server, just by linking to the local redis ambassador.
.. code-block:: bash
client-server $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
redis 172.17.0.160:6379> ping
PONG
How it works
------------
The following example shows what the ``svendowideit/ambassador`` container does
automatically (with a tiny amount of ``sed``)
On the docker host (192.168.1.52) that redis will run on:
.. code-block:: bash
# start actual redis server
$ docker run -d --name redis crosbymichael/redis
# get a redis-cli container for connection testing
$ docker pull relateiq/redis-cli
# test the redis server by talking to it directly
$ docker run -t -i --rm --link redis:redis relateiq/redis-cli
redis 172.17.0.136:6379> ping
PONG
^D
# add redis ambassador
$ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 busybox sh
In the ``redis_ambassador`` container, you can see the linked redis container's env
.. code-block:: bash
$ env
REDIS_PORT=tcp://172.17.0.136:6379
REDIS_PORT_6379_TCP_ADDR=172.17.0.136
REDIS_NAME=/redis_ambassador/redis
HOSTNAME=19d7adf4705e
REDIS_PORT_6379_TCP_PORT=6379
HOME=/
REDIS_PORT_6379_TCP_PROTO=tcp
container=lxc
REDIS_PORT_6379_TCP=tcp://172.17.0.136:6379
TERM=xterm
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
PWD=/
This environment is used by the ambassador socat script to expose redis to the world
(via the -p 6379:6379 port mapping)
.. code-block:: bash
$ docker rm redis_ambassador
$ sudo ./contrib/mkimage-unittest.sh
$ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 docker-ut sh
$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379
then ping the redis server via the ambassador
.. code-block:: bash
$ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
redis 172.17.0.160:6379> ping
PONG
Now go to a different server
.. code-block:: bash
$ sudo ./contrib/mkimage-unittest.sh
$ docker run -t -i --expose 6379 --name redis_ambassador docker-ut sh
$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379
Then get the redis-cli image so you can talk over the ambassador bridge
.. code-block:: bash
$ docker pull relateiq/redis-cli
$ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
redis 172.17.0.160:6379> ping
PONG
The svendowideit/ambassador Dockerfile
--------------------------------------
The ``svendowideit/ambassador`` image is a small busybox image with ``socat`` built in.
When you start the container, it uses a small ``sed`` script to parse out the (possibly multiple)
link environment variables to set up the port forwarding. On the remote host, you need to set the
variable using the ``-e`` command line option.
``--expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`` will forward the
local ``1234`` port to the remote IP and port - in this case ``192.168.1.52:6379``.
::
#
#
# first you need to build the docker-ut image
# using ./contrib/mkimage-unittest.sh
# then
# docker build -t SvenDowideit/ambassador .
# docker tag SvenDowideit/ambassador ambassador
# then to run it (on the host that has the real backend on it)
# docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 ambassador
# on the remote host, you can set up another ambassador
# docker run -t -i --name redis_ambassador --expose 6379 sh
FROM docker-ut
MAINTAINER SvenDowideit@home.org.au
CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh && top
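To see what that ``sed`` script produces, you can feed it a sample link variable by hand (the value shown is illustrative):

.. code-block:: bash

    $ echo 'REDIS_PORT_6379_TCP=tcp://172.17.0.136:6379' | \
        sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/'
    socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379 &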

View file

@ -1,199 +0,0 @@
:title: First steps with Docker
:description: Common usage and commands
:keywords: Examples, Usage, basic commands, docker, documentation, examples
First steps with Docker
=======================
Check your Docker install
-------------------------
This guide assumes you have a working installation of Docker. To check
your Docker install, run the following command:
.. code-block:: bash
# Check that you have a working install
docker info
If you get ``docker: command not found`` or something like
``/var/lib/docker/repositories: permission denied`` you may have an incomplete
docker installation or insufficient privileges to access Docker on your machine.
Please refer to :ref:`installation_list` for installation instructions.
Download a pre-built image
--------------------------
.. code-block:: bash
# Download an ubuntu image
sudo docker pull ubuntu
This will find the ``ubuntu`` image by name in the :ref:`Central Index
<searching_central_index>` and download it from the top-level Central
Repository to a local image cache.
.. NOTE:: When the image has successfully downloaded, you will see a
12 character hash ``539c0211cd76: Download complete`` which is the
short form of the image ID. These short image IDs are the first 12
characters of the full image ID - which can be found using ``docker
inspect`` or ``docker images --no-trunc=true``
**If you're using OS X** then you shouldn't use ``sudo``.
Running an interactive shell
----------------------------
.. code-block:: bash
# Run an interactive shell in the ubuntu image,
# allocate a tty, attach stdin and stdout
# To detach the tty without exiting the shell,
# use the escape sequence Ctrl-p + Ctrl-q
# note: This will continue to exist in a stopped state once exited (see "docker ps -a")
sudo docker run -i -t ubuntu /bin/bash
.. _bind_docker:
Bind Docker to another host/port or a Unix socket
-------------------------------------------------
.. warning:: Changing the default ``docker`` daemon binding to a TCP
port or Unix *docker* user group will increase your security risks
by allowing non-root users to gain *root* access on the
host. Make sure you control access to ``docker``. If you are binding
to a TCP port, anyone with access to that port has full Docker access;
so it is not advisable on an open network.
With ``-H`` it is possible to make the Docker daemon listen on a
specific IP and port. By default, it will listen on
``unix:///var/run/docker.sock`` to allow only local connections by the
*root* user. You *could* set it to ``0.0.0.0:4243`` or a specific host IP to
give access to everybody, but that is **not recommended** because then
it is trivial for someone to gain root access to the host where the
daemon is running.
Similarly, the Docker client can use ``-H`` to connect to a custom port.
``-H`` accepts host and port assignment in the following format:
``tcp://[host][:port]`` or ``unix://path``
For example:
* ``tcp://host:4243`` -> tcp connection on host:4243
* ``unix://path/to/socket`` -> unix socket located at ``path/to/socket``
``-H``, when empty, will default to the same value as when no ``-H`` was passed in.
``-H`` also accepts short form for TCP bindings:
``host[:port]`` or ``:port``
.. code-block:: bash
# Run docker in daemon mode
sudo <path to>/docker -H 0.0.0.0:5555 -d &
# Download an ubuntu image
sudo docker -H :5555 pull ubuntu
You can use multiple ``-H``, for example, if you want to listen on
both TCP and a Unix socket
.. code-block:: bash
# Run docker in daemon mode
sudo <path to>/docker -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -d &
# Download an ubuntu image, use default Unix socket
sudo docker pull ubuntu
# OR use the TCP port
sudo docker -H tcp://127.0.0.1:4243 pull ubuntu
Starting a long-running worker process
--------------------------------------
.. code-block:: bash
# Start a very useful long-running process
JOB=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")
# Collect the output of the job so far
sudo docker logs $JOB
# Kill the job
sudo docker kill $JOB
Listing containers
------------------
.. code-block:: bash
sudo docker ps # Lists only running containers
sudo docker ps -a # Lists all containers
Controlling containers
----------------------
.. code-block:: bash
# Start a new container
JOB=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")
# Stop the container
docker stop $JOB
# Start the container
docker start $JOB
# Restart the container
docker restart $JOB
# SIGKILL a container
docker kill $JOB
# Remove a container
docker stop $JOB # Container must be stopped to remove it
docker rm $JOB
Bind a service on a TCP port
------------------------------
.. code-block:: bash
# Bind port 4444 of this container, and tell netcat to listen on it
JOB=$(sudo docker run -d -p 4444 ubuntu:12.10 /bin/nc -l 4444)
# Which public port is NATed to my container?
PORT=$(sudo docker port $JOB 4444 | awk -F: '{ print $2 }')
# Connect to the public port
echo hello world | nc 127.0.0.1 $PORT
# Verify that the network connection worked
echo "Daemon received: $(sudo docker logs $JOB)"
Committing (saving) a container state
-------------------------------------
Save your container's state to a container image, so the state can be re-used.
When you commit your container, only the differences between the image the
container was created from and the current state of the container will be
stored (as a diff). See which images you already have using the ``docker
images`` command.
.. code-block:: bash
# Commit your container to a new named image
sudo docker commit <container_id> <some_name>
# List your containers
sudo docker images
You now have an image state from which you can create new instances.
Read more about :ref:`working_with_the_repository` or continue to the
complete :ref:`cli`

View file

@ -1,95 +0,0 @@
:title: Chef Usage
:description: Installation and using Docker via Chef
:keywords: chef, installation, usage, docker, documentation
.. _install_using_chef:
Using Chef
=============
.. note::
Please note this is a community contributed installation path. The
only 'official' installation is using the :ref:`ubuntu_linux`
installation path. This version may sometimes be out of date.
Requirements
------------
To use this guide you'll need a working installation of
`Chef <http://www.getchef.com/>`_. This cookbook supports a variety of
operating systems.
Installation
------------
The cookbook is available on the `Chef Community Site
<community.opscode.com/cookbooks/docker>`_ and can be installed
using your favorite cookbook dependency manager.
The source can be found on `GitHub
<https://github.com/bflad/chef-docker>`_.
Usage
-----
The cookbook provides recipes for installing Docker, configuring init
for Docker, and resources for managing images and containers.
It supports almost all Docker functionality.
Installation
~~~~~~~~~~~~
.. code-block:: ruby
include_recipe 'docker'
Images
~~~~~~
The next step is to pull a Docker image. For this, we have a resource:
.. code-block:: ruby
docker_image 'samalba/docker-registry'
This is equivalent to running:
.. code-block:: bash
docker pull samalba/docker-registry
There are attributes available to control how long the cookbook
will allow for downloading (5 minute default).
To remove images you no longer need:
.. code-block:: ruby
docker_image 'samalba/docker-registry' do
action :remove
end
Containers
~~~~~~~~~~
Now you have an image where you can run commands within a container
managed by Docker.
.. code-block:: ruby
docker_container 'samalba/docker-registry' do
detach true
port '5000:5000'
env 'SETTINGS_FLAVOR=local'
volume '/mnt/docker:/docker-storage'
end
This is equivalent to running the following command, but under upstart:
.. code-block:: bash
docker run --detach=true --publish='5000:5000' --env='SETTINGS_FLAVOR=local' --volume='/mnt/docker:/docker-storage' samalba/docker-registry
The resources will accept a single string or an array of values
for any docker flags that allow multiple values.

View file

@ -1,74 +0,0 @@
:title: Automatically Start Containers
:description: How to generate scripts for upstart, systemd, etc.
:keywords: systemd, upstart, supervisor, docker, documentation, host integration
Automatically Start Containers
==============================
You can use your Docker containers with process managers like ``upstart``,
``systemd`` and ``supervisor``.
Introduction
------------
If you want a process manager to manage your containers, you will need to run
the docker daemon with the ``-r=false`` option so that docker will not automatically
restart your containers when the host is restarted.
When you have finished setting up your image and are happy with your
running container, you can then attach a process manager to manage
it. When you run ``docker start -a``, docker will automatically attach
to the running container, or start it if needed and forward all signals
so that the process manager can detect when a container stops and correctly
restart it.
Here are a few sample scripts for systemd and upstart to integrate with docker.
Sample Upstart Script
---------------------
In this example we've already created a container to run Redis with
``--name redis_server``. To create an upstart script for our container,
we create a file named ``/etc/init/redis.conf`` and place the following
into it:
.. code-block:: bash
description "Redis container"
author "Me"
start on filesystem and started docker
stop on runlevel [!2345]
respawn
script
/usr/bin/docker start -a redis_server
end script
Next, we have to configure docker so that it's run with the option ``-r=false``.
Run the following command:
.. code-block:: bash
$ sudo sh -c "echo 'DOCKER_OPTS=\"-r=false\"' > /etc/default/docker"
Sample systemd Script
---------------------
.. code-block:: bash
[Unit]
Description=Redis container
Author=Me
After=docker.service
[Service]
Restart=always
ExecStart=/usr/bin/docker start -a redis_server
ExecStop=/usr/bin/docker stop -t 2 redis_server
[Install]
WantedBy=multi-user.target

View file

@ -1,24 +0,0 @@
:title: Documentation
:description: -- todo: change me
:keywords: todo, docker, documentation, basic, builder
Use
========
Contents:
.. toctree::
:maxdepth: 1
basics
workingwithrepository
port_redirection
networking
host_integration
working_with_volumes
working_with_links_names
ambassador_pattern_linking
chef
puppet

View file

@ -1,153 +0,0 @@
:title: Configure Networking
:description: Docker networking
:keywords: network, networking, bridge, docker, documentation
Configure Networking
====================
Docker uses Linux bridge capabilities to provide network connectivity
to containers. The ``docker0`` bridge interface is managed by Docker
for this purpose. When the Docker daemon starts, it:
- creates the ``docker0`` bridge if not present
- searches for an IP address range which doesn't overlap with an existing route
- picks an IP in the selected range
- assigns this IP to the ``docker0`` bridge
.. code-block:: bash
# List host bridges
$ sudo brctl show
bridge name bridge id STP enabled interfaces
docker0 8000.000000000000 no
# Show docker0 IP address
$ sudo ifconfig docker0
docker0 Link encap:Ethernet HWaddr xx:xx:xx:xx:xx:xx
inet addr:172.17.42.1 Bcast:0.0.0.0 Mask:255.255.0.0
At runtime, a :ref:`specific kind of virtual
interface<vethxxxx-device>` is given to each container which is then
bonded to the ``docker0`` bridge. Each container also receives a
dedicated IP address from the same range as ``docker0``. The
``docker0`` IP address is used as the default gateway for the
container.
.. code-block:: bash
# Run a container
$ sudo docker run -t -i -d base /bin/bash
52f811c5d3d69edddefc75aff5a4525fc8ba8bcfa1818132f9dc7d4f7c7e78b4
$ sudo brctl show
bridge name bridge id STP enabled interfaces
docker0 8000.fef213db5a66 no vethQCDY1N
Above, ``docker0`` acts as a bridge for the ``vethQCDY1N`` interface
which is dedicated to the 52f811c5d3d6 container.
How to use a specific IP address range
---------------------------------------
Docker will try hard to find an IP range that is not used by the
host. Even though it works for most cases, it's not bullet-proof and
sometimes you need to have more control over the IP addressing scheme.
For this purpose, Docker allows you to manage the ``docker0`` bridge
or your own one using the ``-b=<bridgename>`` parameter.
In this scenario:
- ensure Docker is stopped
- create your own bridge (``bridge0`` for example)
- assign a specific IP to this bridge
- start Docker with the ``-b=bridge0`` parameter
.. code-block:: bash
# Stop Docker
$ sudo service docker stop
# Clean docker0 bridge and
# add your very own bridge0
$ sudo ifconfig docker0 down
$ sudo brctl addbr bridge0
$ sudo ifconfig bridge0 192.168.227.1 netmask 255.255.255.0
# Edit your Docker startup file
$ echo "DOCKER_OPTS=\"-b=bridge0\"" >> /etc/default/docker
# Start Docker
$ sudo service docker start
# Ensure bridge0 IP is not changed by Docker
$ sudo ifconfig bridge0
bridge0 Link encap:Ethernet HWaddr xx:xx:xx:xx:xx:xx
inet addr:192.168.227.1 Bcast:192.168.227.255 Mask:255.255.255.0
# Run a container
$ docker run -i -t base /bin/bash
# Container IP in the 192.168.227/24 range
root@261c272cd7d5:/# ifconfig eth0
eth0 Link encap:Ethernet HWaddr xx:xx:xx:xx:xx:xx
inet addr:192.168.227.5 Bcast:192.168.227.255 Mask:255.255.255.0
# bridge0 IP as the default gateway
root@261c272cd7d5:/# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 192.168.227.1 0.0.0.0 UG 0 0 0 eth0
192.168.227.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
# hits CTRL+P then CTRL+Q to detach
# Display bridge info
$ sudo brctl show
bridge name bridge id STP enabled interfaces
bridge0 8000.fe7c2e0faebd no vethAQI2QT
Container intercommunication
-------------------------------
The value of the Docker daemon's ``icc`` parameter determines whether
containers can communicate with each other over the bridge network.
- The default, ``--icc=true`` allows containers to communicate with each other.
- ``--icc=false`` means containers are isolated from each other.
Docker uses ``iptables`` under the hood to either accept or
drop communication between containers.
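For example, with ``--icc=false`` you would expect to see a ``DROP`` rule on the ``FORWARD`` chain for traffic between ``docker0`` interfaces (a sketch; exact output depends on your iptables version and other rules):

.. code-block:: bash

    $ sudo iptables -L FORWARD -v | grep docker0
    0     0 DROP       all  --  docker0 docker0  anywhere    anywhere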
.. _vethxxxx-device:
What is the vethXXXX device?
-----------------------------------
Well. Things get complicated here.
The ``vethXXXX`` interface is the host side of a point-to-point link
between the host and the corresponding container; the other side of
the link is the container's ``eth0``
interface. This pair (host ``vethXXX`` and container ``eth0``) are
connected like a tube. Everything that comes in one side will come out
the other side.
All the plumbing is delegated to Linux network capabilities (check the
``ip link`` command) and the namespaces infrastructure.
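For example, each running container adds one ``veth`` device visible on the host (a sketch; the name and index are illustrative):

.. code-block:: bash

    $ ip link show | grep veth
    6: vethQCDY1N: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP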
I want more
------------
Jérôme Petazzoni has created ``pipework`` to connect
containers together in arbitrarily complex scenarios:
https://github.com/jpetazzo/pipework

View file

@ -1,152 +0,0 @@
:title: Redirect Ports
:description: usage about port redirection
:keywords: Usage, basic port, docker, documentation, examples
.. _port_redirection:
Redirect Ports
==============
Interacting with a service is commonly done through a connection to a
port. When this service runs inside a container, one can connect to
the port after finding the IP address of the container as follows:
.. code-block:: bash
# Find IP address of container with ID <container_id>
docker inspect <container_id> | grep IPAddress | cut -d '"' -f 4
However, this IP address is local to the host system and the container
port is not reachable by the outside world. Furthermore, even if the
port is used locally, e.g. by another container, this method is
tedious as the IP address of the container changes every time it
starts.
Docker addresses these two problems and gives a simple and robust way
to access services running inside containers.
To allow non-local clients to reach the service running inside the
container, Docker provides ways to bind the container port to an
interface of the host system. To simplify communication between
containers, Docker provides the linking mechanism.
Auto map all exposed ports on the host
--------------------------------------
To bind all the exposed container ports to the host automatically, use
``docker run -P <imageid>``. The mapped host ports will be auto-selected
from a pool of unused ports (49000..49900), and you will need to use
``docker ps``, ``docker inspect <container_id>`` or
``docker port <container_id> <port>`` to determine what they are.
Binding a port to a host interface
-----------------------------------
To bind a port of the container to a specific interface of the host
system, use the ``-p`` parameter of the ``docker run`` command:
.. code-block:: bash
# General syntax
docker run -p [([<host_interface>:[host_port]])|(<host_port>):]<container_port>[/udp] <image> <cmd>
When no host interface is provided, the port is bound to all available
interfaces of the host machine (aka INADDR_ANY, or 0.0.0.0). When no host port is
provided, one is dynamically allocated. The possible combinations of options for
a TCP port are the following:
.. code-block:: bash
# Bind TCP port 8080 of the container to TCP port 80 on 127.0.0.1 of the host machine.
docker run -p 127.0.0.1:80:8080 <image> <cmd>
# Bind TCP port 8080 of the container to a dynamically allocated TCP port on 127.0.0.1 of the host machine.
docker run -p 127.0.0.1::8080 <image> <cmd>
# Bind TCP port 8080 of the container to TCP port 80 on all available interfaces of the host machine.
docker run -p 80:8080 <image> <cmd>
# Bind TCP port 8080 of the container to a dynamically allocated TCP port on all available interfaces of the host machine.
docker run -p 8080 <image> <cmd>
UDP ports can also be bound by adding a trailing ``/udp``. All the
combinations described for TCP work. Here is only one example:
.. code-block:: bash
# Bind UDP port 5353 of the container to UDP port 53 on 127.0.0.1 of the host machine.
docker run -p 127.0.0.1:53:5353/udp <image> <cmd>
The command ``docker port`` lists the interface and port on the host
machine bound to a given container port. It is useful when using
dynamically allocated ports:
.. code-block:: bash
# Bind to a dynamically allocated port
docker run -p 127.0.0.1::8080 --name dyn-bound <image> <cmd>
# Lookup the actual port
docker port dyn-bound 8080
127.0.0.1:49160
Linking a container
-------------------
Communication between two containers can also be established in a
docker-specific way called linking.
To briefly present the concept of linking, let us consider two
containers: ``server``, containing the service, and ``client``,
accessing the service. Once ``server`` is running, ``client`` is
started and links to server. Linking sets environment variables in
``client`` giving it some information about ``server``. In this sense,
linking is a method of service discovery.
Let us now get back to our topic of interest; communication between
the two containers. We mentioned that the tricky part about this
communication was that the IP address of ``server`` was not
fixed. Therefore, some of the environment variables are going to be
used to inform ``client`` about this IP address. This process, called
exposure, is possible because ``client`` is started after ``server``
has been started.
Here is a full example. On ``server``, the port of interest is
exposed. The exposure is done either through the ``--expose`` parameter
to the ``docker run`` command, or the ``EXPOSE`` build command in a
Dockerfile:
.. code-block:: bash
# Expose port 80
docker run --expose 80 --name server <image> <cmd>
The ``client`` then links to the ``server``:
.. code-block:: bash
# Link
docker run --name client --link server:linked-server <image> <cmd>
``client`` locally refers to ``server`` as ``linked-server``. The
following environment variables, among others, are available on
``client``:
.. code-block:: bash
# The default protocol, ip, and port of the service running in the container
LINKED-SERVER_PORT=tcp://172.17.0.8:80
# A specific protocol, ip, and port of various services
LINKED-SERVER_PORT_80_TCP=tcp://172.17.0.8:80
LINKED-SERVER_PORT_80_TCP_PROTO=tcp
LINKED-SERVER_PORT_80_TCP_ADDR=172.17.0.8
LINKED-SERVER_PORT_80_TCP_PORT=80
This tells ``client`` that a service is running on port 80 of
``server`` and that ``server`` is accessible at the IP address
172.17.0.8
Note: Using the ``-p`` parameter also exposes the port.

View file

@ -1,117 +0,0 @@
:title: Puppet Usage
:description: Installating and using Puppet
:keywords: puppet, installation, usage, docker, documentation
.. _install_using_puppet:
Using Puppet
=============
.. note::
Please note this is a community contributed installation path. The
only 'official' installation is using the :ref:`ubuntu_linux`
installation path. This version may sometimes be out of date.
Requirements
------------
To use this guide you'll need a working installation of Puppet from
`Puppetlabs <https://www.puppetlabs.com>`_ .
The module also currently uses the official PPA, so it only works with Ubuntu.
Installation
------------
The module is available on the `Puppet Forge
<https://forge.puppetlabs.com/garethr/docker/>`_ and can be installed
using the built-in module tool.
.. code-block:: bash
puppet module install garethr/docker
It can also be found on `GitHub
<https://www.github.com/garethr/garethr-docker>`_ if you would rather
download the source.
Usage
-----
The module provides a puppet class for installing Docker and two defined types
for managing images and containers.
Installation
~~~~~~~~~~~~
.. code-block:: ruby
include 'docker'
Images
~~~~~~
The next step is probably to install a Docker image. For this, we have a
defined type which can be used like so:
.. code-block:: ruby
docker::image { 'ubuntu': }
This is equivalent to running:
.. code-block:: bash
docker pull ubuntu
Note that it will only be downloaded if an image of that name does
not already exist. This is downloading a large binary, so the first
run can take a while. For that reason, this define turns off the
default 5 minute timeout for the exec type. Note that you can also
remove images you no longer need with:
.. code-block:: ruby
docker::image { 'ubuntu':
ensure => 'absent',
}
Containers
~~~~~~~~~~
Now you have an image where you can run commands within a container
managed by Docker.
.. code-block:: ruby
docker::run { 'helloworld':
image => 'ubuntu',
command => '/bin/sh -c "while true; do echo hello world; sleep 1; done"',
}
This is equivalent to running the following command, but under upstart:
.. code-block:: bash
docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done"
Run also contains a number of optional parameters:
.. code-block:: ruby
docker::run { 'helloworld':
image => 'ubuntu',
command => '/bin/sh -c "while true; do echo hello world; sleep 1; done"',
ports => ['4444', '4555'],
volumes => ['/var/lib/couchdb', '/var/log'],
volumes_from => '6446ea52fbc9',
memory_limit => 10485760, # bytes
username => 'example',
hostname => 'example.com',
env => ['FOO=BAR', 'FOO2=BAR2'],
dns => ['8.8.8.8', '8.8.4.4'],
}
Note that ports, env, dns and volumes can be set with either a single string
or as above with an array of values.

View file

@ -1,132 +0,0 @@
:title: Link Containers
:description: How to create and use both links and names
:keywords: Examples, Usage, links, linking, docker, documentation, examples, names, name, container naming
.. _working_with_links_names:
Link Containers
===============
From version 0.6.5 you are now able to ``name`` a container and
``link`` it to another container by referring to its name. This will
create a parent -> child relationship where the parent container can
see selected information about its child.
.. _run_name:
Container Naming
----------------
.. versionadded:: v0.6.5
You can now name your container by using the ``--name`` flag. If no
name is provided, Docker will automatically generate a name. You can
see this name using the ``docker ps`` command.
.. code-block:: bash
# format is "sudo docker run --name <container_name> <image_name> <command>"
$ sudo docker run --name test ubuntu /bin/bash
# the -a flag shows all containers (only running containers are shown by default)
$ sudo docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
2522602a0d99 ubuntu:12.04 /bin/bash 14 seconds ago Exit 0 test
.. _run_link:
Links: service discovery for docker
-----------------------------------
.. versionadded:: v0.6.5
Links allow containers to discover and securely communicate with each
other by using the flag ``--link name:alias``. Inter-container
communication can be disabled with the daemon flag
``--icc=false``. With this flag set to ``false``, Container A cannot
access Container B unless explicitly allowed via a link. This is a
huge win for securing your containers. When two containers are linked
together, Docker creates a parent-child relationship between the
containers. The parent container will be able to access information
via environment variables of the child, such as name, exposed ports, IP
and other selected environment variables.
When linking two containers Docker will use the exposed ports of the
container to create a secure tunnel for the parent to access. If a
database container only exposes port 8080 then the linked container
will only be allowed to access port 8080 and nothing else if
inter-container communication is set to false.
For example, there is an image called ``crosbymichael/redis`` that exposes the
port 6379 and starts the Redis server. Let's name the container as ``redis``
based on that image and run it as a daemon.
.. code-block:: bash
$ sudo docker run -d --name redis crosbymichael/redis
We can issue all the commands that you would expect using the name
``redis``; start, stop, attach, using the name for our container. The
name also allows us to link other containers into this one.
Next, we can start a new web application that has a dependency on
Redis and apply a link to connect both containers. Notice that when
running our Redis server we did not use the ``-p`` flag to publish the
Redis port to the host system. Redis exposed port 6379, and this is all
we need to establish a link.
.. code-block:: bash
$ sudo docker run -t -i --link redis:db --name webapp ubuntu bash
When you specify ``--link redis:db`` you are telling Docker to link
the container named ``redis`` into this new container with the alias
``db``. Environment variables are prefixed with the alias so that the
parent container can access network and environment information from
the containers that are linked into it.
If we inspect the environment variables of the second container, we
would see all the information about the child container.
.. code-block:: bash
$ root@4c01db0b339c:/# env
HOSTNAME=4c01db0b339c
DB_NAME=/webapp/db
TERM=xterm
DB_PORT=tcp://172.17.0.8:6379
DB_PORT_6379_TCP=tcp://172.17.0.8:6379
DB_PORT_6379_TCP_PROTO=tcp
DB_PORT_6379_TCP_ADDR=172.17.0.8
DB_PORT_6379_TCP_PORT=6379
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
PWD=/
SHLVL=1
HOME=/
container=lxc
_=/usr/bin/env
root@4c01db0b339c:/#
Accessing the network information along with the environment of the
child container allows us to easily connect to the Redis service on
the specific IP and port in the environment.
.. note::
These Environment variables are only set for the first process in
the container. Similarly, some daemons (such as ``sshd``) will
scrub them when spawning shells for connection.
You can work around this by storing the initial ``env`` in a file,
or looking at ``/proc/1/environ``.
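For example, ``/proc/1/environ`` stores the variables NUL-separated, which ``tr`` can make readable:

.. code-block:: bash

    # print the first process's environment, one variable per line
    $ cat /proc/1/environ | tr '\0' '\n'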
Running ``docker ps`` shows the 2 containers, and the ``webapp/db``
alias name for the Redis container.
.. code-block:: bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp
d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db

View file

@ -1,164 +0,0 @@
:title: Share Directories via Volumes
:description: How to create and share volumes
:keywords: Examples, Usage, volume, docker, documentation, examples
.. _volume_def:
Share Directories via Volumes
=============================
A *data volume* is a specially-designated directory within one or more
containers that bypasses the :ref:`ufs_def` to provide several useful
features for persistent or shared data:
* **Data volumes can be shared and reused between containers.** This
is the feature that makes data volumes so powerful. You can use it
for anything from hot database upgrades to custom backup or
replication tools. See the example below.
* **Changes to a data volume are made directly**, without the overhead
of a copy-on-write mechanism. This is good for very large files.
* **Changes to a data volume will not be included at the next commit**
because they are not recorded as regular filesystem changes in the
top layer of the :ref:`ufs_def`
* **Volumes persist until no containers use them** as they are a reference
counted resource. The container does not need to be running to share its
volumes, but running it can help protect it against accidental removal
via ``docker rm``.
Each container can have zero or more data volumes.
.. versionadded:: v0.3.0
Getting Started
...............
Using data volumes is as simple as adding a ``-v`` parameter to the ``docker run``
command. The ``-v`` parameter can be used more than once in order to
create more volumes within the new container. To create a new container with
two new volumes::
$ docker run -v /var/volume1 -v /var/volume2 busybox true
This command will create the new container with two new volumes that
exits instantly (``true`` is pretty much the smallest, simplest program
that you can run). Once created you can mount its volumes in any other
container using the ``--volumes-from`` option; irrespective of whether the
container is running or not.
Or, you can use the VOLUME instruction in a Dockerfile to add one or more new
volumes to any container created from that image::
# BUILD-USING: docker build -t data .
# RUN-USING: docker run --name DATA data
FROM busybox
VOLUME ["/var/volume1", "/var/volume2"]
CMD ["/bin/true"]
Creating and mounting a Data Volume Container
---------------------------------------------
If you have some persistent data that you want to share between containers,
or want to use from non-persistent containers, it's best to create a named
Data Volume Container, and then to mount the data from it.
Create a named container with volumes to share (``/var/volume1`` and ``/var/volume2``)::
$ docker run -v /var/volume1 -v /var/volume2 --name DATA busybox true
Then mount those data volumes into your application containers::
$ docker run -t -i --rm --volumes-from DATA --name client1 ubuntu bash
You can use multiple ``--volumes-from`` parameters to bring together multiple
data volumes from multiple containers.
Interestingly, you can mount the volumes that came from the ``DATA`` container in
yet another container via the ``client1`` middleman container::
$ docker run -t -i --rm --volumes-from client1 --name client2 ubuntu bash
This allows you to abstract the actual data source from users of that data,
similar to :ref:`ambassador_pattern_linking <ambassador_pattern_linking>`.
If you remove containers that mount volumes, including the initial DATA container,
or the middleman, the volumes will not be deleted until there are no containers still
referencing those volumes. This allows you to upgrade, or effectively migrate data volumes
between containers.
Mount a Host Directory as a Container Volume:
---------------------------------------------
::
-v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro].
You must specify an absolute path for ``host-dir``.
If ``host-dir`` is missing from the command, then docker creates a new volume.
If ``host-dir`` is present but points to a non-existent directory on the host,
Docker will automatically create this directory and use it as the source of the
bind-mount.
Note that this is not available from a Dockerfile, because Dockerfiles are
meant to be portable and shareable. The ``host-dir`` volumes are entirely host-dependent and
might not work on any other machine.
For example::
sudo docker run -t -i -v /var/logs:/var/host_logs:ro ubuntu bash
The command above mounts the host directory ``/var/logs`` into the
container with read only permissions as ``/var/host_logs``.
.. versionadded:: v0.5.0
Note for OS/X users and remote daemon users:
--------------------------------------------
OS/X users run ``boot2docker`` to create a minimalist virtual machine running the docker daemon. That
virtual machine then launches docker commands on behalf of the OS/X command line. This means that ``host
directories`` refer to directories in the ``boot2docker`` virtual machine, not the OS/X filesystem.
Similarly, whenever the docker daemon is on a remote machine, the ``host directories`` always refer to directories on the daemon's machine.
Backup, restore, or migrate data volumes
----------------------------------------
You cannot back up volumes using ``docker export``, ``docker save`` and ``docker cp``
because they are external to images.
Instead you can use ``--volumes-from`` to start a new container that can access the
data-container's volume. For example::
$ sudo docker run --rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data
* ``--rm`` - remove the container when it exits
* ``--volumes-from DATA`` - attach to the volumes shared by the ``DATA`` container
* ``-v $(pwd):/backup`` - bind mount the current directory into the container, to write the tar file to
* ``busybox`` - a small, simple image - good for quick maintenance
* ``tar cvf /backup/backup.tar /data`` - creates an uncompressed tar file of all the files in the ``/data`` directory
Then to restore to the same container, or another that you've made elsewhere::
# create a new data container
$ sudo docker run -v /data --name DATA2 busybox true
# untar the backup files into the new container's data volume
$ sudo docker run --rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar
data/
data/sven.txt
# compare to the original container
$ sudo docker run --rm --volumes-from DATA -v `pwd`:/backup busybox ls /data
sven.txt
You can use the basic techniques above to automate backup, migration and restore
testing using your preferred tools.
Known Issues
............
* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
could indicate a permissions problem with AppArmor. Please see the
issue for a workaround.
* :issue:`2528`: the busybox container is used to make the resulting container as small and
simple as possible - whenever you need to interact with the data in the volume
you mount it into another container.

View file

@ -1,256 +0,0 @@
:title: Share Images via Repositories
:description: Repositories allow users to share images.
:keywords: repo, repositories, usage, pull image, push image, image, documentation
.. _working_with_the_repository:
Share Images via Repositories
=============================
A *repository* is a shareable collection of tagged :ref:`images<image_def>`
that together create the file systems for containers. The
repository's name is a label that indicates the provenance of the
repository, i.e. who created it and where the original copy is
located.
You can find one or more repositories hosted on a *registry*. There
can be an implicit or explicit host name as part of the repository
tag. The implicit registry is located at ``index.docker.io``, the home
of "top-level" repositories and the Central Index. This registry may
also include public "user" repositories.
Docker is not only a tool for creating and managing your own
:ref:`containers <container_def>` -- **Docker is also a tool for
sharing**. The Docker project provides a Central Registry to host
public repositories, namespaced by user, and a Central Index which
provides user authentication and search over all the public
repositories. You can host your own Registry too! Docker acts as a
client for these services via ``docker search, pull, login`` and
``push``.
Local Repositories
------------------
Docker images which have been created and labeled on your local Docker server
need to be pushed to a Public or Private registry to be shared.
.. _using_public_repositories:
Public Repositories
-------------------
There are two types of public repositories: *top-level* repositories
which are controlled by the Docker team, and *user* repositories
created by individual contributors. Anyone can read from these
repositories -- they really help people get started quickly! You could
also use :ref:`using_private_repositories` if you need to keep control
of who accesses your images, but we will only refer to public
repositories in these examples.
* Top-level repositories can easily be recognized by **not** having a
``/`` (slash) in their name. These repositories can generally be
trusted.
* User repositories always come in the form of
``<username>/<repo_name>``. This is what your published images will
look like if you push to the public Central Registry.
* Only the authenticated user can push to their *username* namespace
on the Central Registry.
* User images are not checked; it is therefore up to you whether or
not you trust the creator of the image.
.. _searching_central_index:
Find Public Images on the Central Index
---------------------------------------
You can search the Central Index `online <https://index.docker.io>`_
or using the command line interface. Searching can find images by name, user
name or description:
.. code-block:: bash
$ sudo docker help search
Usage: docker search NAME
Search the docker index for images
--no-trunc=false: Don't truncate output
$ sudo docker search centos
Found 25 results matching your query ("centos")
NAME DESCRIPTION
centos
slantview/centos-chef-solo CentOS 6.4 with chef-solo.
...
There you can see two example results: ``centos`` and
``slantview/centos-chef-solo``. The second result shows that it comes
from the public repository of a user, ``slantview/``, while the first
result (``centos``) doesn't explicitly list a repository so it comes
from the trusted Central Repository. The ``/`` character separates a
user's repository and the image name.
Once you have found the image name, you can download it:
.. code-block:: bash
# sudo docker pull <value>
$ sudo docker pull centos
Pulling repository centos
539c0211cd76: Download complete
What can you do with that image? Check out the :ref:`example_list`
and, when you're ready with your own image, come back here to learn
how to share it.
Contributing to the Central Registry
------------------------------------
Anyone can pull public images from the Central Registry, but if you
would like to share one of your own images, then you must register a
unique user name first. You can create your username and login on the
`central Docker Index online
<https://index.docker.io/account/signup/>`_, or by running
.. code-block:: bash
sudo docker login
This will prompt you for a username, which will become a public
namespace for your public repositories.
If your username is available then ``docker`` will also prompt you to
enter a password and your e-mail address. It will then automatically
log you in. Now you're ready to commit and push your own images!
.. _container_commit:
Committing a Container to a Named Image
---------------------------------------
When you make changes to an existing image, those changes get saved to
a container's file system. You can then promote that container to
become an image by making a ``commit``. In addition to converting the
container to an image, this is also your opportunity to name the
image, specifically a name that includes your user name from the
Central Docker Index (as you did a ``login`` above) and a meaningful
name for the image.
.. code-block:: bash
# format is "sudo docker commit <container_id> <username>/<imagename>"
$ sudo docker commit $CONTAINER_ID myname/kickassapp
.. _image_push:
Pushing a repository to its registry
------------------------------------
In order to push a repository to its registry, you need to have named an image
or committed your container to a named image (see above).
Now you can push this repository to the registry designated by its name
or tag.
.. code-block:: bash
# format is "docker push <username>/<repo_name>"
$ sudo docker push myname/kickassapp
.. _using_private_repositories:
Trusted Builds
--------------
Trusted Builds automate the building and updating of images from GitHub, directly
on ``docker.io`` servers. It works by adding a commit hook to your selected repository,
triggering a build and update when you push a commit.
To setup a trusted build
++++++++++++++++++++++++
#. Create a `Docker Index account <https://index.docker.io/>`_ and login.
#. Link your GitHub account through the ``Link Accounts`` menu.
#. `Configure a Trusted build <https://index.docker.io/builds/>`_.
#. Pick a GitHub project that has a ``Dockerfile`` that you want to build.
#. Pick the branch you want to build (the default is the ``master`` branch).
#. Give the Trusted Build a name.
#. Assign an optional Docker tag to the Build.
#. Specify where the ``Dockerfile`` is located. The default is ``/``.
Once the Trusted Build is configured it will automatically trigger a build, and
in a few minutes, if there are no errors, you will see your new trusted build
on the Docker Index. It will stay in sync with your GitHub repo until you
deactivate the Trusted Build.
If you want to see the status of your Trusted Builds you can go to your
`Trusted Builds page <https://index.docker.io/builds/>`_ on the Docker index,
and it will show you the status of your builds, and the build history.
Once you've created a Trusted Build you can deactivate or delete it. You cannot
however push to a Trusted Build with the ``docker push`` command. You can only
manage it by committing code to your GitHub repository.
You can create multiple Trusted Builds per repository and configure them to
point to specific ``Dockerfile``\ s or Git branches.
Private Registry
----------------
Private registries and private shared repositories are only possible by
hosting `your own registry
<https://github.com/dotcloud/docker-registry>`_. To push to or pull from a
repository on your own registry, you must prefix the tag with the address of
the registry's host (a ``.`` or ``:`` is used to identify a host), like this:
.. code-block:: bash

    # Tag to create a repository with the full registry location.
    # The location (e.g. localhost.localdomain:5000) becomes
    # a permanent part of the repository name.
    sudo docker tag 0u812deadbeef localhost.localdomain:5000/repo_name

    # Push the new repository to its home location on localhost
    sudo docker push localhost.localdomain:5000/repo_name
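
    # Pulling works the same way once the tag includes the registry host
    sudo docker pull localhost.localdomain:5000/repo_name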
Once a repository has your registry's host name as part of the tag,
you can push and pull it like any other repository, but it will
**not** be searchable (or indexed at all) in the Central Index, and
there will be no user name checking performed. Your registry will
function completely independently from the Central Index.
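The registry itself can run in a container; a minimal sketch, assuming the
``registry`` image from the Central Index and its default port:

.. code-block:: bash

    # Run your own registry, listening on port 5000
    sudo docker run -d -p 5000:5000 registry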
.. raw:: html

    <iframe width="640" height="360"
      src="//www.youtube.com/embed/CAewZCBT4PI?rel=0" frameborder="0"
      allowfullscreen></iframe>
.. seealso:: `Docker Blog: How to use your own registry
   <http://blog.docker.io/2013/07/how-to-use-your-own-registry/>`_
Authentication file
-------------------
Authentication credentials are stored in a JSON file, ``.dockercfg``, located
in your home directory. It supports multiple registry URLs.
``docker login`` will create the "https://index.docker.io/v1/" key.
``docker login https://my-registry.com`` will create the "https://my-registry.com" key.
For example:
.. code-block:: json

    {
      "https://index.docker.io/v1/": {
        "auth": "xXxXxXxXxXx=",
        "email": "email@example.com"
      },
      "https://my-registry.com": {
        "auth": "XxXxXxXxXxX=",
        "email": "email@my-registry.com"
      }
    }
The ``auth`` field contains ``base64(<username>:<password>)``.
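You can reproduce an ``auth`` value by hand; for example, with the
hypothetical credentials ``myname``/``secret``:

.. code-block:: bash

    $ echo -n "myname:secret" | base64
    bXluYW1lOnNlY3JldA==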

View file

@@ -1,78 +0,0 @@
<!DOCTYPE html>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="google-site-verification" content="UxV66EKuPe87dgnH1sbrldrx6VsoWMrx5NjwkgUFxXI" />
<meta name="google-site-verification" content="XzwpAUE5-gjq6j2F0dDqiBYxCZpHd8uVYe5Fnyt3V8Q" />
<title>{{ meta['title'] if meta and meta['title'] else title }} - Docker Documentation</title>
<meta name="description" content="{{ meta['description'] if meta }}" />
<meta name="keywords" content="{{ meta['keywords'] if meta }}" />
<!-- Swiftype tags: https://swiftype.com/documentation/meta_tags -->
<meta property='st:popularity' content='4' />
<meta property='st:type' content='docker_doc' />
{%- set url_root = pathto('', 1) %}
{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
{%- if current_version == 'latest' %}
{% set github_tag = 'master' %}
{% else %}
{% set github_tag = current_version %}
{% endif %}
<script type="text/javascript">
// This is included here for Javascript that doesn't have access to the templates.
var doc_version = "{{ current_version }}";
var doc_slug = "{{ slug }}";
</script>
{%- set css_files = css_files + ['_static/css/bootstrap.css'] %}
{%- set css_files = css_files + ['_static/pygments.css'] %}
{%- set css_files = css_files + ['_static/css/main.css'] %}
{%- set script_files =
['//code.jquery.com/jquery-1.10.1.min.js']
+ ['//fonts.googleapis.com/css?family=Cabin:400,700,400italic']
%}
{#
This part is more complex than one would hope, because filters like |cut '/index/'
are not available in Sphinx's Jinja and will make it crash (and we need 'index/' stripped out).
#}
<link rel="canonical" href="http://docs.docker.io/en/latest/
{%- for word in pagename.split('/') -%}
{%- if word != 'index' -%}
{%- if word != '' -%}
{{ word }}/
{%- endif -%}
{%- endif -%}
{%- endfor -%}
">
{%- for cssfile in css_files %}
<link rel="stylesheet" href="{{ pathto(cssfile, 1) }}" type="text/css" />
{%- endfor %}
{%- for scriptfile in script_files if scriptfile != '_static/jquery.js' %}
<script type="text/javascript" src="{{ pathto(scriptfile, 1) }}"></script>
{%- endfor %}
<link rel="shortcut icon" href="{{ pathto('_static/favicon.png', 1) }}"/>
{%- block extrahead %}{% endblock %}
</head>
<body>
<!-- body block -->
<div class="main-content">
<!-- Main section
================================================== -->
<section id="global" class="containerblock">
{% block body %}{% endblock %}
</section>
</div>
</body>
</html>

View file

@@ -1,12 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<title>Page Moved</title>
<meta http-equiv="refresh" content="0; url=http://docs.docker.io/en/latest/use/builder/">
</head>
<body>
This page has moved. Perhaps you should visit the <a href="http://docs.docker.io/en/latest/use/builder/" title="builder page">Builder page</a>
</body>
</html>

View file

@@ -1,12 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<title>Page Moved</title>
<meta http-equiv="refresh" content="0; url=http://docs.docker.io/en/latest/">
</head>
<body>
This page has moved. Perhaps you should visit the <a href="http://docs.docker.io/" title="documentation homepage">Documentation Homepage</a>
</body>
</html>

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

View file

@@ -1,477 +0,0 @@
.debug {
border: 2px dotted red !important;
box-sizing: border-box;
-moz-box-sizing: border-box;
}
body {
min-width: 940px;
font-family: "Cabin", "Helvetica Neue", Helvetica, Arial, sans-serif;
}
p a {
text-decoration: underline;
}
p a.btn {
text-decoration: none;
}
.brand.logo a {
text-decoration: none;
}
.navbar .navbar-inner {
padding-left: 0px;
padding-right: 0px;
}
.navbar .nav li a {
padding: 24.2857142855px 17px 24.2857142855px;
color: #777777;
text-decoration: none;
text-shadow: 0 1px 0 #f2f2f2;
}
.navbar .nav > li {
float: left;
}
.nav-underline {
height: 6px;
background-color: #71afc0;
}
.nav-login li a {
color: white;
padding: 10px 15px 10px;
}
.navbar .brand {
margin-left: 0px;
float: left;
display: block;
}
.navbar-inner {
min-height: 70px;
padding-left: 20px;
padding-right: 20px;
background-color: #ededed;
background-image: -moz-linear-gradient(top, #f2f2f2, #e5e5e5);
background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#f2f2f2), to(#e5e5e5));
background-image: -webkit-linear-gradient(top, #f2f2f2, #e5e5e5);
background-image: -o-linear-gradient(top, #f2f2f2, #e5e5e5);
background-image: linear-gradient(to bottom, #f2f2f2, #e5e5e5);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2f2f2', endColorstr='#ffe5e5e5', GradientType=0);
border: 1px solid #c7c7c7;
-webkit-border-radius: 4px;
-moz-border-radius: 4px;
border-radius: 4px;
-webkit-box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065);
-moz-box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065);
box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065);
}
.brand-logo a {
color: white;
}
.brand-logo a img {
width: auto;
}
.inline-icon {
margin-bottom: 6px;
}
.row {
margin-top: 15px;
margin-bottom: 15px;
}
div[class*='span'] {
-moz-box-sizing: border-box;
box-sizing: border-box;
}
.box {
padding: 30px;
background-color: white;
margin-top: 8px;
}
.paper {
background-color: white;
padding-top: 30px;
padding-bottom: 30px;
}
.copy-headline {
margin-top: 0px;
}
.box h1,
.box h2,
.box h3,
.box h4 {
margin-top: -5px;
}
.nested {
padding: 30px;
}
.box.div {
padding: 30px;
}
span.read-more {
margin-left: 15px;
white-space: nowrap;
}
.forcetopalign {
margin-top: 15px !important;
}
.forcetopmargin {
margin-top: 23px !important;
}
.forceleftalign {
margin-left: 15px !important;
}
.forceleftmargin {
margin-left: 21px !important;
}
.textcenter {
text-align: center;
}
.textright {
text-align: right;
}
.textsmaller {
font-size: 12px;
}
.modal-backdrop {
opacity: 0.4;
}
/* generic page copy styles */
.copy-headline h1 {
font-size: 21px;
}
/* =======================
Sticky footer
======================= */
html,
body {
height: 100%;
/* The html and body elements cannot have any padding or margin. */
}
/* Wrapper for page content to push down footer */
#wrap {
min-height: 100%;
height: auto !important;
height: 100%;
/* Negative indent footer by its height */
margin: 0 auto -280px;
}
/* Set the fixed height of the footer here */
#push-the-footer,
#footer {
height: 280px;
}
.main-row {
padding-top: 50px;
}
#footer .footer {
margin-top: 160px;
}
#footer .footer .ligaturesymbols {
font-size: 30px;
color: black;
}
#footer .footer .ligaturesymbols a {
color: black;
}
#footer .footer .footerlist h3,
#footer .footer .footerlist h4 {
/* correct the top alignment */
margin-top: 0px;
}
.footer-landscape-image {
position: absolute;
bottom: 0;
margin-bottom: 0;
background-image: url('https://www.docker.io/static/img/website-footer_clean.svg');
background-repeat: repeat-x;
height: 280px;
}
.main-row {
margin-top: 40px;
}
.sidebar {
width: 215px;
float: left;
}
.main-content {
padding: 16px 18px;
margin-left: 230px;
/* space for sidebar */
}
/* =======================
Social footer
======================= */
.social {
margin-left: 0px;
margin-top: 15px;
}
.social .twitter,
.social .github,
.social .googleplus,
.social .facebook,
.social .slideshare,
.social .linkedin,
.social .flickr,
.social .youtube,
.social .reddit {
background: url("../img/social/docker_social_logos.png") no-repeat transparent;
display: inline-block;
height: 32px;
overflow: hidden;
text-indent: 9999px;
width: 32px;
margin-right: 5px;
}
.social :hover {
-webkit-transform: rotate(-10deg);
-moz-transform: rotate(-10deg);
-o-transform: rotate(-10deg);
-ms-transform: rotate(-10deg);
transform: rotate(-10deg);
}
.social .twitter {
background-position: -160px 0px;
}
.social .reddit {
background-position: -256px 0px;
}
.social .github {
background-position: -64px 0px;
}
.social .googleplus {
background-position: -96px 0px;
}
.social .facebook {
background-position: 0px 0px;
}
.social .slideshare {
background-position: -128px 0px;
}
.social .youtube {
background-position: -192px 0px;
}
.social .flickr {
background-position: -32px 0px;
}
.social .linkedin {
background-position: -224px 0px;
}
form table th {
vertical-align: top;
text-align: right;
white-space: nowrap;
}
form .labeldd label {
font-weight: bold;
}
form .helptext {
font-size: 12px;
margin-top: -4px;
margin-bottom: 10px;
}
form .fielddd input {
width: 250px;
}
form .error {
color: #a30000;
}
div.alert.alert-block {
margin-bottom: 15px;
}
/* ======================= =======================
Documentation
========================= ========================= */
/* =======================
Styles for the sidebar
========================= */
.page-title {
background-color: white;
border: 1px solid transparent;
text-align: center;
width: 100%;
}
.page-title h4 {
font-size: 20px;
}
.bs-docs-sidebar {
padding-left: 5px;
max-width: 100%;
box-sizing: border-box;
-moz-box-sizing: border-box;
margin-top: 18px;
}
.bs-docs-sidebar ul {
list-style: none;
margin-left: 0px;
}
.bs-docs-sidebar .toctree-l2 > ul {
width: 100%;
}
.bs-docs-sidebar ul > li.toctree-l1.has-children {
background-image: url('../img/menu_arrow_right.gif');
background-repeat: no-repeat;
background-position: 13px 13px;
list-style-type: none;
padding: 0px 0px 0px 0px;
vertical-align: middle;
}
.bs-docs-sidebar ul > li.toctree-l1.has-children.open {
background-image: url('../img/menu_arrow_down.gif');
}
.bs-docs-sidebar ul > li > a {
box-sizing: border-box;
-moz-box-sizing: border-box;
width: 100%;
display: inline-block;
padding-top: 8px;
padding-bottom: 8px;
padding-left: 35px;
padding-right: 20px;
font-size: 14px;
border-bottom: 1.5px solid #595959;
line-height: 20px;
}
.bs-docs-sidebar ul > li:first-child.active > a {
border-top: 1.5px solid #595959;
}
.bs-docs-sidebar ul > li:last-child > a {
border-bottom: none;
}
.bs-docs-sidebar ul > li:last-child.active > a {
border-bottom: 1.5px solid #595959;
}
.bs-docs-sidebar ul > li.active > a {
border-right: 1.5px solid #595959;
border-left: 1.5px solid #595959;
color: #394d54;
}
.bs-docs-sidebar ul > li:hover {
background-color: #e8e8e8;
}
.bs-docs-sidebar.toctree-l3 ul {
display: inherit;
margin-left: 15px;
font-size: smaller;
}
.bs-docs-sidebar .toctree-l3 a {
border: none;
font-size: 12px;
line-height: 15px;
}
.bs-docs-sidebar ul > li > ul {
display: none;
}
.bs-docs-sidebar ul > li.current > ul {
display: inline-block;
padding-left: 0px;
width: 100%;
}
.toctree-l2.current > a {
font-weight: bold;
}
.toctree-l2.current {
border: 1.5px solid #595959;
color: #394d54;
}
/* =====================================
Styles for the floating version widget
====================================== */
.version-flyer {
position: fixed;
float: right;
right: 0;
bottom: 40px;
background-color: #E0E0E0;
border: 1px solid #88BABC;
padding: 5px;
font-size: larger;
max-width: 300px;
}
.version-flyer .content {
padding-right: 45px;
margin-top: 7px;
margin-left: 7px;
background-image: url('../img/container3.png');
background-position: right center;
background-repeat: no-repeat;
}
.version-flyer .active-slug {
visibility: visible;
display: inline-block;
font-weight: bolder;
}
.version-flyer:hover .alternative {
animation-duration: 1s;
display: inline-block;
}
.version-flyer .version-note {
font-size: 16px;
color: black;
}
/* =====================================
Styles for headerlinks
====================================== */
h1:hover > a.headerlink,
h2:hover > a.headerlink,
h3:hover > a.headerlink,
h4:hover > a.headerlink,
h5:hover > a.headerlink,
h6:hover > a.headerlink,
dt:hover > a.headerlink {
visibility: visible;
}
.headerlink {
font-size: smaller;
color: #666;
font-weight: bold;
float: right;
visibility: hidden;
}
h2, h3, h4, h5, h6 {
margin-top: 0.7em;
}
/* =====================================
Miscellaneous information
====================================== */
.admonition.warning,
.admonition.note,
.admonition.seealso,
.admonition.todo {
border: 3px solid black;
padding: 10px;
margin: 5px auto 10px;
}
.admonition .admonition-title {
font-size: larger;
}
.admonition.warning,
.admonition.danger {
border-color: #ac0004;
}
.admonition.note {
border-color: #cbc200;
}
.admonition.todo {
border-color: orange;
}
.admonition.seealso {
border-color: #23cb1f;
}
/* Add styles for other types of comments */
.versionchanged,
.versionadded,
.versionmodified,
.deprecated {
font-size: larger;
font-weight: bold;
}
.versionchanged {
color: lightseagreen;
}
.versionadded {
color: mediumblue;
}
.deprecated {
color: orangered;
}

View file

@@ -1,691 +0,0 @@
// Main CSS configuration file
// by Thatcher Peskens, thatcher@dotcloud.com
//
// Please note variables.less is customized to include custom font, background-color, and link colors.
@import "variables.less";
// Variables for main.less
// -----------------------
@box-top-margin: 8px;
@box-padding-size: 30px;
@docker-background-color: #71AFC0;
@very-dark-sea-green: #394D54;
// Custom colors for Docker
// --------------------------
@gray-super-light: #F2F2F2;
@deep-red: #A30000;
@deep-blue: #1B2033;
@deep-green: #007035;
@link-blue: #213B8F;
.debug {
border: 2px dotted red !important;
box-sizing: border-box;
-moz-box-sizing: border-box;
}
// Other custom colors for Docker
// --------------------------
// ** are defined in sources/less/variables **
//@import "bootstrap/variables.less";
// Styles generic for each and every page
// ----------------------------------- // -----------------------------------
// moving body down to make place for fixed navigation
body {
min-width: 940px;
font-family: @font-family-base;
}
p a {
text-decoration: underline;
&.btn {
text-decoration: none;
}
}
.brand.logo a {
text-decoration: none;
}
// Styles for top navigation
// ----------------------------------
.navbar .navbar-inner {
padding-left: 0px;
padding-right: 0px;
}
.navbar .nav {
li a {
padding: ((@navbar-height - @line-height-base) / 2) 17px ((@navbar-height - @line-height-base) / 2);
color: #777777;
text-decoration: none;
text-shadow: 0 1px 0 #f2f2f2;
}
}
.navbar .nav > li {
float: left;
}
.nav-underline {
height: 6px;
background-color: @docker-background-color;
}
.nav-login {
li {
a {
color: white;
padding: 10px 15px 10px;
}
}
}
.navbar .brand {
margin-left: 0px;
float: left;
display: block;
}
.navbar-inner {
min-height: 70px;
padding-left: 20px;
padding-right: 20px;
background-color: #ededed;
background-image: -moz-linear-gradient(top, #f2f2f2, #e5e5e5);
background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#f2f2f2), to(#e5e5e5));
background-image: -webkit-linear-gradient(top, #f2f2f2, #e5e5e5);
background-image: -o-linear-gradient(top, #f2f2f2, #e5e5e5);
background-image: linear-gradient(to bottom, #f2f2f2, #e5e5e5);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2f2f2', endColorstr='#ffe5e5e5', GradientType=0);
border: 1px solid #c7c7c7;
-webkit-border-radius: 4px;
-moz-border-radius: 4px;
border-radius: 4px;
-webkit-box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065);
-moz-box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065);
box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065);
}
.brand-logo a {
color: white;
img {
width: auto;
}
}
.logo {
// background-color: #A30000;
// color: white;
}
.inline-icon {
margin-bottom: 6px;
}
// Bootstrap elements
// ----------------------------------
.row {
margin-top: 15px;
margin-bottom: 15px;
}
.container {
// background-color: green;
}
// Styles on blocks of content
// ----------------------------------
// everything which is a block should have box-sizing: border-box;
div[class*='span']
{
-moz-box-sizing: border-box;
box-sizing: border-box;
}
// Box for making white with a border, and some nice spacings
.box {
padding: @box-padding-size;
background-color: white;
margin-top: @box-top-margin;
}
.paper {
background-color: white;
padding-top: 30px;
padding-bottom: 30px;
}
.copy-headline {
margin-top: 0px;
// border-bottom: 1.2px solid @veryDarkSeaGreen;
}
.box {
h1, h2, h3, h4 {
margin-top: -5px;
}
}
.nested {
padding: @box-padding-size;
}
.box.div {
padding: @box-padding-size;
}
span.read-more {
margin-left: 15px;
white-space: nowrap;
}
// set a top margin of @box-top-margin + 8 px to make it show a margin
//instead of the div being flush against the side. Typically only
// required for a stacked div in a column, w.o. using row.
.forcetopalign {
margin-top: 15px !important;
}
.forcetopmargin {
margin-top: 23px !important;
}
.forceleftalign {
margin-left: 15px !important;
}
.forceleftmargin {
margin-left: 21px !important;
}
// simple text aligns
.textcenter {
text-align: center;
}
.textright {
text-align: right;
}
.textsmaller {
font-size: @font-size-small;
}
.modal-backdrop {
opacity: 0.4;
}
/* generic page copy styles */
.copy-headline h1 {
font-size: 21px;
}
/* =======================
Sticky footer
======================= */
@sticky-footer-height: 280px;
html,
body {
height: 100%;
/* The html and body elements cannot have any padding or margin. */
}
/* Wrapper for page content to push down footer */
#wrap {
min-height: 100%;
height: auto !important;
height: 100%;
/* Negative indent footer by its height */
margin: 0 auto -@sticky-footer-height;
}
/* Set the fixed height of the footer here */
#push-the-footer,
#footer {
height: @sticky-footer-height;
}
#footer {
// margin-bottom: -60px;
// margin-top: 160px;
}
.main-row {
padding-top: @navbar-height;
}
// Styles on the footer
// ----------------------------------
//
#footer .footer {
margin-top: 160px;
.ligaturesymbols {
font-size: 30px;
color: black;
a {
color: black;
}
}
.footerlist {
h3, h4 {
/* correct the top alignment */
margin-top: 0px;
}
}
}
.footer-landscape-image {
position: absolute;
bottom: 0;
margin-bottom: 0;
background-image: url('https://www.docker.io/static/img/website-footer_clean.svg');
background-repeat: repeat-x;
height: @sticky-footer-height;
}
.main-row {
margin-top: 40px;
}
.sidebar {
width: 215px;
float: left;
}
.main-content {
padding: 16px 18px;
margin-left: 230px; /* space for sidebar */
}
/* =======================
Social footer
======================= */
.social {
margin-left: 0px;
margin-top: 15px;
}
.social {
.twitter, .github, .googleplus, .facebook, .slideshare, .linkedin, .flickr, .youtube, .reddit {
background: url("../img/social/docker_social_logos.png") no-repeat transparent;
display: inline-block;
height: 32px;
overflow: hidden;
text-indent: 9999px;
width: 32px;
margin-right: 5px;
}
}
.social :hover {
-webkit-transform: rotate(-10deg);
-moz-transform: rotate(-10deg);
-o-transform: rotate(-10deg);
-ms-transform: rotate(-10deg);
transform: rotate(-10deg);
}
.social .twitter {
background-position: -160px 0px;
}
.social .reddit {
background-position: -256px 0px;
}
.social .github {
background-position: -64px 0px;
}
.social .googleplus {
background-position: -96px 0px;
}
.social .facebook {
background-position: -0px 0px;
}
.social .slideshare {
background-position: -128px 0px;
}
.social .youtube {
background-position: -192px 0px;
}
.social .flickr {
background-position: -32px 0px;
}
.social .linkedin {
background-position: -224px 0px;
}
// Styles on the forms
// ----------------------------------
form table {
th {
vertical-align: top;
text-align: right;
white-space: nowrap;
}
}
form {
.labeldd label {
font-weight: bold;
}
.helptext {
font-size: @font-size-small;
margin-top: -4px;
margin-bottom: 10px;
}
.fielddd input {
width: 250px;
}
.error {
color: @deep-red;
}
[type=submit] {
// margin-top: -8px;
}
}
div.alert.alert-block {
margin-bottom: 15px;
}
/* ======================= =======================
Documentation
========================= ========================= */
/* =======================
Styles for the sidebar
========================= */
@sidebar-navigation-border: 1.5px solid #595959;
@sidebar-navigation-width: 225px;
.page-title {
// border-bottom: 1px solid #bbbbbb;
background-color: white;
border: 1px solid transparent;
text-align: center;
width: 100%;
h4 {
font-size: 20px;
}
}
.bs-docs-sidebar {
padding-left: 5px;
max-width: 100%;
box-sizing: border-box;
-moz-box-sizing: border-box;
margin-top: 18px;
ul {
list-style: none;
margin-left: 0px;
}
.toctree-l2 > ul {
width: 100%;
}
ul > li {
&.toctree-l1.has-children {
background-image: url('../img/menu_arrow_right.gif');
background-repeat: no-repeat;
background-position: 13px 13px;
list-style-type: none;
// margin-left: px;
padding: 0px 0px 0px 0px;
vertical-align: middle;
&.open {
background-image: url('../img/menu_arrow_down.gif');
}
}
& > a {
box-sizing: border-box;
-moz-box-sizing: border-box;
width: 100%;
display:inline-block;
padding-top: 8px;
padding-bottom: 8px;
padding-left: 35px;
padding-right: 20px;
font-size: @font-size-base;
border-bottom: @sidebar-navigation-border;
line-height: 20px;
}
&:first-child.active > a {
border-top: @sidebar-navigation-border;
}
&:last-child > a {
border-bottom: none;
}
&:last-child.active > a {
border-bottom: @sidebar-navigation-border;
}
&.active > a {
border-right: @sidebar-navigation-border;
border-left: @sidebar-navigation-border;
color: @very-dark-sea-green;
}
&:hover {
background-color: #e8e8e8;
}
}
&.toctree-l3 ul {
display: inherit;
margin-left: 15px;
font-size: smaller;
}
.toctree-l3 a {
border: none;
font-size: 12px;
line-height: 15px;
}
ul > li > ul {
display: none;
}
ul > li.current > ul {
display: inline-block;
padding-left: 0px;
width: 100%;
}
}
.toctree-l2 {
&.current > a {
font-weight: bold;
}
&.current {
border: 1.5px solid #595959;
color: #394d54;
}
}
/* =====================================
Styles for the floating version widget
====================================== */
.version-flyer {
position: fixed;
float: right;
right: 0;
bottom: 40px;
background-color: #E0E0E0;
border: 1px solid #88BABC;
padding: 5px;
font-size: larger;
max-width: 300px;
.content {
padding-right: 45px;
margin-top: 7px;
margin-left: 7px;
background-image: url('../img/container3.png');
background-position: right center;
background-repeat: no-repeat;
}
.alternative {
}
.active-slug {
visibility: visible;
display: inline-block;
font-weight: bolder;
}
&:hover .alternative {
animation-duration: 1s;
display: inline-block;
}
.version-note {
font-size: 16px;
color: black;
}
}
/* =====================================
Styles for headerlinks
====================================== */
h1:hover > a.headerlink,
h2:hover > a.headerlink,
h3:hover > a.headerlink,
h4:hover > a.headerlink,
h5:hover > a.headerlink,
h6:hover > a.headerlink,
dt:hover > a.headerlink {
visibility: visible;
}
.headerlink {
font-size: smaller;
color: #666;
font-weight: bold;
float: right;
visibility: hidden;
}
h2, h3, h4, h5, h6 {
margin-top: 0.7em;
}
/* =====================================
Miscellaneous information
====================================== */
.admonition {
&.warning, &.note, &.seealso, &.todo {
border: 3px solid black;
padding: 10px;
margin: 5px auto 10px;
}
.admonition-title {
font-size: larger;
}
&.warning, &.danger {
border-color: #ac0004;
}
&.note {
border-color: #cbc200;
}
&.todo {
border-color: orange;
}
&.seealso {
border-color: #23cb1f;
}
}
/* Add styles for other types of comments */
.versionchanged,
.versionadded,
.versionmodified,
.deprecated {
font-size: larger;
font-weight: bold;
}
.versionchanged {
color: lightseagreen;
}
.versionadded {
color: mediumblue;
}
.deprecated {
color: orangered;
}

View file

@@ -1,622 +0,0 @@
//
// Variables
// --------------------------------------------------
// Global values
// --------------------------------------------------
// Grays
// -------------------------
@gray-darker: lighten(#000, 13.5%); // #222
@gray-dark: lighten(#000, 20%); // #333
@gray: lighten(#000, 33.5%); // #555
@gray-light: lighten(#000, 60%); // #999
@gray-lighter: lighten(#000, 93.5%); // #eee
// Brand colors
// -------------------------
@brand-primary: #428bca;
@brand-success: #5cb85c;
@brand-warning: #f0ad4e;
@brand-danger: #d9534f;
@brand-info: #5bc0de;
// Scaffolding
// -------------------------
@body-bg: #fff;
@text-color: @gray-dark;
// Links
// -------------------------
@link-color: @brand-primary;
@link-hover-color: darken(@link-color, 15%);
// Typography
// -------------------------
@font-family-sans-serif: "Cabin", "Helvetica Neue", Helvetica, Arial, sans-serif;
@font-family-serif: Georgia, "Times New Roman", Times, serif;
@font-family-monospace: Monaco, Menlo, Consolas, "Courier New", monospace;
@font-family-base: @font-family-sans-serif;
@font-size-base: 14px;
@font-size-large: ceil(@font-size-base * 1.25); // ~18px
@font-size-small: ceil(@font-size-base * 0.85); // ~12px
@line-height-base: 1.428571429; // 20/14
@line-height-computed: floor(@font-size-base * @line-height-base); // ~20px
@headings-font-family: @font-family-base;
@headings-font-weight: 500;
@headings-line-height: 1.1;
// Iconography
// -------------------------
@icon-font-path: "../fonts/";
@icon-font-name: "glyphicons-halflings-regular";
// Components
// -------------------------
// Based on 14px font-size and 1.428 line-height (~20px to start)
@padding-base-vertical: 6px;
@padding-base-horizontal: 12px;
@padding-large-vertical: 10px;
@padding-large-horizontal: 16px;
@padding-small-vertical: 5px;
@padding-small-horizontal: 10px;
@line-height-large: 1.33;
@line-height-small: 1.5;
@border-radius-base: 4px;
@border-radius-large: 6px;
@border-radius-small: 3px;
@component-active-bg: @brand-primary;
@caret-width-base: 4px;
@caret-width-large: 5px;
// Tables
// -------------------------
@table-cell-padding: 8px;
@table-condensed-cell-padding: 5px;
@table-bg: transparent; // overall background-color
@table-bg-accent: #f9f9f9; // for striping
@table-bg-hover: #f5f5f5;
@table-bg-active: @table-bg-hover;
@table-border-color: #ddd; // table and cell border
// Buttons
// -------------------------
@btn-font-weight: normal;
@btn-default-color: #333;
@btn-default-bg: #fff;
@btn-default-border: #ccc;
@btn-primary-color: #fff;
@btn-primary-bg: @brand-primary;
@btn-primary-border: darken(@btn-primary-bg, 5%);
@btn-success-color: #fff;
@btn-success-bg: @brand-success;
@btn-success-border: darken(@btn-success-bg, 5%);
@btn-warning-color: #fff;
@btn-warning-bg: @brand-warning;
@btn-warning-border: darken(@btn-warning-bg, 5%);
@btn-danger-color: #fff;
@btn-danger-bg: @brand-danger;
@btn-danger-border: darken(@btn-danger-bg, 5%);
@btn-info-color: #fff;
@btn-info-bg: @brand-info;
@btn-info-border: darken(@btn-info-bg, 5%);
@btn-link-disabled-color: @gray-light;
// Forms
// -------------------------
@input-bg: #fff;
@input-bg-disabled: @gray-lighter;
@input-color: @gray;
@input-border: #ccc;
@input-border-radius: @border-radius-base;
@input-border-focus: #66afe9;
@input-color-placeholder: @gray-light;
@input-height-base: (@line-height-computed + (@padding-base-vertical * 2) + 2);
@input-height-large: (floor(@font-size-large * @line-height-large) + (@padding-large-vertical * 2) + 2);
@input-height-small: (floor(@font-size-small * @line-height-small) + (@padding-small-vertical * 2) + 2);
@legend-color: @gray-dark;
@legend-border-color: #e5e5e5;
@input-group-addon-bg: @gray-lighter;
@input-group-addon-border-color: @input-border;
// Dropdowns
// -------------------------
@dropdown-bg: #fff;
@dropdown-border: rgba(0,0,0,.15);
@dropdown-fallback-border: #ccc;
@dropdown-divider-bg: #e5e5e5;
@dropdown-link-active-color: #fff;
@dropdown-link-active-bg: @component-active-bg;
@dropdown-link-color: @gray-dark;
@dropdown-link-hover-color: #fff;
@dropdown-link-hover-bg: @dropdown-link-active-bg;
@dropdown-link-disabled-color: @gray-light;
@dropdown-header-color: @gray-light;
@dropdown-caret-color: #000;
// COMPONENT VARIABLES
// --------------------------------------------------
// Z-index master list
// -------------------------
// Used for a bird's eye view of components dependent on the z-axis
// Try to avoid customizing these :)
@zindex-navbar: 1000;
@zindex-dropdown: 1000;
@zindex-popover: 1010;
@zindex-tooltip: 1030;
@zindex-navbar-fixed: 1030;
@zindex-modal-background: 1040;
@zindex-modal: 1050;
// Media queries breakpoints
// --------------------------------------------------
// Extra small screen / phone
@screen-xs: 480px;
@screen-phone: @screen-xs;
// Small screen / tablet
@screen-sm: 768px;
@screen-tablet: @screen-sm;
// Medium screen / desktop
@screen-md: 992px;
@screen-desktop: @screen-md;
// Large screen / wide desktop
@screen-lg: 1600px;
@screen-lg-desktop: @screen-lg;
// So media queries don't overlap when required, provide a maximum
@screen-xs-max: (@screen-sm - 1);
@screen-sm-max: (@screen-md - 1);
@screen-md-max: (@screen-lg - 1);
// Grid system
// --------------------------------------------------
// Number of columns in the grid system
@grid-columns: 12;
// Padding, to be divided by two and applied to the left and right of all columns
@grid-gutter-width: 30px;
// Point at which the navbar stops collapsing
@grid-float-breakpoint: @screen-desktop;
// Navbar
// -------------------------
// Basics of a navbar
@navbar-height: 50px;
@navbar-margin-bottom: @line-height-computed;
@navbar-default-color: #777;
@navbar-default-bg: #f8f8f8;
@navbar-default-border: darken(@navbar-default-bg, 6.5%);
@navbar-border-radius: @border-radius-base;
@navbar-padding-horizontal: floor(@grid-gutter-width / 2);
@navbar-padding-vertical: ((@navbar-height - @line-height-computed) / 2);
// Navbar links
@navbar-default-link-color: #777;
@navbar-default-link-hover-color: #333;
@navbar-default-link-hover-bg: transparent;
@navbar-default-link-active-color: #555;
@navbar-default-link-active-bg: darken(@navbar-default-bg, 6.5%);
@navbar-default-link-disabled-color: #ccc;
@navbar-default-link-disabled-bg: transparent;
// Navbar brand label
@navbar-default-brand-color: @navbar-default-link-color;
@navbar-default-brand-hover-color: darken(@navbar-default-link-color, 10%);
@navbar-default-brand-hover-bg: transparent;
// Navbar toggle
@navbar-default-toggle-hover-bg: #ddd;
@navbar-default-toggle-icon-bar-bg: #ccc;
@navbar-default-toggle-border-color: #ddd;
// Inverted navbar
//
// Reset inverted navbar basics
@navbar-inverse-color: @gray-light;
@navbar-inverse-bg: #222;
@navbar-inverse-border: darken(@navbar-inverse-bg, 10%);
// Inverted navbar links
@navbar-inverse-link-color: @gray-light;
@navbar-inverse-link-hover-color: #fff;
@navbar-inverse-link-hover-bg: transparent;
@navbar-inverse-link-active-color: @navbar-inverse-link-hover-color;
@navbar-inverse-link-active-bg: darken(@navbar-inverse-bg, 10%);
@navbar-inverse-link-disabled-color: #444;
@navbar-inverse-link-disabled-bg: transparent;
// Inverted navbar brand label
@navbar-inverse-brand-color: @navbar-inverse-link-color;
@navbar-inverse-brand-hover-color: #fff;
@navbar-inverse-brand-hover-bg: transparent;
// Inverted navbar search
// Normal navbar needs no special styles or vars
@navbar-inverse-search-bg: lighten(@navbar-inverse-bg, 25%);
@navbar-inverse-search-bg-focus: #fff;
@navbar-inverse-search-border: @navbar-inverse-bg;
@navbar-inverse-search-placeholder-color: #ccc;
// Inverted navbar toggle
@navbar-inverse-toggle-hover-bg: #333;
@navbar-inverse-toggle-icon-bar-bg: #fff;
@navbar-inverse-toggle-border-color: #333;
// Navs
// -------------------------
@nav-link-padding: 10px 15px;
@nav-link-hover-bg: @gray-lighter;
@nav-disabled-link-color: @gray-light;
@nav-disabled-link-hover-color: @gray-light;
@nav-open-link-hover-color: #fff;
@nav-open-caret-border-color: #fff;
// Tabs
@nav-tabs-border-color: #ddd;
@nav-tabs-link-hover-border-color: @gray-lighter;
@nav-tabs-active-link-hover-bg: @body-bg;
@nav-tabs-active-link-hover-color: @gray;
@nav-tabs-active-link-hover-border-color: #ddd;
@nav-tabs-justified-link-border-color: #ddd;
@nav-tabs-justified-active-link-border-color: @body-bg;
// Pills
@nav-pills-active-link-hover-bg: @component-active-bg;
@nav-pills-active-link-hover-color: #fff;
// Pagination
// -------------------------
@pagination-bg: #fff;
@pagination-border: #ddd;
@pagination-hover-bg: @gray-lighter;
@pagination-active-bg: @brand-primary;
@pagination-active-color: #fff;
@pagination-disabled-color: @gray-light;
// Pager
// -------------------------
@pager-border-radius: 15px;
@pager-disabled-color: @gray-light;
// Jumbotron
// -------------------------
@jumbotron-padding: 30px;
@jumbotron-color: inherit;
@jumbotron-bg: @gray-lighter;
@jumbotron-heading-color: inherit;
// Form states and alerts
// -------------------------
@state-warning-text: #c09853;
@state-warning-bg: #fcf8e3;
@state-warning-border: darken(spin(@state-warning-bg, -10), 3%);
@state-danger-text: #b94a48;
@state-danger-bg: #f2dede;
@state-danger-border: darken(spin(@state-danger-bg, -10), 3%);
@state-success-text: #468847;
@state-success-bg: #dff0d8;
@state-success-border: darken(spin(@state-success-bg, -10), 5%);
@state-info-text: #3a87ad;
@state-info-bg: #d9edf7;
@state-info-border: darken(spin(@state-info-bg, -10), 7%);
// Tooltips
// -------------------------
@tooltip-max-width: 200px;
@tooltip-color: #fff;
@tooltip-bg: #000;
@tooltip-arrow-width: 5px;
@tooltip-arrow-color: @tooltip-bg;
// Popovers
// -------------------------
@popover-bg: #fff;
@popover-max-width: 276px;
@popover-border-color: rgba(0,0,0,.2);
@popover-fallback-border-color: #ccc;
@popover-title-bg: darken(@popover-bg, 3%);
@popover-arrow-width: 10px;
@popover-arrow-color: #fff;
@popover-arrow-outer-width: (@popover-arrow-width + 1);
@popover-arrow-outer-color: rgba(0,0,0,.25);
@popover-arrow-outer-fallback-color: #999;
// Labels
// -------------------------
@label-default-bg: @gray-light;
@label-primary-bg: @brand-primary;
@label-success-bg: @brand-success;
@label-info-bg: @brand-info;
@label-warning-bg: @brand-warning;
@label-danger-bg: @brand-danger;
@label-color: #fff;
@label-link-hover-color: #fff;
// Modals
// -------------------------
@modal-inner-padding: 20px;
@modal-title-padding: 15px;
@modal-title-line-height: @line-height-base;
@modal-content-bg: #fff;
@modal-content-border-color: rgba(0,0,0,.2);
@modal-content-fallback-border-color: #999;
@modal-backdrop-bg: #000;
@modal-header-border-color: #e5e5e5;
@modal-footer-border-color: @modal-header-border-color;
// Alerts
// -------------------------
@alert-padding: 15px;
@alert-border-radius: @border-radius-base;
@alert-link-font-weight: bold;
@alert-success-bg: @state-success-bg;
@alert-success-text: @state-success-text;
@alert-success-border: @state-success-border;
@alert-info-bg: @state-info-bg;
@alert-info-text: @state-info-text;
@alert-info-border: @state-info-border;
@alert-warning-bg: @state-warning-bg;
@alert-warning-text: @state-warning-text;
@alert-warning-border: @state-warning-border;
@alert-danger-bg: @state-danger-bg;
@alert-danger-text: @state-danger-text;
@alert-danger-border: @state-danger-border;
// Progress bars
// -------------------------
@progress-bg: #f5f5f5;
@progress-bar-color: #fff;
@progress-bar-bg: @brand-primary;
@progress-bar-success-bg: @brand-success;
@progress-bar-warning-bg: @brand-warning;
@progress-bar-danger-bg: @brand-danger;
@progress-bar-info-bg: @brand-info;
// List group
// -------------------------
@list-group-bg: #fff;
@list-group-border: #ddd;
@list-group-border-radius: @border-radius-base;
@list-group-hover-bg: #f5f5f5;
@list-group-active-color: #fff;
@list-group-active-bg: @component-active-bg;
@list-group-active-border: @list-group-active-bg;
@list-group-link-color: #555;
@list-group-link-heading-color: #333;
// Panels
// -------------------------
@panel-bg: #fff;
@panel-inner-border: #ddd;
@panel-border-radius: @border-radius-base;
@panel-footer-bg: #f5f5f5;
@panel-default-text: @gray-dark;
@panel-default-border: #ddd;
@panel-default-heading-bg: #f5f5f5;
@panel-primary-text: #fff;
@panel-primary-border: @brand-primary;
@panel-primary-heading-bg: @brand-primary;
@panel-success-text: @state-success-text;
@panel-success-border: @state-success-border;
@panel-success-heading-bg: @state-success-bg;
@panel-warning-text: @state-warning-text;
@panel-warning-border: @state-warning-border;
@panel-warning-heading-bg: @state-warning-bg;
@panel-danger-text: @state-danger-text;
@panel-danger-border: @state-danger-border;
@panel-danger-heading-bg: @state-danger-bg;
@panel-info-text: @state-info-text;
@panel-info-border: @state-info-border;
@panel-info-heading-bg: @state-info-bg;
// Thumbnails
// -------------------------
@thumbnail-padding: 4px;
@thumbnail-bg: @body-bg;
@thumbnail-border: #ddd;
@thumbnail-border-radius: @border-radius-base;
@thumbnail-caption-color: @text-color;
@thumbnail-caption-padding: 9px;
// Wells
// -------------------------
@well-bg: #f5f5f5;
// Badges
// -------------------------
@badge-color: #fff;
@badge-link-hover-color: #fff;
@badge-bg: @gray-light;
@badge-active-color: @link-color;
@badge-active-bg: #fff;
@badge-font-weight: bold;
@badge-line-height: 1;
@badge-border-radius: 10px;
// Breadcrumbs
// -------------------------
@breadcrumb-bg: #f5f5f5;
@breadcrumb-color: #ccc;
@breadcrumb-active-color: @gray-light;
// Carousel
// ------------------------
@carousel-text-shadow: 0 1px 2px rgba(0,0,0,.6);
@carousel-control-color: #fff;
@carousel-control-width: 15%;
@carousel-control-opacity: .5;
@carousel-control-font-size: 20px;
@carousel-indicator-active-bg: #fff;
@carousel-indicator-border-color: #fff;
@carousel-caption-color: #fff;
// Close
// ------------------------
@close-color: #000;
@close-font-weight: bold;
@close-text-shadow: 0 1px 0 #fff;
// Code
// ------------------------
@code-color: #c7254e;
@code-bg: #f9f2f4;
@pre-bg: #f5f5f5;
@pre-color: @gray-dark;
@pre-border-color: #ccc;
@pre-scrollable-max-height: 340px;
// Type
// ------------------------
@text-muted: @gray-light;
@abbr-border-color: @gray-light;
@headings-small-color: @gray-light;
@blockquote-small-color: @gray-light;
@blockquote-border-color: @gray-lighter;
@page-header-border-color: @gray-lighter;
// Miscellaneous
// -------------------------
// Hr border color
@hr-border: @gray-lighter;
// Horizontal forms & lists
@component-offset-horizontal: 180px;
// Container sizes
// --------------------------------------------------
// Small screen / tablet
@container-tablet: ((720px + @grid-gutter-width));
// Medium screen / desktop
@container-desktop: ((940px + @grid-gutter-width));
// Large screen / wide desktop
@container-lg-desktop: ((1140px + @grid-gutter-width));

Some files were not shown because too many files have changed in this diff