Update to double-dash everywhere

These were found using `git grep -nE '[^-a-zA-Z0-9<>]-[a-zA-Z0-9]{2}'` (fair warning: _many_ false positives there).
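
A hedged sketch of how that pattern behaves; the sample file name and contents below are made up purely for illustration:

```bash
# A rough illustration of what the pattern does and does not catch
# (file and contents are hypothetical, not from the repository):
printf 'docker run -rm busybox\nwell-known word\noffset of -12\ndocker run --rm busybox\n' > /tmp/sample.txt

grep -nE '[^-a-zA-Z0-9<>]-[a-zA-Z0-9]{2}' /tmp/sample.txt
# line 1: "-rm" -> caught: a single dash followed by two alphanumerics
# line 3: "-12" -> also caught: one of the false positives warned about above
# "well-known" and "--rm" are skipped: the character before the dash is a
# letter or another dash, which the leading character class excludes
```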

Docker-DCO-1.1-Signed-off-by: Andrew Page <admwiggin@gmail.com> (github: tianon)
Tianon Gravi 2014-03-13 11:46:02 -06:00
parent 51a46e6a4f
commit 44fe8cbbd1
51 changed files with 188 additions and 188 deletions

View file

@ -6,13 +6,13 @@
# docker build -t docker .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/dotcloud/docker -privileged -i -t docker bash
# docker run -v `pwd`:/go/src/github.com/dotcloud/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run -privileged docker hack/make.sh test
# docker run --privileged docker hack/make.sh test
#
# # Publish a release:
# docker run -privileged \
# docker run --privileged \
# -e AWS_S3_BUCKET=baz \
# -e AWS_ACCESS_KEY=foo \
# -e AWS_SECRET_KEY=bar \

View file

@ -1409,7 +1409,7 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes")
flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (eg. \"John Hannibal Smith <hannibal@a-team.com>\"")
flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`)
flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: --run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`)
if err := cmd.Parse(args); err != nil {
return nil
}
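
As the flag lists in the snippet above suggest (Docker's mflag-style registration), a bare name like `"m"` becomes `-m`, a name with a leading dash like `"-message"` becomes the new `--message`, and the `"#"`-prefixed entries (`#author`, `#run`) keep the old single-dash spellings as deprecated aliases. A minimal sketch of the resulting equivalent invocations; the container and image names are hypothetical:

```bash
# Both spellings parse after this change (names are illustrative):
docker commit -m "initial import" my_container myrepo/myimage
docker commit --message "initial import" my_container myrepo/myimage

# The "#"-prefixed entries ("#author", "#run") keep the old single-dash
# spellings (-author, -run) working, but mark them as deprecated.
```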

View file

@ -79,7 +79,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d '
complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes"
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (eg. "John Hannibal Smith <hannibal@a-team.com>"'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Config automatically applied when the image is run. (ex: -run=\'{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}\')'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Config automatically applied when the image is run. (ex: --run=\'{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}\')'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container"
# cp
@ -202,7 +202,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expo
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep stdin open even if not attached'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container (name:alias)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d 'Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d 'Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: <number><optional unit>, where unit = b, k, m or g)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s n -l networking -d 'Enable networking for this container'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container'

View file

@ -174,7 +174,7 @@ __docker_subcommand () {
(ps)
_arguments '-a[Show all containers. Only running containers are shown by default]' \
'-h[Show help]' \
'-before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \
'--before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \
'-n=-[Show n last created containers, include non-running one]:n:(1 5 10 25 50)'
;;
(tag)

View file

@ -9,13 +9,13 @@
# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/data/Dockerfile
#
# # Build data image
# docker build -t data -rm .
# docker build -t data .
#
# # Create a data container. (eg: iceweasel-data)
# docker run -name iceweasel-data data true
# docker run --name iceweasel-data data true
#
# # List data from it
# docker run -volumes-from iceweasel-data busybox ls -al /data
# docker run --volumes-from iceweasel-data busybox ls -al /data
docker-version 0.6.5

View file

@ -10,16 +10,16 @@
# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/iceweasel/Dockerfile
#
# # Build iceweasel image
# docker build -t iceweasel -rm .
# docker build -t iceweasel .
#
# # Run stateful data-on-host iceweasel. For ephemeral, remove -v /data/iceweasel:/data
# docker run -v /data/iceweasel:/data -v /tmp/.X11-unix:/tmp/.X11-unix \
# -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
# -v /dev/snd:/dev/snd --lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
# -e DISPLAY=unix$DISPLAY iceweasel
#
# # To run stateful dockerized data containers
# docker run -volumes-from iceweasel-data -v /tmp/.X11-unix:/tmp/.X11-unix \
# -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
# docker run --volumes-from iceweasel-data -v /tmp/.X11-unix:/tmp/.X11-unix \
# -v /dev/snd:/dev/snd --lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
# -e DISPLAY=unix$DISPLAY iceweasel
docker-version 0.6.5

View file

@ -4,7 +4,7 @@
#DOCKER="/usr/local/bin/docker"
# Use DOCKER_OPTS to modify the daemon startup options.
#DOCKER_OPTS="-dns 8.8.8.8 -dns 8.8.4.4"
#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4"
# If you need Docker to use an HTTP proxy, it can also be specified here.
#export http_proxy="http://127.0.0.1:3128/"

View file

@ -10,7 +10,7 @@
# can configure the list of syscalls. When run, this script produces output
# which, when stored in a file, can be passed to docker as follows:
#
# docker run -lxc-conf="lxc.seccomp=$file" <rest of arguments>
# docker run --lxc-conf="lxc.seccomp=$file" <rest of arguments>
#
# The included sample file shows how to cut about a quarter of all syscalls,
# without affecting most applications.
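#
# A hedged sketch of the workflow those comments describe; the generator
# invocation is hypothetical, only the --lxc-conf hand-off comes from the text:

```bash
# Generate a seccomp profile with this contrib script (invocation shown
# hypothetically) and store it in a file:
file=/tmp/seccomp.conf
./mkseccomp.pl > "$file"    # hypothetical path/name for the generator

# Hand the resulting profile to LXC through the (now double-dash) flag:
docker run --lxc-conf="lxc.seccomp=$file" busybox true
```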

View file

@ -1,9 +1,9 @@
#
# Build: docker build -t apt-cacher .
# Run: docker run -d -p 3142:3142 -name apt-cacher-run apt-cacher
# Run: docker run -d -p 3142:3142 --name apt-cacher-run apt-cacher
#
# and then you can run containers with:
# docker run -t -i -rm -e http_proxy http://dockerhost:3142/ debian bash
# docker run -t -i --rm -e http_proxy http://dockerhost:3142/ debian bash
#
FROM ubuntu
MAINTAINER SvenDowideit@docker.com

View file

@ -23,13 +23,13 @@ To build the image using:
.. code-block:: bash
$ sudo docker build -rm -t eg_apt_cacher_ng .
$ sudo docker build -t eg_apt_cacher_ng .
Then run it, mapping the exposed port to one on the host
.. code-block:: bash
$ sudo docker run -d -p 3142:3142 -name test_apt_cacher_ng eg_apt_cacher_ng
$ sudo docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng
To see the logfiles that are 'tailed' in the default command, you can use:
@ -59,7 +59,7 @@ break other HTTP clients which obey ``http_proxy``, such as ``curl``, ``wget`` a
.. code-block:: bash
$ sudo docker run -rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash
$ sudo docker run --rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash
**Option 3** is the least portable, but there will be times when you might need to
do it and you can do it from your ``Dockerfile`` too.
@ -70,7 +70,7 @@ service:
.. code-block:: bash
$ sudo docker run -rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash
$ sudo docker run --rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash
$$ /usr/lib/apt-cacher-ng/distkill.pl
Scanning /var/cache/apt-cacher-ng, please wait...

View file

@ -119,13 +119,13 @@ Check the logs make sure it is working correctly.
.. code-block:: bash
sudo docker attach -sig-proxy=false $container_id
sudo docker attach --sig-proxy=false $container_id
Attach to the container to see the results in real-time.
- **"docker attach**" This will allow us to attach to a background
process to see what is going on.
- **"-sig-proxy=false"** Do not forward signals to the container; allows
- **"--sig-proxy=false"** Do not forward signals to the container; allows
us to exit the attachment using Control-C without stopping the container.
- **$container_id** The Id of the container we want to attach to.
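
A short end-to-end sketch of the behaviour those bullets describe (the long-running command is illustrative):

```bash
# Start a long-running container, attach without signal proxying, then
# confirm Ctrl-C detaches the client without stopping the container.
container_id=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello; sleep 1; done")

sudo docker attach --sig-proxy=false $container_id
# ... press Ctrl-C here: only the attach client exits ...

sudo docker ps    # the container still shows as Up; Ctrl-C only detached us
```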

View file

@ -37,24 +37,24 @@ And run the PostgreSQL server container (in the foreground):
.. code-block:: bash
$ sudo docker run -rm -P -name pg_test eg_postgresql
$ sudo docker run --rm -P -name pg_test eg_postgresql
There are 2 ways to connect to the PostgreSQL server. We can use
:ref:`working_with_links_names`, or we can access it from our host (or the network).
.. note:: The ``-rm`` removes the container and its image when the container
.. note:: The ``--rm`` removes the container and its image when the container
exits successfully.
Using container linking
^^^^^^^^^^^^^^^^^^^^^^^
Containers can be linked to another container's ports directly using
``-link remote_name:local_alias`` in the client's ``docker run``. This will
``--link remote_name:local_alias`` in the client's ``docker run``. This will
set a number of environment variables that can then be used to connect:
.. code-block:: bash
$ sudo docker run -rm -t -i -link pg_test:pg eg_postgresql bash
$ sudo docker run --rm -t -i --link pg_test:pg eg_postgresql bash
postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password
@ -104,7 +104,7 @@ configuration and data:
.. code-block:: bash
docker run -rm --volumes-from pg_test -t -i busybox sh
docker run --rm --volumes-from pg_test -t -i busybox sh
/ # ls
bin etc lib linuxrc mnt proc run sys usr

View file

@ -51,7 +51,7 @@ try things out, and then exit when you're done.
.. code-block:: bash
$ sudo docker run -i -t -name pybuilder_run shykes/pybuilder bash
$ sudo docker run -i -t --name pybuilder_run shykes/pybuilder bash
$$ URL=http://github.com/shykes/helloflask/archive/master.tar.gz
$$ /usr/local/bin/buildapp $URL

View file

@ -49,7 +49,7 @@ use a container link to provide access to our Redis database.
Create your web application container
-------------------------------------
Next we can create a container for our application. We're going to use the ``-link``
Next we can create a container for our application. We're going to use the ``--link``
flag to create a link to the ``redis`` container we've just created with an alias of
``db``. This will create a secure tunnel to the ``redis`` container and expose the
Redis instance running inside that container to only this container.
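
A hedged sketch of that linking step; the image names are hypothetical, the ``redis`` name and ``db`` alias come from the text:

```bash
# Start the database container, then link the application container to it:
$ sudo docker run -d --name redis redis-image
$ sudo docker run -d --link redis:db --name webapp webapp-image

# Inside "webapp" the alias surfaces connection details as environment
# variables such as DB_PORT_6379_TCP_ADDR and DB_PORT_6379_TCP_PORT.
```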

View file

@ -19,14 +19,14 @@ Build the image using:
.. code-block:: bash
$ sudo docker build -rm -t eg_sshd .
$ sudo docker build -t eg_sshd .
Then run it. You can then use ``docker port`` to find out what host port the container's
port 22 is mapped to:
.. code-block:: bash
$ sudo docker run -d -P -name test_sshd eg_sshd
$ sudo docker run -d -P --name test_sshd eg_sshd
$ sudo docker port test_sshd 22
0.0.0.0:49154

View file

@ -309,9 +309,9 @@ daemon for the containers:
sudo nano /etc/default/docker
---
# Add:
DOCKER_OPTS="-dns 8.8.8.8"
DOCKER_OPTS="--dns 8.8.8.8"
# 8.8.8.8 could be replaced with a local DNS server, such as 192.168.1.1
# multiple DNS servers can be specified: -dns 8.8.8.8 -dns 192.168.1.1
# multiple DNS servers can be specified: --dns 8.8.8.8 --dns 192.168.1.1
The Docker daemon has to be restarted:
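
A hedged sketch of that restart, assuming the Ubuntu service packaging of the era:

```bash
# Pick up the new DOCKER_OPTS from /etc/default/docker:
sudo service docker restart
```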

View file

@ -203,7 +203,7 @@ What's new
.. http:get:: /images/viz
This URI no longer exists. The ``images -viz`` output is now generated in
This URI no longer exists. The ``images --viz`` output is now generated in
the client, using the ``/images/json`` data.
v1.6

View file

@ -1276,8 +1276,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
.. code-block:: bash
docker -d -H="192.168.1.9:4243" -api-enable-cors
docker -d -H="192.168.1.9:4243" --api-enable-cors

View file

@ -1045,7 +1045,7 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
docker -d -H="tcp://192.168.1.9:4243" -api-enable-cors
docker -d -H="tcp://192.168.1.9:4243" --api-enable-cors

View file

@ -1124,7 +1124,7 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
docker -d -H="192.168.1.9:4243" -api-enable-cors
docker -d -H="192.168.1.9:4243" --api-enable-cors

View file

@ -1168,9 +1168,9 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
.. code-block:: bash
docker -d -H="192.168.1.9:4243" -api-enable-cors
docker -d -H="192.168.1.9:4243" --api-enable-cors

View file

@ -1137,8 +1137,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
.. code-block:: bash
docker -d -H="192.168.1.9:4243" -api-enable-cors
docker -d -H="192.168.1.9:4243" --api-enable-cors

View file

@ -1274,9 +1274,9 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
.. code-block:: bash
docker -d -H="192.168.1.9:4243" -api-enable-cors
docker -d -H="192.168.1.9:4243" --api-enable-cors

View file

@ -1254,9 +1254,9 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
.. code-block:: bash
docker -d -H="192.168.1.9:4243" -api-enable-cors
docker -d -H="192.168.1.9:4243" --api-enable-cors

View file

@ -1287,8 +1287,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
.. code-block:: bash
docker -d -H="192.168.1.9:4243" -api-enable-cors
docker -d -H="192.168.1.9:4243" --api-enable-cors

View file

@ -1288,8 +1288,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
.. code-block:: bash
docker -d -H="192.168.1.9:4243" -api-enable-cors
docker -d -H="192.168.1.9:4243" --api-enable-cors

View file

@ -193,7 +193,7 @@ well.
When used in the shell or exec formats, the ``CMD`` instruction sets
the command to be executed when running the image. This is
functionally equivalent to running ``docker commit -run '{"Cmd":
functionally equivalent to running ``docker commit --run '{"Cmd":
<command>}'`` outside the builder.
If you use the *shell* form of the CMD, then the ``<command>`` will
@ -235,7 +235,7 @@ override the default specified in CMD.
``EXPOSE <port> [<port>...]``
The ``EXPOSE`` instruction exposes ports for use within links. This is
functionally equivalent to running ``docker commit -run '{"PortSpecs":
functionally equivalent to running ``docker commit --run '{"PortSpecs":
["<port>", "<port2>"]}'`` outside the builder. Refer to
:ref:`port_redirection` for detailed information.

View file

@ -52,7 +52,7 @@ Sometimes this can use a more complex value string, as for ``-v``::
Strings and Integers
~~~~~~~~~~~~~~~~~~~~
Options like ``-name=""`` expect a string, and they can only be
Options like ``--name=""`` expect a string, and they can only be
specified once. Options like ``-c=0`` expect an integer, and they can
only be specified once.
@ -94,7 +94,7 @@ daemon and client. To run the daemon you provide the ``-d`` flag.
To force Docker to use devicemapper as the storage driver, use ``docker -d -s devicemapper``.
To set the DNS server for all Docker containers, use ``docker -d -dns 8.8.8.8``.
To set the DNS server for all Docker containers, use ``docker -d --dns 8.8.8.8``.
To run the daemon with debug output, use ``docker -d -D``.
@ -305,7 +305,7 @@ by using the ``git://`` schema.
-m, --message="": Commit message
-a, --author="": Author (eg. "John Hannibal Smith <hannibal@a-team.com>"
--run="": Configuration changes to be applied when the image is launched with `docker run`.
(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
(ex: --run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
.. _cli_commit_examples:
@ -335,9 +335,9 @@ run ``ls /etc``.
.. code-block:: bash
$ docker run -t -name test ubuntu ls
$ docker run -t --name test ubuntu ls
bin boot dev etc home lib lib64 media mnt opt proc root run sbin selinux srv sys tmp usr var
$ docker commit -run='{"Cmd": ["ls","/etc"]}' test test2
$ docker commit --run='{"Cmd": ["ls","/etc"]}' test test2
933d16de9e70005304c1717b5c6f2f39d6fd50752834c6f34a155c70790011eb
$ docker run -t test2
adduser.conf gshadow login.defs rc0.d
@ -358,7 +358,7 @@ Say you have a Dockerfile like so:
CMD ["/usr/sbin/sshd -D"]
...
If you run that, make some changes, and then commit, Docker will merge the environment variable and exposed port configuration settings with any that you specify in the -run= option. This is a change from Docker 0.8.0 and prior where no attempt was made to preserve any existing configuration on commit.
If you run that, make some changes, and then commit, Docker will merge the environment variable and exposed port configuration settings with any that you specify in the --run= option. This is a change from Docker 0.8.0 and prior where no attempt was made to preserve any existing configuration on commit.
.. code-block:: bash
@ -366,14 +366,14 @@ If you run that, make some changes, and then commit, Docker will merge the envir
$ docker run -t -i me/foo /bin/bash
foo-container$ [make changes in the container]
foo-container$ exit
$ docker commit -run='{"Cmd": ["ls"]}' [container-id] me/bar
$ docker commit --run='{"Cmd": ["ls"]}' [container-id] me/bar
...
The me/bar image will now have port 22 exposed, MYVAR env var set to 'foobar', and its default command will be ["ls"].
Note that this is currently a shallow merge. So, for example, if you had specified a new port spec in the -run= config above, that would have clobbered the 'EXPOSE 22' setting from the parent container.
Note that this is currently a shallow merge. So, for example, if you had specified a new port spec in the --run= config above, that would have clobbered the 'EXPOSE 22' setting from the parent container.
Full -run example
Full --run example
.................
The ``--run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID``
@ -384,7 +384,7 @@ not overridden in the JSON hash will be merged in.
.. code-block:: bash
$ sudo docker commit -run='
$ sudo docker commit --run='
{
"Entrypoint" : null,
"Privileged" : false,
@ -516,16 +516,16 @@ Show events in the past from a specified time
.. code-block:: bash
$ sudo docker events -since 1378216169
$ sudo docker events --since 1378216169
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
$ sudo docker events -since '2013-09-03'
$ sudo docker events --since '2013-09-03'
[2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
$ sudo docker events -since '2013-09-03 15:49:29 +0200 CEST'
$ sudo docker events --since '2013-09-03 15:49:29 +0200 CEST'
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
@ -829,7 +829,7 @@ text output:
.. code-block:: bash
$ sudo docker inspect -format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID
$ sudo docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID
Find a Specific Port Mapping
............................
@ -844,7 +844,7 @@ we ask for the ``HostPort`` field to get the public address.
.. code-block:: bash
$ sudo docker inspect -format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID
$ sudo docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID
Get config
..........
@ -856,7 +856,7 @@ to convert config object into JSON
.. code-block:: bash
$ sudo docker inspect -format='{{json .config}}' $INSTANCE_ID
$ sudo docker inspect --format='{{json .config}}' $INSTANCE_ID
.. _cli_kill:
@ -1151,7 +1151,7 @@ image is removed.
--volumes-from="": Mount all volumes from the given container(s)
--entrypoint="": Overwrite the default entrypoint set by the image
-w, --workdir="": Working directory inside the container
--lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
--lxc-conf=[]: Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
--sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
--expose=[]: Expose a port from the container without publishing it to your host
--link="": Add link to another container (name:alias)
@ -1171,7 +1171,7 @@ See :ref:`port_redirection` for more detailed information about the ``--expose``
``-p``, ``-P`` and ``--link`` parameters, and :ref:`working_with_links_names` for
specific examples using ``--link``.
Known Issues (run -volumes-from)
Known Issues (run --volumes-from)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
@ -1199,7 +1199,7 @@ error. Docker will close this file when ``docker run`` exits.
This will *not* work, because by default, most potentially dangerous
kernel capabilities are dropped; including ``cap_sys_admin`` (which is
required to mount filesystems). However, the ``-privileged`` flag will
required to mount filesystems). However, the ``--privileged`` flag will
allow it to run:
.. code-block:: bash
@ -1211,7 +1211,7 @@ allow it to run:
none 1.9G 0 1.9G 0% /mnt
The ``-privileged`` flag gives *all* capabilities to the container,
The ``--privileged`` flag gives *all* capabilities to the container,
and it also lifts all the limitations enforced by the ``device``
cgroup controller. In other words, the container can then do almost
everything that the host can do. This flag exists to allow special
@ -1313,7 +1313,7 @@ This example shows 5 containers that might be set up to test a web application c
2. Start a pre-prepared ``riakserver`` image, give the container name ``riak`` and expose port ``8098`` to any containers that link to it;
3. Start the ``appserver`` image, restricting its memory usage to 100MB, setting two environment variables ``DEVELOPMENT`` and ``BRANCH`` and bind-mounting the current directory (``$(pwd)``) in the container in read-only mode as ``/app/bin``;
4. Start the ``webserver``, mapping port ``443`` in the container to port ``1443`` on the Docker server, setting the DNS server to ``dns.dev.org``, creating a volume to put the log files into (so we can access it from another container), then importing the files from the volume exposed by the ``static`` container, and linking to all exposed ports from ``riak`` and ``app``. Lastly, we set the hostname to ``web.sven.dev.org`` so it's consistent with the pre-generated SSL certificate;
5. Finally, we create a container that runs ``tail -f access.log`` using the logs volume from the ``web`` container, setting the workdir to ``/var/log/httpd``. The ``-rm`` option means that when the container exits, the container's layer is removed.
5. Finally, we create a container that runs ``tail -f access.log`` using the logs volume from the ``web`` container, setting the workdir to ``/var/log/httpd``. The ``--rm`` option means that when the container exits, the container's layer is removed.
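
A hedged reconstruction of steps 3-5 as ``docker run`` commands; the image names (``appserver``, ``webserver``), environment-variable values, and the ``static``/``riak`` containers are taken from or implied by the description above, not the author's exact commands:

```bash
# Step 3: memory-limited app container with two env vars and a read-only bind mount
$ docker run -d -m 100m -e DEVELOPMENT=1 -e BRANCH=example-code \
    -v $(pwd):/app/bin:ro --name app appserver

# Step 4: web server with port mapping, DNS, a log volume, imported volumes, and links
$ docker run -d -p 1443:443 --dns=dns.dev.org -v /var/log/httpd \
    --volumes-from static --link riak:riak --link app:app \
    -h web.sven.dev.org --name web webserver

# Step 5: disposable log tailer using the web container's log volume
$ docker run -t -i --rm --volumes-from web -w /var/log/httpd \
    busybox tail -f access.log
```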
.. _cli_save:

View file

@ -80,7 +80,7 @@ through network connections or shared volumes because the container is
no longer listening to the commandline where you executed ``docker
run``. You can reattach to a detached container with ``docker``
:ref:`cli_attach`. If you choose to run a container in the detached
mode, then you cannot use the ``-rm`` option.
mode, then you cannot use the ``--rm`` option.
Foreground
..........
@ -92,10 +92,10 @@ error. It can even pretend to be a TTY (this is what most commandline
executables expect) and pass along signals. All of that is
configurable::
-a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr``
-t=false : Allocate a pseudo-tty
-sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
-i=false : Keep STDIN open even if not attached
-a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr``
-t=false : Allocate a pseudo-tty
--sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
-i=false : Keep STDIN open even if not attached
If you do not specify ``-a`` then Docker will `attach everything
(stdin,stdout,stderr)
@ -112,7 +112,7 @@ as well as persistent standard input (``stdin``), so you'll use ``-i
Container Identification
------------------------
Name (-name)
Name (--name)
............
The operator can identify a container in three ways:
@ -122,7 +122,7 @@ The operator can identify a container in three ways:
* Name ("evil_ptolemy")
The UUID identifiers come from the Docker daemon, and if you do not
assign a name to the container with ``-name`` then the daemon will
assign a name to the container with ``--name`` then the daemon will
also generate a random string name. The name can become a handy
way to add meaning to a container since you can use this name when
defining :ref:`links <working_with_links_names>` (or any other place
@ -137,7 +137,7 @@ container ID out to a file of your choosing. This is similar to how
some programs might write out their process ID to a file (you've seen
them as PID files)::
-cidfile="": Write the container ID to the file
--cidfile="": Write the container ID to the file
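
A short sketch combining the two identification mechanisms described above; the name, path, and command are illustrative:

```bash
# Name the container and write its ID to a file at the same time:
$ sudo docker run -d --name web --cidfile /tmp/web.cid ubuntu sleep 600
$ cat /tmp/web.cid    # the full container ID
$ sudo docker ps      # the same container, listed under the name "web"
```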
Network Settings
----------------
@ -145,7 +145,7 @@ Network Settings
::
-n=true : Enable networking for this container
-dns=[] : Set custom dns servers for the container
--dns=[] : Set custom dns servers for the container
By default, all containers have networking enabled and they can make
any outgoing connections. The operator can completely disable
@ -154,9 +154,9 @@ networking. In cases like this, you would perform I/O through files or
STDIN/STDOUT only.
Your container will use the same DNS servers as the host by default,
but you can override this with ``-dns``.
but you can override this with ``--dns``.
Clean Up (-rm)
Clean Up (--rm)
--------------
By default a container's file system persists even after the container
@ -165,9 +165,9 @@ final state) and you retain all your data by default. But if you are
running short-term **foreground** processes, these container file
systems can really pile up. If instead you'd like Docker to
**automatically clean up the container and remove the file system when
the container exits**, you can add the ``-rm`` flag::
the container exits**, you can add the ``--rm`` flag::
-rm=false: Automatically remove the container when it exits (incompatible with -d)
--rm=false: Automatically remove the container when it exits (incompatible with -d)
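
A minimal sketch of the clean-up behaviour:

```bash
# With --rm the container's filesystem is removed as soon as the
# foreground process exits:
$ sudo docker run --rm ubuntu echo "short-lived"
$ sudo docker ps -a    # the exited container is no longer listed
```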
Runtime Constraints on CPU and Memory
@ -193,8 +193,8 @@ Runtime Privilege and LXC Configuration
::
-privileged=false: Give extended privileges to this container
-lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
--privileged=false: Give extended privileges to this container
--lxc-conf=[]: Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
By default, Docker containers are "unprivileged" and cannot, for
example, run a Docker daemon inside a Docker container. This is
@ -203,16 +203,16 @@ but a "privileged" container is given access to all devices (see
lxc-template.go_ and documentation on `cgroups devices
<https://www.kernel.org/doc/Documentation/cgroups/devices.txt>`_).
When the operator executes ``docker run -privileged``, Docker will
When the operator executes ``docker run --privileged``, Docker will
enable access to all devices on the host as well as set some
configuration in AppArmor to allow the container nearly all the same
access to the host as processes running outside containers on the
host. Additional information about running with ``-privileged`` is
host. Additional information about running with ``--privileged`` is
available on the `Docker Blog
<http://blog.docker.io/2013/09/docker-can-now-run-within-docker/>`_.
An operator can also specify LXC options using one or more
``-lxc-conf`` parameters. These can be new parameters or override
``--lxc-conf`` parameters. These can be new parameters or override
existing parameters from the lxc-template.go_. Note that in the
future, a given host's Docker daemon may not use LXC, so this is an
implementation-specific configuration meant for operators already
@ -260,7 +260,7 @@ ENTRYPOINT (Default Command to Execute at Runtime
::
-entrypoint="": Overwrite the default entrypoint set by the image
--entrypoint="": Overwrite the default entrypoint set by the image
The ENTRYPOINT of an image is similar to a ``COMMAND`` because it
specifies what executable to run when the container starts, but it is
@ -274,12 +274,12 @@ runtime by using a string to specify the new ``ENTRYPOINT``. Here is an
example of how to run a shell in a container that has been set up to
automatically run something else (like ``/usr/bin/redis-server``)::
docker run -i -t -entrypoint /bin/bash example/redis
docker run -i -t --entrypoint /bin/bash example/redis
or two examples of how to pass more parameters to that ENTRYPOINT::
docker run -i -t -entrypoint /bin/bash example/redis -c ls -l
docker run -i -t -entrypoint /usr/bin/redis-cli example/redis --help
docker run -i -t --entrypoint /bin/bash example/redis -c ls -l
docker run -i -t --entrypoint /usr/bin/redis-cli example/redis --help
EXPOSE (Incoming Ports)
@ -290,16 +290,16 @@ providing the ``EXPOSE`` instruction to give a hint to the operator
about what incoming ports might provide services. The following
options work with or override the ``Dockerfile``'s exposed defaults::
-expose=[]: Expose a port from the container
--expose=[]: Expose a port from the container
without publishing it to your host
-P=false : Publish all exposed ports to the host interfaces
-p=[] : Publish a container's port to the host (format:
ip:hostPort:containerPort | ip::containerPort |
hostPort:containerPort)
(use 'docker port' to see the actual mapping)
-link="" : Add link to another container (name:alias)
-P=false : Publish all exposed ports to the host interfaces
-p=[] : Publish a container's port to the host (format:
ip:hostPort:containerPort | ip::containerPort |
hostPort:containerPort)
(use 'docker port' to see the actual mapping)
--link="" : Add link to another container (name:alias)
As mentioned previously, ``EXPOSE`` (and ``-expose``) make a port
As mentioned previously, ``EXPOSE`` (and ``--expose``) make a port
available **in** a container for incoming connections. The port number
on the inside of the container (where the service listens) does not
need to be the same number as the port exposed on the outside of the
@ -308,16 +308,16 @@ have an HTTP service listening on port 80 (and so you ``EXPOSE 80`` in
the ``Dockerfile``), but outside the container the port might be 42800.
To help a new client container reach the server container's internal
port operator ``-expose``'d by the operator or ``EXPOSE``'d by the
port operator ``--expose``'d by the operator or ``EXPOSE``'d by the
developer, the operator has three choices: start the server container
with ``-P`` or ``-p,`` or start the client container with ``-link``.
with ``-P`` or ``-p,`` or start the client container with ``--link``.
If the operator uses ``-P`` or ``-p`` then Docker will make the
exposed port accessible on the host and the ports will be available to
any client that can reach the host. To find the map between the host
ports and the exposed ports, use ``docker port``.
If the operator uses ``-link`` when starting the new client container,
If the operator uses ``--link`` when starting the new client container,
then the client container can access the exposed port via a private
networking interface. Docker will set some environment variables in
the client container to help indicate which interface and port to use.
@ -329,7 +329,7 @@ The operator can **set any environment variable** in the container by
using one or more ``-e`` flags, even overriding those already defined by the
developer with a Dockerfile ``ENV``::
$ docker run -e "deep=purple" -rm ubuntu /bin/bash -c export
$ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export
declare -x HOME="/"
declare -x HOSTNAME="85bc26a0e200"
declare -x OLDPWD
@ -341,13 +341,13 @@ developer with a Dockefile ``ENV``::
Similarly the operator can set the **hostname** with ``-h``.
``-link name:alias`` also sets environment variables, using the
``--link name:alias`` also sets environment variables, using the
*alias* string to define environment variables within the container
that give the IP and PORT information for connecting to the service
container. Let's imagine we have a container running Redis::
# Start the service container, named redis-name
$ docker run -d -name redis-name dockerfiles/redis
$ docker run -d --name redis-name dockerfiles/redis
4241164edf6f5aca5b0e9e4c9eccd899b0b8080c64c0cd26efe02166c73208f3
# The redis-name container exposed port 6379
@ -361,12 +361,12 @@ container. Let's imagine we have a container running Redis::
Yet we can get information about the Redis container's exposed ports
with ``-link``. Choose an alias that will form a valid environment
with ``--link``. Choose an alias that will form a valid environment
variable!
::
$ docker run -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c export
$ docker run --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c export
declare -x HOME="/"
declare -x HOSTNAME="acda7f7b1cdc"
declare -x OLDPWD
@ -383,7 +383,7 @@ variable!
And we can use that information to connect from another container as a client::
$ docker run -i -t -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT'
$ docker run -i -t --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT'
172.17.0.32:6379>
VOLUME (Shared Filesystems)
@ -393,7 +393,7 @@ VOLUME (Shared Filesystems)
-v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro].
If "container-dir" is missing, then docker creates a new volume.
-volumes-from="": Mount all volumes from the given container(s)
--volumes-from="": Mount all volumes from the given container(s)
The volumes commands are complex enough to have their own
documentation in section :ref:`volume_def`. A developer can define one

View file

@ -43,26 +43,26 @@ Start actual redis server on one Docker host
.. code-block:: bash
big-server $ docker run -d -name redis crosbymichael/redis
big-server $ docker run -d --name redis crosbymichael/redis
Then add an ambassador linked to the redis server, mapping a port to the outside world
.. code-block:: bash
big-server $ docker run -d -link redis:redis -name redis_ambassador -p 6379:6379 svendowideit/ambassador
big-server $ docker run -d --link redis:redis --name redis_ambassador -p 6379:6379 svendowideit/ambassador
On the other host, you can set up another ambassador setting environment variables for each remote port we want to proxy to the ``big-server``
.. code-block:: bash
client-server $ docker run -d -name redis_ambassador -expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador
client-server $ docker run -d --name redis_ambassador --expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador
Then on the ``client-server`` host, you can use a redis client container to talk
to the remote redis server, just by linking to the local redis ambassador.
.. code-block:: bash
client-server $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
client-server $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
redis 172.17.0.160:6379> ping
PONG
@ -79,19 +79,19 @@ On the docker host (192.168.1.52) that redis will run on:
.. code-block:: bash
# start actual redis server
$ docker run -d -name redis crosbymichael/redis
$ docker run -d --name redis crosbymichael/redis
# get a redis-cli container for connection testing
$ docker pull relateiq/redis-cli
# test the redis server by talking to it directly
$ docker run -t -i -rm -link redis:redis relateiq/redis-cli
$ docker run -t -i --rm --link redis:redis relateiq/redis-cli
redis 172.17.0.136:6379> ping
PONG
^D
# add redis ambassador
$ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 busybox sh
$ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 busybox sh
in the redis_ambassador container, you can see the linked redis container's env
@ -119,7 +119,7 @@ This environment is used by the ambassador socat script to expose redis to the w
$ docker rm redis_ambassador
$ sudo ./contrib/mkimage-unittest.sh
$ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 docker-ut sh
$ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 docker-ut sh
$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379
@ -127,7 +127,7 @@ then ping the redis server via the ambassador
.. code-block::bash
$ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
$ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
redis 172.17.0.160:6379> ping
PONG
@ -136,7 +136,7 @@ Now go to a different server
.. code-block:: bash
$ sudo ./contrib/mkimage-unittest.sh
$ docker run -t -i -expose 6379 -name redis_ambassador docker-ut sh
$ docker run -t -i --expose 6379 --name redis_ambassador docker-ut sh
$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379
@ -145,7 +145,7 @@ and get the redis-cli image so we can talk over the ambassador bridge
.. code-block:: bash
$ docker pull relateiq/redis-cli
$ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
$ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
redis 172.17.0.160:6379> ping
PONG
@ -157,7 +157,7 @@ When you start the container, it uses a small ``sed`` script to parse out the (p
link environment variables to set up the port forwarding. On the remote host, you need to set the
variable using the ``-e`` command line option.
``-expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`` will forward the
``--expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`` will forward the
local ``1234`` port to the remote IP and port - in this case ``192.168.1.52:6379``.
@ -171,9 +171,9 @@ local ``1234`` port to the remote IP and port - in this case ``192.168.1.52:6379
# docker build -t SvenDowideit/ambassador .
# docker tag SvenDowideit/ambassador ambassador
# then to run it (on the host that has the real backend on it)
# docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 ambassador
# docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 ambassador
# on the remote host, you can set up another ambassador
# docker run -t -i -name redis_ambassador -expose 6379 sh
# docker run -t -i --name redis_ambassador --expose 6379 sh
FROM docker-ut
MAINTAINER SvenDowideit@home.org.au

View file

@ -39,7 +39,7 @@ Repository to a local image cache.
12 character hash ``539c0211cd76: Download complete`` which is the
short form of the image ID. These short image IDs are the first 12
characters of the full image ID - which can be found using ``docker
inspect`` or ``docker images -notrunc=true``
inspect`` or ``docker images --no-trunc=true``
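
A quick hedged sketch comparing the truncated and full listings:

```bash
# Short 12-character IDs versus the full IDs:
$ sudo docker images | head -n 2
$ sudo docker images --no-trunc=true | head -n 2
```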
Running an interactive shell
----------------------------

View file

@ -121,8 +121,8 @@ Container intercommunication
The value of the Docker daemon's ``icc`` parameter determines whether
containers can communicate with each other over the bridge network.
- The default, ``-icc=true`` allows containers to communicate with each other.
- ``-icc=false`` means containers are isolated from each other.
- The default, ``--icc=true`` allows containers to communicate with each other.
- ``--icc=false`` means containers are isolated from each other.
Docker uses ``iptables`` under the hood to either accept or
drop communication between containers.
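
A hedged sketch, reusing the ``DOCKER_OPTS`` convention and the ``crosbymichael/redis`` image that appear elsewhere in this commit:

```bash
# Disable inter-container communication daemon-wide
# (e.g. via DOCKER_OPTS in /etc/default/docker):
DOCKER_OPTS="--icc=false"

# Containers on the bridge can then only reach each other through
# explicit links:
sudo docker run -d --name redis crosbymichael/redis
sudo docker run -t -i --link redis:db ubuntu bash
```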

View file

@ -114,14 +114,14 @@ exposure, is possible because ``client`` is started after ``server``
has been started.
Here is a full example. On ``server``, the port of interest is
exposed. The exposure is done either through the ``-expose`` parameter
exposed. The exposure is done either through the ``--expose`` parameter
to the ``docker run`` command, or the ``EXPOSE`` build command in a
Dockerfile:
.. code-block:: bash
# Expose port 80
docker run -expose 80 --name server <image> <cmd>
docker run --expose 80 --name server <image> <cmd>
The ``client`` then links to the ``server``:

View file

@ -19,14 +19,14 @@ Container Naming
.. versionadded:: v0.6.5
You can now name your container by using the ``-name`` flag. If no
You can now name your container by using the ``--name`` flag. If no
name is provided, Docker will automatically generate a name. You can
see this name using the ``docker ps`` command.
.. code-block:: bash
# format is "sudo docker run -name <container_name> <image_name> <command>"
$ sudo docker run -name test ubuntu /bin/bash
# format is "sudo docker run --name <container_name> <image_name> <command>"
$ sudo docker run --name test ubuntu /bin/bash
# the flag "-a" Show all containers. Only running containers are shown by default.
$ sudo docker ps -a
@ -41,9 +41,9 @@ Links: service discovery for docker
.. versionadded:: v0.6.5
Links allow containers to discover and securely communicate with each
other by using the flag ``-link name:alias``. Inter-container
other by using the flag ``--link name:alias``. Inter-container
communication can be disabled with the daemon flag
``-icc=false``. With this flag set to ``false``, Container A cannot
``--icc=false``. With this flag set to ``false``, Container A cannot
access Container B unless explicitly allowed via a link. This is a
huge win for securing your containers. When two containers are linked
together Docker creates a parent child relationship between the
@ -63,7 +63,7 @@ based on that image and run it as a daemon.
.. code-block:: bash
$ sudo docker run -d -name redis crosbymichael/redis
$ sudo docker run -d --name redis crosbymichael/redis
We can issue all the commands that you would expect using the name
``redis``; start, stop, attach, using the name for our container. The
@ -77,9 +77,9 @@ we need to establish a link.
.. code-block:: bash
$ sudo docker run -t -i -link redis:db -name webapp ubuntu bash
$ sudo docker run -t -i --link redis:db --name webapp ubuntu bash
When you specified ``-link redis:db`` you are telling Docker to link
When you specified ``--link redis:db`` you are telling Docker to link
the container named ``redis`` into this new container with the alias
``db``. Environment variables are prefixed with the alias so that the
parent container can access network and environment information from

View file

@ -42,14 +42,14 @@ two new volumes::
This command will create the new container with two new volumes that
exits instantly (``true`` is pretty much the smallest, simplest program
that you can run). Once created you can mount its volumes in any other
container using the ``-volumes-from`` option; irrespective of whether the
container using the ``--volumes-from`` option; irrespective of whether the
container is running or not.
Or, you can use the VOLUME instruction in a Dockerfile to add one or more new
volumes to any container created from that image::
# BUILD-USING: docker build -t data .
# RUN-USING: docker run -name DATA data
# RUN-USING: docker run --name DATA data
FROM busybox
VOLUME ["/var/volume1", "/var/volume2"]
CMD ["/bin/true"]
@ -63,19 +63,19 @@ Data Volume Container, and then to mount the data from it.
Create a named container with volumes to share (``/var/volume1`` and ``/var/volume2``)::
$ docker run -v /var/volume1 -v /var/volume2 -name DATA busybox true
$ docker run -v /var/volume1 -v /var/volume2 --name DATA busybox true
Then mount those data volumes into your application containers::
$ docker run -t -i -rm -volumes-from DATA -name client1 ubuntu bash
$ docker run -t -i --rm --volumes-from DATA --name client1 ubuntu bash
You can use multiple ``-volumes-from`` parameters to bring together multiple
You can use multiple ``--volumes-from`` parameters to bring together multiple
data volumes from multiple containers.
Interestingly, you can mount the volumes that came from the ``DATA`` container in
yet another container via the ``client1`` middleman container::
$ docker run -t -i -rm -volumes-from client1 -name client2 ubuntu bash
$ docker run -t -i --rm --volumes-from client1 --name client2 ubuntu bash
This allows you to abstract the actual data source from users of that data,
similar to :ref:`ambassador_pattern_linking <ambassador_pattern_linking>`.
@ -131,7 +131,7 @@ data-container's volume. For example::
$ sudo docker run -rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data
* ``-rm`` - remove the container when it exits
* ``--rm`` - remove the container when it exits
* ``--volumes-from DATA`` - attach to the volumes shared by the ``DATA`` container
* ``-v $(pwd):/backup`` - bind mount the current directory into the container; to write the tar file to
* ``busybox`` - a small, simple image - good for quick maintenance
@ -142,11 +142,11 @@ Then to restore to the same container, or another that you've made elsewhere::
# create a new data container
$ sudo docker run -v /data -name DATA2 busybox true
# untar the backup files into the new container's data volume
$ sudo docker run -rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar
$ sudo docker run --rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar
data/
data/sven.txt
# compare to the original container
$ sudo docker run -rm --volumes-from DATA -v `pwd`:/backup busybox ls /data
$ sudo docker run --rm --volumes-from DATA -v `pwd`:/backup busybox ls /data
sven.txt

View file

@ -74,7 +74,7 @@ name or description:
Search the docker index for images
-notrunc=false: Don't truncate output
--no-trunc=false: Don't truncate output
$ sudo docker search centos
Found 25 results matching your query ("centos")
NAME DESCRIPTION

View file

@ -139,7 +139,7 @@ docker run \
-e AWS_ACCESS_KEY \
-e AWS_SECRET_KEY \
-e GPG_PASSPHRASE \
-i -t -privileged \
-i -t --privileged \
docker \
hack/release.sh
```
@ -173,7 +173,7 @@ docker run \
-e AWS_ACCESS_KEY \
-e AWS_SECRET_KEY \
-e GPG_PASSPHRASE \
-i -t -privileged \
-i -t --privileged \
docker \
hack/release.sh
```

View file

@ -5,7 +5,7 @@
# See the blog post: http://blog.docker.io/2013/09/docker-can-now-run-within-docker/
#
# This script should be executed inside a docker container in privileged mode
# ('docker run -privileged', introduced in docker 0.6).
# ('docker run --privileged', introduced in docker 0.6).
# Usage: dind CMD [ARG...]
@ -17,7 +17,7 @@ CGROUP=/sys/fs/cgroup
mountpoint -q $CGROUP ||
mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
echo "Could not make a tmpfs mount. Did you use -privileged?"
echo "Could not make a tmpfs mount. Did you use --privileged?"
exit 1
}

View file

@ -1,8 +1,8 @@
# DOCKER-VERSION: 0.7.6
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
# DESCRIPTION: docker-ci continuous integration service
# TO_BUILD: docker build -rm -t docker-ci/docker-ci .
# TO_RUN: docker run -rm -i -t -p 8000:80 -p 2222:22 -v /run:/var/socket \
# TO_BUILD: docker build -t docker-ci/docker-ci .
# TO_RUN: docker run --rm -i -t -p 8000:80 -p 2222:22 -v /run:/var/socket \
# -v /data/docker-ci:/data/docker-ci docker-ci/docker-ci
from ubuntu:12.04

View file

@ -57,8 +57,8 @@ Production deployment
export EMAIL_RCP=[EMAIL_FOR_BUILD_ERRORS]
# Build docker-ci and testbuilder docker images
docker -H $DOCKER_PROD build -rm -t docker-ci/docker-ci .
(cd testbuilder; docker -H $DOCKER_PROD build -rm -t docker-ci/testbuilder .)
docker -H $DOCKER_PROD build -t docker-ci/docker-ci .
(cd testbuilder; docker -H $DOCKER_PROD build --rm -t docker-ci/testbuilder .)
# Run docker-ci container ( assuming no previous container running )
(cd dcr/prod; dcr docker-ci.yml start)

View file

@ -6,7 +6,7 @@ else
AWS_S3_BUCKET='get-staging.docker.io'
fi
docker run -rm -privileged -v /run:/var/socket \
docker run --rm --privileged -v /run:/var/socket \
-e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY \
-e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE \
-e DOCKER_RELEASE=1 -e DEPLOYMENT=$DEPLOYMENT docker-ci/testbuilder docker

View file

@ -3,6 +3,6 @@ set -x
PROJECT_NAME=$(basename $0)
docker run -rm -u sysadmin -e DEPLOYMENT=$DEPLOYMENT -v /run:/var/socket \
docker run --rm -u sysadmin -e DEPLOYMENT=$DEPLOYMENT -v /run:/var/socket \
-v /home/docker-ci/coverage/$PROJECT_NAME:/data docker-ci/testbuilder $PROJECT_NAME $1 $2 $3

View file

@ -1,5 +1,5 @@
# TO_BUILD: docker build -rm -no-cache -t docker-ci/testbuilder .
# TO_RUN: docker run -rm -u sysadmin \
# TO_BUILD: docker build --no-cache -t docker-ci/testbuilder .
# TO_RUN: docker run --rm -u sysadmin \
# -v /run:/var/socket docker-ci/testbuilder docker-registry
#

View file

@ -5,8 +5,8 @@ PROJECT_PATH=$1
# Build the docker project
cd /data/$PROJECT_PATH
sg docker -c "docker build -q -rm -t registry ."
cd test; sg docker -c "docker build -q -rm -t docker-registry-test ."
sg docker -c "docker build -q -t registry ."
cd test; sg docker -c "docker build -q -t docker-registry-test ."
# Run the tests
sg docker -c "docker run -rm -v /home/docker-ci/coverage/docker-registry:/data docker-registry-test"
sg docker -c "docker run --rm -v /home/docker-ci/coverage/docker-registry:/data docker-registry-test"

View file

@ -5,14 +5,14 @@ PROJECT_PATH=$1
# Build the docker project
cd /data/$PROJECT_PATH
sg docker -c "docker build -q -rm -t docker ."
sg docker -c "docker build -q -t docker ."
if [ "$DOCKER_RELEASE" == "1" ]; then
# Do nightly release
echo sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY= -e AWS_SECRET_KEY= -e GPG_PASSPHRASE= docker hack/release.sh"
echo sg docker -c "docker run --rm --privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY= -e AWS_SECRET_KEY= -e GPG_PASSPHRASE= docker hack/release.sh"
set +x
sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh"
sg docker -c "docker run --rm --privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh"
else
# Run the tests
sg docker -c "docker run -rm -privileged -v /home/docker-ci/coverage/docker:/data docker ./hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh"
sg docker -c "docker run --rm --privileged -v /home/docker-ci/coverage/docker:/data docker ./hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh"
fi

View file

@ -31,7 +31,7 @@ docker run -e AWS_S3_BUCKET=get-staging.docker.io \
-e AWS_ACCESS_KEY=AKI1234... \
-e AWS_SECRET_KEY=sEs4mE... \
-e GPG_PASSPHRASE=m0resEs4mE... \
-i -t -privileged \
-i -t --privileged \
docker ./hack/release.sh
EOF
exit 1

View file

@ -739,7 +739,7 @@ func TestRunAutoRemove(t *testing.T) {
c := make(chan struct{})
go func() {
defer close(c)
if err := cli.CmdRun("-rm", unitTestImageID, "hostname"); err != nil {
if err := cli.CmdRun("--rm", unitTestImageID, "hostname"); err != nil {
t.Fatal(err)
}
}()

View file

@ -1580,7 +1580,7 @@ func TestPrivilegedCanMknod(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer runtime.Nuke()
if output, err := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
if output, err := runContainer(eng, runtime, []string{"--privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
t.Fatalf("Could not mknod into privileged container %s %v", output, err)
}
}
@ -1589,7 +1589,7 @@ func TestPrivilegedCanMount(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer runtime.Nuke()
if output, _ := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" {
if output, _ := runContainer(eng, runtime, []string{"--privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" {
t.Fatal("Could not mount into privileged container")
}
}

View file

@ -203,7 +203,7 @@ func TestCreateRmRunning(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
config, hostConfig, _, err := runconfig.Parse([]string{"-name", "foo", unitTestImageID, "sleep 300"}, nil)
config, hostConfig, _, err := runconfig.Parse([]string{"--name", "foo", unitTestImageID, "sleep 300"}, nil)
if err != nil {
t.Fatal(err)
}

View file

@ -20,21 +20,21 @@ func mustParse(t *testing.T, args string) (*Config, *HostConfig) {
}
func TestParseRunLinks(t *testing.T) {
if _, hostConfig := mustParse(t, "-link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links)
}
if _, hostConfig := mustParse(t, "-link a:b -link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" {
if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" {
t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links)
}
if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 {
t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
}
if _, _, err := parse(t, "-link a"); err == nil {
t.Fatalf("Error parsing links. `-link a` should be an error but is not")
if _, _, err := parse(t, "--link a"); err == nil {
t.Fatalf("Error parsing links. `--link a` should be an error but is not")
}
if _, _, err := parse(t, "-link"); err == nil {
t.Fatalf("Error parsing links. `-link` should be an error but is not")
if _, _, err := parse(t, "--link"); err == nil {
t.Fatalf("Error parsing links. `--link` should be an error but is not")
}
}
@ -73,8 +73,8 @@ func TestParseRunAttach(t *testing.T) {
if _, _, err := parse(t, "-a stderr -d"); err == nil {
t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not")
}
if _, _, err := parse(t, "-d -rm"); err == nil {
t.Fatalf("Error parsing attach flags, `-d -rm` should be an error but is not")
if _, _, err := parse(t, "-d --rm"); err == nil {
t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not")
}
}

View file

@ -15,7 +15,7 @@ import (
var (
ErrInvalidWorikingDirectory = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.")
ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d")
ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: -rm and -d")
ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d")
)
//FIXME Only used in tests
@ -74,7 +74,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host")
cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers")
cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)")
cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
if err := cmd.Parse(args); err != nil {
return nil, nil, cmd, err