Merge branch 'master' into pluginflag
Conflicts: pkg/cgroups/cgroups.go pkg/libcontainer/nsinit/exec.go pkg/libcontainer/nsinit/init.go pkg/libcontainer/nsinit/mount.go runconfig/hostconfig.go runconfig/parse.go runtime/execdriver/driver.go runtime/execdriver/lxc/lxc_template.go runtime/execdriver/lxc/lxc_template_unit_test.go runtime/execdriver/native/default_template.go runtime/execdriver/native/driver.go Docker-DCO-1.1-Signed-off-by: Michael Crosby <michael@crosbymichael.com> (github: crosbymichael)
This commit is contained in:
commit
eab56ac007
172 changed files with 9894 additions and 1535 deletions
40
CHANGELOG.md
40
CHANGELOG.md
|
@ -1,5 +1,45 @@
|
|||
# Changelog
|
||||
|
||||
## 0.9.1 (2014-03-24)
|
||||
|
||||
#### Builder
|
||||
- Fix printing multiple messages on a single line. Fixes broken output during builds.
|
||||
|
||||
#### Documentation
|
||||
- Fix external link on security of containers.
|
||||
|
||||
#### Contrib
|
||||
- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly.
|
||||
- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile.
|
||||
|
||||
#### Hack
|
||||
- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
|
||||
|
||||
#### Remote API
|
||||
- Fix content-type detection in `docker cp`.
|
||||
|
||||
#### Runtime
|
||||
- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
|
||||
- Only unshare the mount namespace for execin.
|
||||
- Retry to retrieve the layer metadata up to 5 times for `docker pull`.
|
||||
- Merge existing config when committing.
|
||||
- Fix panic in monitor.
|
||||
- Disable daemon startup timeout.
|
||||
- Fix issue #4681: add loopback interface when networking is disabled.
|
||||
- Add failing test case for issue #4681.
|
||||
- Send SIGTERM to child, instead of SIGKILL.
|
||||
- Show the driver and the kernel version in `docker info` even when not in debug mode.
|
||||
- Always symlink /dev/ptmx for libcontainer. This fixes console related problems.
|
||||
- Fix issue caused by the absence of /etc/apparmor.d.
|
||||
- Don't leave empty cidFile behind when failing to create the container.
|
||||
- Improve deprecation message.
|
||||
- Fix attach exit on darwin.
|
||||
- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping).
|
||||
- devicemapper: succeed immediately when removing non-existing devices.
|
||||
- devicemapper: increase timeout in waitClose to 10 seconds.
|
||||
- Remove goroutine leak on error.
|
||||
- Update parseLxcInfo to comply with new lxc1.0 format.
|
||||
|
||||
## 0.9.0 (2014-03-10)
|
||||
|
||||
#### Builder
|
||||
|
|
|
@ -126,33 +126,46 @@ For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)
|
|||
The sign-off is a simple line at the end of the explanation for the
|
||||
patch, which certifies that you wrote it or otherwise have the right to
|
||||
pass it on as an open-source patch. The rules are pretty simple: if you
|
||||
can certify the below:
|
||||
can certify the below (from
|
||||
[developercertificate.org](http://developercertificate.org/)):
|
||||
|
||||
```
|
||||
Docker Developer Certificate of Origin 1.1
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
By making a contribution to the Docker Project ("Project"), I represent and
|
||||
warrant that:
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
660 York Street, Suite 102,
|
||||
San Francisco, CA 94110 USA
|
||||
|
||||
a. The contribution was created in whole or in part by me and I have the right
|
||||
to submit the contribution on my own behalf or on behalf of a third party who
|
||||
has authorized me to submit this contribution to the Project; or
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
b. The contribution is based upon previous work that, to the best of my
|
||||
knowledge, is covered under an appropriate open source license and I have the
|
||||
right and authorization to submit that work with modifications, whether
|
||||
created in whole or in part by me, under the same open source license (unless
|
||||
I am permitted to submit under a different license) that I have identified in
|
||||
the contribution; or
|
||||
|
||||
c. The contribution was provided directly to me by some other person who
|
||||
represented and warranted (a) or (b) and I have not modified it.
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
d. I understand and agree that this Project and the contribution are publicly
|
||||
known and that a record of the contribution (including all personal
|
||||
information I submit with it, including my sign-off record) is maintained
|
||||
indefinitely and may be redistributed consistent with this Project or the open
|
||||
source license(s) involved.
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
|
||||
then you just add a line to every git commit message:
|
||||
|
|
|
@ -87,7 +87,7 @@ RUN git config --global user.email 'docker-dummy@example.com'
|
|||
|
||||
VOLUME /var/lib/docker
|
||||
WORKDIR /go/src/github.com/dotcloud/docker
|
||||
ENV DOCKER_BUILDTAGS apparmor
|
||||
ENV DOCKER_BUILDTAGS apparmor selinux
|
||||
|
||||
# Wrap all commands in the "docker-in-docker" script to allow nested containers
|
||||
ENTRYPOINT ["hack/dind"]
|
||||
|
|
2
VERSION
2
VERSION
|
@ -1 +1 @@
|
|||
0.9.0-dev
|
||||
0.9.1-dev
|
||||
|
|
|
@ -208,6 +208,15 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
|
|||
}
|
||||
// Upload the build context
|
||||
v := &url.Values{}
|
||||
|
||||
//Check if the given image name can be resolved
|
||||
if *tag != "" {
|
||||
repository, _ := utils.ParseRepositoryTag(*tag)
|
||||
if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
v.Set("t", *tag)
|
||||
|
||||
if *suppressOutput {
|
||||
|
@ -498,8 +507,8 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
|
|||
}
|
||||
|
||||
func (cli *DockerCli) CmdStop(args ...string) error {
|
||||
cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM)")
|
||||
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop.")
|
||||
cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)")
|
||||
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it.")
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -526,7 +535,7 @@ func (cli *DockerCli) CmdStop(args ...string) error {
|
|||
|
||||
func (cli *DockerCli) CmdRestart(args ...string) error {
|
||||
cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container")
|
||||
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop. Default=10")
|
||||
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10")
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -1003,6 +1012,14 @@ func (cli *DockerCli) CmdImport(args ...string) error {
|
|||
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
|
||||
}
|
||||
v := url.Values{}
|
||||
|
||||
if repository != "" {
|
||||
//Check if the given image name can be resolved
|
||||
if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
v.Set("repo", repository)
|
||||
v.Set("tag", tag)
|
||||
v.Set("fromSrc", src)
|
||||
|
@ -1453,6 +1470,13 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
//Check if the given image name can be resolved
|
||||
if repository != "" {
|
||||
if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("container", name)
|
||||
v.Set("repo", repository)
|
||||
|
@ -1741,6 +1765,11 @@ func (cli *DockerCli) CmdTag(args ...string) error {
|
|||
}
|
||||
|
||||
v := url.Values{}
|
||||
|
||||
//Check if the given image name can be resolved
|
||||
if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
|
||||
return err
|
||||
}
|
||||
v.Set("repo", repository)
|
||||
v.Set("tag", tag)
|
||||
|
||||
|
@ -2044,7 +2073,9 @@ func (cli *DockerCli) CmdCp(args ...string) error {
|
|||
}
|
||||
|
||||
func (cli *DockerCli) CmdSave(args ...string) error {
|
||||
cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout)")
|
||||
cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout by default)")
|
||||
outfile := cmd.String([]string{"o", "-output"}, "", "Write to an file, instead of STDOUT")
|
||||
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -2054,8 +2085,18 @@ func (cli *DockerCli) CmdSave(args ...string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
output io.Writer = cli.out
|
||||
err error
|
||||
)
|
||||
if *outfile != "" {
|
||||
output, err = os.Create(*outfile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
image := cmd.Arg(0)
|
||||
if err := cli.stream("GET", "/images/"+image+"/get", nil, cli.out, nil); err != nil {
|
||||
if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
@ -2063,6 +2104,8 @@ func (cli *DockerCli) CmdSave(args ...string) error {
|
|||
|
||||
func (cli *DockerCli) CmdLoad(args ...string) error {
|
||||
cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN")
|
||||
infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN")
|
||||
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -2072,7 +2115,17 @@ func (cli *DockerCli) CmdLoad(args ...string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
if err := cli.stream("POST", "/images/load", cli.in, cli.out, nil); err != nil {
|
||||
var (
|
||||
input io.Reader = cli.in
|
||||
err error
|
||||
)
|
||||
if *infile != "" {
|
||||
input, err = os.Open(*infile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -1,10 +1,9 @@
|
|||
package builtins
|
||||
|
||||
import (
|
||||
"github.com/dotcloud/docker/engine"
|
||||
|
||||
"github.com/dotcloud/docker/api"
|
||||
"github.com/dotcloud/docker/runtime/networkdriver/lxc"
|
||||
"github.com/dotcloud/docker/engine"
|
||||
"github.com/dotcloud/docker/runtime/networkdriver/bridge"
|
||||
"github.com/dotcloud/docker/server"
|
||||
)
|
||||
|
||||
|
@ -35,5 +34,5 @@ func remote(eng *engine.Engine) {
|
|||
//
|
||||
func daemon(eng *engine.Engine) {
|
||||
eng.Register("initserver", server.InitServer)
|
||||
eng.Register("init_networkdriver", lxc.InitDriver)
|
||||
eng.Register("init_networkdriver", bridge.InitDriver)
|
||||
}
|
||||
|
|
|
@ -26,20 +26,20 @@ end
|
|||
function __fish_print_docker_containers --description 'Print a list of docker containers' -a select
|
||||
switch $select
|
||||
case running
|
||||
docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
|
||||
docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
|
||||
case stopped
|
||||
docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
|
||||
docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
|
||||
case all
|
||||
docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS=" +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n'
|
||||
docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n'
|
||||
end
|
||||
end
|
||||
|
||||
function __fish_print_docker_images --description 'Print a list of docker images'
|
||||
docker images | awk 'NR>1' | grep -v '<none>' | awk '{print $1":"$2}'
|
||||
docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1":"$2}'
|
||||
end
|
||||
|
||||
function __fish_print_docker_repositories --description 'Print a list of docker repositories'
|
||||
docker images | awk 'NR>1' | grep -v '<none>' | awk '{print $1}' | sort | uniq
|
||||
docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1}' | sort | uniq
|
||||
end
|
||||
|
||||
# common options
|
||||
|
|
|
@ -37,5 +37,5 @@ script
|
|||
if [ -f /etc/default/$UPSTART_JOB ]; then
|
||||
. /etc/default/$UPSTART_JOB
|
||||
fi
|
||||
"$DOCKER" -d $DOCKER_OPTS
|
||||
exec "$DOCKER" -d $DOCKER_OPTS
|
||||
end script
|
||||
|
|
|
@ -63,7 +63,7 @@ For Docker containers using cgroups, the container name will be the
|
|||
full ID or long ID of the container. If a container shows up as
|
||||
ae836c95b4c3 in ``docker ps``, its long ID might be something like
|
||||
``ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79``. You
|
||||
can look it up with ``docker inspect`` or ``docker ps -notrunc``.
|
||||
can look it up with ``docker inspect`` or ``docker ps --no-trunc``.
|
||||
|
||||
Putting everything together to look at the memory metrics for a Docker
|
||||
container, take a look at ``/sys/fs/cgroup/memory/lxc/<longid>/``.
|
||||
|
|
|
@ -47,7 +47,7 @@ divert ``/sbin/initctl`` to ``/bin/true`` so it thinks everything is working.
|
|||
|
||||
# Hack for initctl not being available in Ubuntu
|
||||
RUN dpkg-divert --local --rename --add /sbin/initctl
|
||||
RUN ln -s /bin/true /sbin/initctl
|
||||
RUN ln -sf /bin/true /sbin/initctl
|
||||
|
||||
Afterwards we'll be able to update our apt repositories and install MongoDB
|
||||
|
||||
|
|
|
@ -37,7 +37,7 @@ And run the PostgreSQL server container (in the foreground):
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run --rm -P -name pg_test eg_postgresql
|
||||
$ sudo docker run --rm -P --name pg_test eg_postgresql
|
||||
|
||||
There are 2 ways to connect to the PostgreSQL server. We can use
|
||||
:ref:`working_with_links_names`, or we can access it from our host (or the network).
|
||||
|
|
|
@ -88,7 +88,7 @@ Almost there. Next, we add a hack to get us by the lack of ``initctl``:
|
|||
# Hack for initctl
|
||||
# See: https://github.com/dotcloud/docker/issues/1024
|
||||
RUN dpkg-divert --local --rename --add /sbin/initctl
|
||||
RUN ln -s /bin/true /sbin/initctl
|
||||
RUN ln -sf /bin/true /sbin/initctl
|
||||
|
||||
Then, we expose the Riak Protocol Buffers and HTTP interfaces, along with SSH:
|
||||
|
||||
|
|
|
@ -29,6 +29,12 @@ To run properly, docker needs the following software to be installed at runtime:
|
|||
- iptables version 1.4 or later
|
||||
- Git version 1.7 or later
|
||||
- XZ Utils 4.9 or later
|
||||
- a `properly mounted
|
||||
<https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount>`_
|
||||
cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point `is
|
||||
<https://github.com/dotcloud/docker/issues/2683>`_ `not
|
||||
<https://github.com/dotcloud/docker/issues/3485>`_ `sufficient
|
||||
<https://github.com/dotcloud/docker/issues/4568>`_)
|
||||
|
||||
|
||||
Check kernel dependencies
|
||||
|
|
|
@ -432,7 +432,7 @@ Stop a container
|
|||
|
||||
HTTP/1.1 204 OK
|
||||
|
||||
:query t: number of seconds to wait for the container to stop
|
||||
:query t: number of seconds to wait before killing the container
|
||||
:statuscode 204: no error
|
||||
:statuscode 404: no such container
|
||||
:statuscode 500: server error
|
||||
|
@ -457,7 +457,7 @@ Restart a container
|
|||
|
||||
HTTP/1.1 204 OK
|
||||
|
||||
:query t: number of seconds to wait for the container to stop
|
||||
:query t: number of seconds to wait before killing the container
|
||||
:statuscode 204: no error
|
||||
:statuscode 404: no such container
|
||||
:statuscode 500: server error
|
||||
|
|
|
@ -49,3 +49,5 @@ and we will add the libraries here.
|
|||
+----------------------+----------------+--------------------------------------------+----------+
|
||||
| Perl | Net::Docker | https://metacpan.org/pod/Net::Docker | Active |
|
||||
+----------------------+----------------+--------------------------------------------+----------+
|
||||
| Perl | Eixo::Docker | https://github.com/alambike/eixo-docker | Active |
|
||||
+----------------------+----------------+--------------------------------------------+----------+
|
||||
|
|
|
@ -881,10 +881,32 @@ Known Issues (kill)
|
|||
|
||||
::
|
||||
|
||||
Usage: docker load < repository.tar
|
||||
Usage: docker load
|
||||
|
||||
Load an image from a tar archive on STDIN
|
||||
|
||||
-i, --input="": Read from a tar archive file, instead of STDIN
|
||||
|
||||
Loads a tarred repository from a file or the standard input stream.
|
||||
Restores both images and tags.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker images
|
||||
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
|
||||
$ sudo docker load < busybox.tar
|
||||
$ sudo docker images
|
||||
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
|
||||
busybox latest 769b9341d937 7 weeks ago 2.489 MB
|
||||
$ sudo docker load --input fedora.tar
|
||||
$ sudo docker images
|
||||
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
|
||||
busybox latest 769b9341d937 7 weeks ago 2.489 MB
|
||||
fedora rawhide 0d20aec6529d 7 weeks ago 387 MB
|
||||
fedora 20 58394af37342 7 weeks ago 385.5 MB
|
||||
fedora heisenbug 58394af37342 7 weeks ago 385.5 MB
|
||||
fedora latest 58394af37342 7 weeks ago 385.5 MB
|
||||
|
||||
Loads a tarred repository from the standard input stream.
|
||||
Restores both images and tags.
|
||||
|
||||
.. _cli_login:
|
||||
|
||||
|
@ -1145,7 +1167,7 @@ image is removed.
|
|||
--volumes-from="": Mount all volumes from the given container(s)
|
||||
--entrypoint="": Overwrite the default entrypoint set by the image
|
||||
-w, --workdir="": Working directory inside the container
|
||||
--lxc-conf=[]: Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
|
||||
--lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
|
||||
--sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
|
||||
--expose=[]: Expose a port from the container without publishing it to your host
|
||||
--link="": Add link to another container (name:alias)
|
||||
|
@ -1317,10 +1339,27 @@ This example shows 5 containers that might be set up to test a web application c
|
|||
|
||||
::
|
||||
|
||||
Usage: docker save image > repository.tar
|
||||
Usage: docker save IMAGE
|
||||
|
||||
Save an image to a tar archive (streamed to stdout by default)
|
||||
|
||||
-o, --output="": Write to an file, instead of STDOUT
|
||||
|
||||
|
||||
Produces a tarred repository to the standard output stream.
|
||||
Contains all parent layers, and all tags + versions, or specified repo:tag.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker save busybox > busybox.tar
|
||||
$ ls -sh b.tar
|
||||
2.7M b.tar
|
||||
$ sudo docker save --output busybox.tar busybox
|
||||
$ ls -sh b.tar
|
||||
2.7M b.tar
|
||||
$ sudo docker save -o fedora-all.tar fedora
|
||||
$ sudo docker save -o fedora-latest.tar fedora:latest
|
||||
|
||||
Streams a tarred repository to the standard output stream.
|
||||
Contains all parent layers, and all tags + versions.
|
||||
|
||||
.. _cli_search:
|
||||
|
||||
|
@ -1360,11 +1399,11 @@ This example shows 5 containers that might be set up to test a web application c
|
|||
|
||||
Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...]
|
||||
|
||||
Stop a running container (Send SIGTERM)
|
||||
Stop a running container (Send SIGTERM, and then SIGKILL after grace period)
|
||||
|
||||
-t, --time=10: Number of seconds to wait for the container to stop.
|
||||
-t, --time=10: Number of seconds to wait for the container to stop before killing it.
|
||||
|
||||
The main process inside the container will receive SIGTERM.
|
||||
The main process inside the container will receive SIGTERM, and after a grace period, SIGKILL
|
||||
|
||||
.. _cli_tag:
|
||||
|
||||
|
|
|
@ -194,7 +194,7 @@ Runtime Privilege and LXC Configuration
|
|||
::
|
||||
|
||||
--privileged=false: Give extended privileges to this container
|
||||
--lxc-conf=[]: Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
|
||||
--lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
|
||||
|
||||
By default, Docker containers are "unprivileged" and cannot, for
|
||||
example, run a Docker daemon inside a Docker container. This is
|
||||
|
@ -211,12 +211,13 @@ host. Additional information about running with ``--privileged`` is
|
|||
available on the `Docker Blog
|
||||
<http://blog.docker.io/2013/09/docker-can-now-run-within-docker/>`_.
|
||||
|
||||
An operator can also specify LXC options using one or more
|
||||
``--lxc-conf`` parameters. These can be new parameters or override
|
||||
existing parameters from the lxc-template.go_. Note that in the
|
||||
future, a given host's Docker daemon may not use LXC, so this is an
|
||||
implementation-specific configuration meant for operators already
|
||||
familiar with using LXC directly.
|
||||
If the Docker daemon was started using the ``lxc`` exec-driver
|
||||
(``docker -d --exec-driver=lxc``) then the operator can also specify
|
||||
LXC options using one or more ``--lxc-conf`` parameters. These can be
|
||||
new parameters or override existing parameters from the lxc-template.go_.
|
||||
Note that in the future, a given host's Docker daemon may not use LXC,
|
||||
so this is an implementation-specific configuration meant for operators
|
||||
already familiar with using LXC directly.
|
||||
|
||||
.. _lxc-template.go: https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go
|
||||
|
||||
|
|
|
@ -40,6 +40,8 @@ Repository to a local image cache.
|
|||
short form of the image ID. These short image IDs are the first 12
|
||||
characters of the full image ID - which can be found using ``docker
|
||||
inspect`` or ``docker images --no-trunc=true``
|
||||
|
||||
**If you're using OS X** then you shouldn't use ``sudo``
|
||||
|
||||
Running an interactive shell
|
||||
----------------------------
|
||||
|
|
95
docs/sources/use/chef.rst
Normal file
95
docs/sources/use/chef.rst
Normal file
|
@ -0,0 +1,95 @@
|
|||
:title: Chef Usage
|
||||
:description: Installation and using Docker via Chef
|
||||
:keywords: chef, installation, usage, docker, documentation
|
||||
|
||||
.. _install_using_chef:
|
||||
|
||||
Using Chef
|
||||
=============
|
||||
|
||||
.. note::
|
||||
|
||||
Please note this is a community contributed installation path. The
|
||||
only 'official' installation is using the :ref:`ubuntu_linux`
|
||||
installation path. This version may sometimes be out of date.
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
To use this guide you'll need a working installation of
|
||||
`Chef <http://www.getchef.com/>`_. This cookbook supports a variety of
|
||||
operating systems.
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
The cookbook is available on the `Chef Community Site
|
||||
<community.opscode.com/cookbooks/docker>`_ and can be installed
|
||||
using your favorite cookbook dependency manager.
|
||||
|
||||
The source can be found on `GitHub
|
||||
<https://github.com/bflad/chef-docker>`_.
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
The cookbook provides recipes for installing Docker, configuring init
|
||||
for Docker, and resources for managing images and containers.
|
||||
It supports almost all Docker functionality.
|
||||
|
||||
Installation
|
||||
~~~~~~~~~~~~
|
||||
|
||||
.. code-block:: ruby
|
||||
|
||||
include_recipe 'docker'
|
||||
|
||||
Images
|
||||
~~~~~~
|
||||
|
||||
The next step is to pull a Docker image. For this, we have a resource:
|
||||
|
||||
.. code-block:: ruby
|
||||
|
||||
docker_image 'samalba/docker-registry'
|
||||
|
||||
This is equivalent to running:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker pull samalba/docker-registry
|
||||
|
||||
There are attributes available to control how long the cookbook
|
||||
will allow for downloading (5 minute default).
|
||||
|
||||
To remove images you no longer need:
|
||||
|
||||
.. code-block:: ruby
|
||||
|
||||
docker_image 'samalba/docker-registry' do
|
||||
action :remove
|
||||
end
|
||||
|
||||
Containers
|
||||
~~~~~~~~~~
|
||||
|
||||
Now you have an image where you can run commands within a container
|
||||
managed by Docker.
|
||||
|
||||
.. code-block:: ruby
|
||||
|
||||
docker_container 'samalba/docker-registry' do
|
||||
detach true
|
||||
port '5000:5000'
|
||||
env 'SETTINGS_FLAVOR=local'
|
||||
volume '/mnt/docker:/docker-storage'
|
||||
end
|
||||
|
||||
This is equivalent to running the following command, but under upstart:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run --detach=true --publish='5000:5000' --env='SETTINGS_FLAVOR=local' --volume='/mnt/docker:/docker-storage' samalba/docker-registry
|
||||
|
||||
The resources will accept a single string or an array of values
|
||||
for any docker flags that allow multiple values.
|
|
@ -20,4 +20,5 @@ Contents:
|
|||
working_with_volumes
|
||||
working_with_links_names
|
||||
ambassador_pattern_linking
|
||||
chef
|
||||
puppet
|
||||
|
|
|
@ -129,7 +129,7 @@ because they are external to images.
|
|||
Instead you can use ``--volumes-from`` to start a new container that can access the
|
||||
data-container's volume. For example::
|
||||
|
||||
$ sudo docker run -rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data
|
||||
$ sudo docker run --rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data
|
||||
|
||||
* ``--rm`` - remove the container when it exits
|
||||
* ``--volumes-from DATA`` - attach to the volumes shared by the ``DATA`` container
|
||||
|
@ -140,7 +140,7 @@ data-container's volume. For example::
|
|||
Then to restore to the same container, or another that you've made elsewhere::
|
||||
|
||||
# create a new data container
|
||||
$ sudo docker run -v /data -name DATA2 busybox true
|
||||
$ sudo docker run -v /data --name DATA2 busybox true
|
||||
# untar the backup files into the new container's data volume
|
||||
$ sudo docker run --rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar
|
||||
data/
|
||||
|
|
|
@ -189,7 +189,7 @@ func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, i
|
|||
}
|
||||
|
||||
// Create root filesystem in the driver
|
||||
if err := graph.driver.Create(img.ID, img.Parent); err != nil {
|
||||
if err := graph.driver.Create(img.ID, img.Parent, ""); err != nil {
|
||||
return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err)
|
||||
}
|
||||
// Mount the root filesystem so we can apply the diff/layer
|
||||
|
|
|
@ -177,6 +177,13 @@ export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
|
|||
|
||||
NOTE: if you need to set more than one build tag, space separate them.
|
||||
|
||||
If you're building a binary that may need to be used on platforms that include
|
||||
SELinux, you will need to set `DOCKER_BUILDTAGS` as follows:
|
||||
|
||||
```bash
|
||||
export DOCKER_BUILDTAGS='selinux'
|
||||
```
|
||||
|
||||
### Static Daemon
|
||||
|
||||
If it is feasible within the constraints of your distribution, you should
|
||||
|
@ -259,6 +266,12 @@ installed and available at runtime:
|
|||
|
||||
* iptables version 1.4 or later
|
||||
* XZ Utils version 4.9 or later
|
||||
* a [properly
|
||||
mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount)
|
||||
cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point
|
||||
[is](https://github.com/dotcloud/docker/issues/2683)
|
||||
[not](https://github.com/dotcloud/docker/issues/3485)
|
||||
[sufficient](https://github.com/dotcloud/docker/issues/4568))
|
||||
|
||||
Additionally, the Docker client needs the following software to be installed and
|
||||
available at runtime:
|
||||
|
|
|
@ -1,29 +0,0 @@
|
|||
# DOCKER-VERSION: 0.7.6
|
||||
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
|
||||
# DESCRIPTION: docker-ci continuous integration service
|
||||
# TO_BUILD: docker build -t docker-ci/docker-ci .
|
||||
# TO_RUN: docker run --rm -i -t -p 8000:80 -p 2222:22 -v /run:/var/socket \
|
||||
# -v /data/docker-ci:/data/docker-ci docker-ci/docker-ci
|
||||
|
||||
from ubuntu:12.04
|
||||
maintainer Daniel Mizyrycki <daniel@dotcloud.com>
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
RUN echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > \
|
||||
/etc/apt/sources.list; apt-get update
|
||||
RUN apt-get install -y --no-install-recommends python2.7 python-dev \
|
||||
libevent-dev git supervisor ssh rsync less vim sudo gcc wget nginx
|
||||
RUN cd /tmp; wget http://python-distribute.org/distribute_setup.py
|
||||
RUN cd /tmp; python distribute_setup.py; easy_install pip; rm distribute_setup.py
|
||||
|
||||
RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
|
||||
RUN echo 'deb http://get.docker.io/ubuntu docker main' > \
|
||||
/etc/apt/sources.list.d/docker.list; apt-get update
|
||||
RUN apt-get install -y lxc-docker-0.8.0
|
||||
RUN pip install SQLAlchemy==0.7.10 buildbot buildbot-slave pyopenssl boto
|
||||
RUN ln -s /var/socket/docker.sock /run/docker.sock
|
||||
|
||||
ADD . /docker-ci
|
||||
RUN /docker-ci/setup.sh
|
||||
|
||||
ENTRYPOINT ["supervisord", "-n"]
|
|
@ -1 +0,0 @@
|
|||
Daniel Mizyrycki <daniel@dotcloud.com> (@mzdaniel)
|
|
@ -1,65 +0,0 @@
|
|||
=========
|
||||
docker-ci
|
||||
=========
|
||||
|
||||
This directory contains docker-ci continuous integration system.
|
||||
As expected, it is a fully dockerized and deployed using
|
||||
docker-container-runner.
|
||||
docker-ci is based on Buildbot, a continuous integration system designed
|
||||
to automate the build/test cycle. By automatically rebuilding and testing
|
||||
the tree each time something has changed, build problems are pinpointed
|
||||
quickly, before other developers are inconvenienced by the failure.
|
||||
We are running buildbot at Rackspace to verify docker and docker-registry
|
||||
pass tests, and check for coverage code details.
|
||||
|
||||
docker-ci instance is at https://docker-ci.docker.io/waterfall
|
||||
|
||||
Inside docker-ci container we have the following directory structure:
|
||||
|
||||
/docker-ci source code of docker-ci
|
||||
/data/backup/docker-ci/ daily backup (replicated over S3)
|
||||
/data/docker-ci/coverage/{docker,docker-registry}/ mapped to host volumes
|
||||
/data/buildbot/{master,slave}/ main docker-ci buildbot config and database
|
||||
/var/socket/{docker.sock} host volume access to docker socket
|
||||
|
||||
|
||||
Production deployment
|
||||
=====================
|
||||
|
||||
::
|
||||
|
||||
# Clone docker-ci repository
|
||||
git clone https://github.com/dotcloud/docker
|
||||
cd docker/hack/infrastructure/docker-ci
|
||||
|
||||
export DOCKER_PROD=[PRODUCTION_SERVER_IP]
|
||||
|
||||
# Create data host volume. (only once)
|
||||
docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \
|
||||
mkdir -p /data/docker-ci/coverage/docker
|
||||
docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \
|
||||
mkdir -p /data/docker-ci/coverage/docker-registry
|
||||
docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \
|
||||
chown -R 1000.1000 /data/docker-ci
|
||||
|
||||
# dcr deployment. Define credentials and special environment dcr variables
|
||||
# ( retrieved at /hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml )
|
||||
export WEB_USER=[DOCKER-CI-WEBSITE-USERNAME]
|
||||
export WEB_IRC_PWD=[DOCKER-CI-WEBSITE-PASSWORD]
|
||||
export BUILDBOT_PWD=[BUILDSLAVE_PASSWORD]
|
||||
export AWS_ACCESS_KEY=[DOCKER_RELEASE_S3_ACCESS]
|
||||
export AWS_SECRET_KEY=[DOCKER_RELEASE_S3_SECRET]
|
||||
export GPG_PASSPHRASE=[DOCKER_RELEASE_PASSPHRASE]
|
||||
export BACKUP_AWS_ID=[S3_BUCKET_CREDENTIAL_ACCESS]
|
||||
export BACKUP_AWS_SECRET=[S3_BUCKET_CREDENTIAL_SECRET]
|
||||
export SMTP_USER=[MAILGUN_SMTP_USERNAME]
|
||||
export SMTP_PWD=[MAILGUN_SMTP_PASSWORD]
|
||||
export EMAIL_RCP=[EMAIL_FOR_BUILD_ERRORS]
|
||||
|
||||
# Build docker-ci and testbuilder docker images
|
||||
docker -H $DOCKER_PROD build -t docker-ci/docker-ci .
|
||||
(cd testbuilder; docker -H $DOCKER_PROD build --rm -t docker-ci/testbuilder .)
|
||||
|
||||
# Run docker-ci container ( assuming no previous container running )
|
||||
(cd dcr/prod; dcr docker-ci.yml start)
|
||||
(cd dcr/prod; dcr docker-ci.yml register docker-ci.docker.io)
|
|
@ -1 +0,0 @@
|
|||
0.5.6
|
|
@ -1,176 +0,0 @@
|
|||
# This file is part of Buildbot. Buildbot is free software: you can
|
||||
# redistribute it and/or modify it under the terms of the GNU General Public
|
||||
# License as published by the Free Software Foundation, version 2.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
||||
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
# details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along with
|
||||
# this program; if not, write to the Free Software Foundation, Inc., 51
|
||||
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Copyright Buildbot Team Members
|
||||
|
||||
#!/usr/bin/env python
|
||||
"""
|
||||
github_buildbot.py is based on git_buildbot.py
|
||||
|
||||
github_buildbot.py will determine the repository information from the JSON
|
||||
HTTP POST it receives from github.com and build the appropriate repository.
|
||||
If your github repository is private, you must add a ssh key to the github
|
||||
repository for the user who initiated the build on the buildslave.
|
||||
|
||||
"""
|
||||
|
||||
import re
|
||||
import datetime
|
||||
from twisted.python import log
|
||||
import calendar
|
||||
|
||||
try:
|
||||
import json
|
||||
assert json
|
||||
except ImportError:
|
||||
import simplejson as json
|
||||
|
||||
# python is silly about how it handles timezones
|
||||
class fixedOffset(datetime.tzinfo):
|
||||
"""
|
||||
fixed offset timezone
|
||||
"""
|
||||
def __init__(self, minutes, hours, offsetSign = 1):
|
||||
self.minutes = int(minutes) * offsetSign
|
||||
self.hours = int(hours) * offsetSign
|
||||
self.offset = datetime.timedelta(minutes = self.minutes,
|
||||
hours = self.hours)
|
||||
|
||||
def utcoffset(self, dt):
|
||||
return self.offset
|
||||
|
||||
def dst(self, dt):
|
||||
return datetime.timedelta(0)
|
||||
|
||||
def convertTime(myTestTimestamp):
|
||||
#"1970-01-01T00:00:00+00:00"
|
||||
# Normalize myTestTimestamp
|
||||
if myTestTimestamp[-1] == 'Z':
|
||||
myTestTimestamp = myTestTimestamp[:-1] + '-00:00'
|
||||
matcher = re.compile(r'(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)([-+])(\d\d):(\d\d)')
|
||||
result = matcher.match(myTestTimestamp)
|
||||
(year, month, day, hour, minute, second, offsetsign, houroffset, minoffset) = \
|
||||
result.groups()
|
||||
if offsetsign == '+':
|
||||
offsetsign = 1
|
||||
else:
|
||||
offsetsign = -1
|
||||
|
||||
offsetTimezone = fixedOffset( minoffset, houroffset, offsetsign )
|
||||
myDatetime = datetime.datetime( int(year),
|
||||
int(month),
|
||||
int(day),
|
||||
int(hour),
|
||||
int(minute),
|
||||
int(second),
|
||||
0,
|
||||
offsetTimezone)
|
||||
return calendar.timegm( myDatetime.utctimetuple() )
|
||||
|
||||
def getChanges(request, options = None):
|
||||
"""
|
||||
Reponds only to POST events and starts the build process
|
||||
|
||||
:arguments:
|
||||
request
|
||||
the http request object
|
||||
"""
|
||||
payload = json.loads(request.args['payload'][0])
|
||||
import urllib,datetime
|
||||
fname = str(datetime.datetime.now()).replace(' ','_').replace(':','-')[:19]
|
||||
# Github event debug
|
||||
# open('github_{0}.json'.format(fname),'w').write(json.dumps(json.loads(urllib.unquote(request.args['payload'][0])), sort_keys = True, indent = 2))
|
||||
|
||||
if 'pull_request' in payload:
|
||||
user = payload['pull_request']['user']['login']
|
||||
repo = payload['pull_request']['head']['repo']['name']
|
||||
repo_url = payload['pull_request']['head']['repo']['html_url']
|
||||
else:
|
||||
user = payload['repository']['owner']['name']
|
||||
repo = payload['repository']['name']
|
||||
repo_url = payload['repository']['url']
|
||||
project = request.args.get('project', None)
|
||||
if project:
|
||||
project = project[0]
|
||||
elif project is None:
|
||||
project = ''
|
||||
# This field is unused:
|
||||
#private = payload['repository']['private']
|
||||
changes = process_change(payload, user, repo, repo_url, project)
|
||||
log.msg("Received %s changes from github" % len(changes))
|
||||
return (changes, 'git')
|
||||
|
||||
def process_change(payload, user, repo, repo_url, project):
|
||||
"""
|
||||
Consumes the JSON as a python object and actually starts the build.
|
||||
|
||||
:arguments:
|
||||
payload
|
||||
Python Object that represents the JSON sent by GitHub Service
|
||||
Hook.
|
||||
"""
|
||||
changes = []
|
||||
|
||||
newrev = payload['after'] if 'after' in payload else payload['pull_request']['head']['sha']
|
||||
refname = payload['ref'] if 'ref' in payload else payload['pull_request']['head']['ref']
|
||||
|
||||
# We only care about regular heads, i.e. branches
|
||||
match = re.match(r"^(refs\/heads\/|)([^/]+)$", refname)
|
||||
if not match:
|
||||
log.msg("Ignoring refname `%s': Not a branch" % refname)
|
||||
return []
|
||||
|
||||
branch = match.groups()[1]
|
||||
if re.match(r"^0*$", newrev):
|
||||
log.msg("Branch `%s' deleted, ignoring" % branch)
|
||||
return []
|
||||
else:
|
||||
if 'pull_request' in payload:
|
||||
if payload['action'] == 'closed':
|
||||
log.msg("PR#{} closed, ignoring".format(payload['number']))
|
||||
return []
|
||||
changes = [{
|
||||
'category' : 'github_pullrequest',
|
||||
'who' : '{0} - PR#{1}'.format(user,payload['number']),
|
||||
'files' : [],
|
||||
'comments' : payload['pull_request']['title'],
|
||||
'revision' : newrev,
|
||||
'when' : convertTime(payload['pull_request']['updated_at']),
|
||||
'branch' : branch,
|
||||
'revlink' : '{0}/commit/{1}'.format(repo_url,newrev),
|
||||
'repository' : repo_url,
|
||||
'project' : project }]
|
||||
return changes
|
||||
for commit in payload['commits']:
|
||||
files = []
|
||||
if 'added' in commit:
|
||||
files.extend(commit['added'])
|
||||
if 'modified' in commit:
|
||||
files.extend(commit['modified'])
|
||||
if 'removed' in commit:
|
||||
files.extend(commit['removed'])
|
||||
when = convertTime( commit['timestamp'])
|
||||
log.msg("New revision: %s" % commit['id'][:8])
|
||||
chdict = dict(
|
||||
who = commit['author']['name']
|
||||
+ " <" + commit['author']['email'] + ">",
|
||||
files = files,
|
||||
comments = commit['message'],
|
||||
revision = commit['id'],
|
||||
when = when,
|
||||
branch = branch,
|
||||
revlink = commit['url'],
|
||||
repository = repo_url,
|
||||
project = project)
|
||||
changes.append(chdict)
|
||||
return changes
|
|
@ -1,161 +0,0 @@
|
|||
import os, re
|
||||
from buildbot.buildslave import BuildSlave
|
||||
from buildbot.schedulers.forcesched import ForceScheduler
|
||||
from buildbot.schedulers.basic import SingleBranchScheduler
|
||||
from buildbot.schedulers.timed import Nightly
|
||||
from buildbot.changes import filter
|
||||
from buildbot.config import BuilderConfig
|
||||
from buildbot.process.factory import BuildFactory
|
||||
from buildbot.process.properties import Property
|
||||
from buildbot.steps.shell import ShellCommand
|
||||
from buildbot.status import html, words
|
||||
from buildbot.status.web import authz, auth
|
||||
from buildbot.status.mail import MailNotifier
|
||||
|
||||
|
||||
def ENV(x):
|
||||
'''Promote an environment variable for global use returning its value'''
|
||||
retval = os.environ.get(x, '')
|
||||
globals()[x] = retval
|
||||
return retval
|
||||
|
||||
|
||||
class TestCommand(ShellCommand):
|
||||
'''Extend ShellCommand with optional summary logs'''
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(TestCommand, self).__init__(*args, **kwargs)
|
||||
|
||||
def createSummary(self, log):
|
||||
exit_status = re.sub(r'.+\n\+ exit (\d+).+',
|
||||
r'\1', log.getText()[-100:], flags=re.DOTALL)
|
||||
if exit_status != '0':
|
||||
return
|
||||
# Infer coverage path from log
|
||||
if '+ COVERAGE_PATH' in log.getText():
|
||||
path = re.sub(r'.+\+ COVERAGE_PATH=((.+?)-\d+).+',
|
||||
r'\2/\1', log.getText(), flags=re.DOTALL)
|
||||
url = '{}coverage/{}/index.html'.format(c['buildbotURL'], path)
|
||||
self.addURL('coverage', url)
|
||||
elif 'COVERAGE_FILE' in log.getText():
|
||||
path = re.sub(r'.+\+ COVERAGE_FILE=((.+?)-\d+).+',
|
||||
r'\2/\1', log.getText(), flags=re.DOTALL)
|
||||
url = '{}coverage/{}/index.html'.format(c['buildbotURL'], path)
|
||||
self.addURL('coverage', url)
|
||||
|
||||
|
||||
PORT_WEB = 8000 # Buildbot webserver port
|
||||
PORT_GITHUB = 8011 # Buildbot github hook port
|
||||
PORT_MASTER = 9989 # Port where buildbot master listen buildworkers
|
||||
|
||||
BUILDBOT_URL = '//localhost:{}/'.format(PORT_WEB)
|
||||
DOCKER_REPO = 'https://github.com/docker-test/docker'
|
||||
DOCKER_TEST_ARGV = 'HEAD {}'.format(DOCKER_REPO)
|
||||
REGISTRY_REPO = 'https://github.com/docker-test/docker-registry'
|
||||
REGISTRY_TEST_ARGV = 'HEAD {}'.format(REGISTRY_REPO)
|
||||
if ENV('DEPLOYMENT') == 'staging':
|
||||
BUILDBOT_URL = "//docker-ci-stage.docker.io/"
|
||||
if ENV('DEPLOYMENT') == 'production':
|
||||
BUILDBOT_URL = '//docker-ci.docker.io/'
|
||||
DOCKER_REPO = 'https://github.com/dotcloud/docker'
|
||||
DOCKER_TEST_ARGV = ''
|
||||
REGISTRY_REPO = 'https://github.com/dotcloud/docker-registry'
|
||||
REGISTRY_TEST_ARGV = ''
|
||||
|
||||
# Credentials set by setup.sh from deployment.py
|
||||
ENV('WEB_USER')
|
||||
ENV('WEB_IRC_PWD')
|
||||
ENV('BUILDBOT_PWD')
|
||||
ENV('SMTP_USER')
|
||||
ENV('SMTP_PWD')
|
||||
ENV('EMAIL_RCP')
|
||||
ENV('IRC_CHANNEL')
|
||||
|
||||
|
||||
c = BuildmasterConfig = {}
|
||||
|
||||
c['title'] = "docker-ci"
|
||||
c['titleURL'] = "waterfall"
|
||||
c['buildbotURL'] = BUILDBOT_URL
|
||||
c['db'] = {'db_url':"sqlite:///state.sqlite"}
|
||||
c['slaves'] = [BuildSlave('buildworker', BUILDBOT_PWD)]
|
||||
c['slavePortnum'] = PORT_MASTER
|
||||
|
||||
|
||||
# Schedulers
|
||||
c['schedulers'] = [ForceScheduler(name='trigger', builderNames=[
|
||||
'docker', 'docker-registry', 'nightlyrelease', 'backup'])]
|
||||
c['schedulers'] += [SingleBranchScheduler(name="docker", treeStableTimer=None,
|
||||
change_filter=filter.ChangeFilter(branch='master',
|
||||
repository=DOCKER_REPO), builderNames=['docker'])]
|
||||
c['schedulers'] += [SingleBranchScheduler(name="registry", treeStableTimer=None,
|
||||
change_filter=filter.ChangeFilter(branch='master',
|
||||
repository=REGISTRY_REPO), builderNames=['docker-registry'])]
|
||||
c['schedulers'] += [SingleBranchScheduler(name='docker-pr', treeStableTimer=None,
|
||||
change_filter=filter.ChangeFilter(category='github_pullrequest',
|
||||
project='docker'), builderNames=['docker-pr'])]
|
||||
c['schedulers'] += [SingleBranchScheduler(name='docker-registry-pr', treeStableTimer=None,
|
||||
change_filter=filter.ChangeFilter(category='github_pullrequest',
|
||||
project='docker-registry'), builderNames=['docker-registry-pr'])]
|
||||
c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=[
|
||||
'nightlyrelease', 'backup'], hour=7, minute=00)]
|
||||
|
||||
|
||||
# Builders
|
||||
|
||||
# Backup
|
||||
factory = BuildFactory()
|
||||
factory.addStep(TestCommand(description='backup', logEnviron=False,
|
||||
usePTY=True, command='/docker-ci/tool/backup.py'))
|
||||
c['builders'] = [BuilderConfig(name='backup',slavenames=['buildworker'],
|
||||
factory=factory)]
|
||||
|
||||
# Docker test
|
||||
factory = BuildFactory()
|
||||
factory.addStep(TestCommand(description='docker', logEnviron=False,
|
||||
usePTY=True, command='/docker-ci/dockertest/docker {}'.format(DOCKER_TEST_ARGV)))
|
||||
c['builders'] += [BuilderConfig(name='docker',slavenames=['buildworker'],
|
||||
factory=factory)]
|
||||
|
||||
# Docker pull request test
|
||||
factory = BuildFactory()
|
||||
factory.addStep(TestCommand(description='docker-pr', logEnviron=False,
|
||||
usePTY=True, command=['/docker-ci/dockertest/docker',
|
||||
Property('revision'), Property('repository'), Property('branch')]))
|
||||
c['builders'] += [BuilderConfig(name='docker-pr',slavenames=['buildworker'],
|
||||
factory=factory)]
|
||||
|
||||
# docker-registry test
|
||||
factory = BuildFactory()
|
||||
factory.addStep(TestCommand(description='docker-registry', logEnviron=False,
|
||||
usePTY=True, command='/docker-ci/dockertest/docker-registry {}'.format(REGISTRY_TEST_ARGV)))
|
||||
c['builders'] += [BuilderConfig(name='docker-registry',slavenames=['buildworker'],
|
||||
factory=factory)]
|
||||
|
||||
# Docker registry pull request test
|
||||
factory = BuildFactory()
|
||||
factory.addStep(TestCommand(description='docker-registry-pr', logEnviron=False,
|
||||
usePTY=True, command=['/docker-ci/dockertest/docker-registry',
|
||||
Property('revision'), Property('repository'), Property('branch')]))
|
||||
c['builders'] += [BuilderConfig(name='docker-registry-pr',slavenames=['buildworker'],
|
||||
factory=factory)]
|
||||
|
||||
# Docker nightly release
|
||||
factory = BuildFactory()
|
||||
factory.addStep(ShellCommand(description='NightlyRelease',logEnviron=False,
|
||||
usePTY=True, command=['/docker-ci/dockertest/nightlyrelease']))
|
||||
c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'],
|
||||
factory=factory)]
|
||||
|
||||
# Status
|
||||
authz_cfg = authz.Authz(auth=auth.BasicAuth([(WEB_USER, WEB_IRC_PWD)]),
|
||||
forceBuild='auth')
|
||||
c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)]
|
||||
c['status'].append(html.WebStatus(http_port=PORT_GITHUB, allowForce=True,
|
||||
change_hook_dialects={ 'github': True }))
|
||||
c['status'].append(MailNotifier(fromaddr='docker-test@docker.io',
|
||||
sendToInterestedUsers=False, extraRecipients=[EMAIL_RCP],
|
||||
mode='failing', relayhost='smtp.mailgun.org', smtpPort=587, useTls=True,
|
||||
smtpUser=SMTP_USER, smtpPassword=SMTP_PWD))
|
||||
c['status'].append(words.IRC("irc.freenode.net", "dockerqabot",
|
||||
channels=[IRC_CHANNEL], password=WEB_IRC_PWD, allowForce=True,
|
||||
notify_events={'exception':1, 'successToFailure':1, 'failureToSuccess':1}))
|
|
@ -1,22 +0,0 @@
|
|||
docker-ci:
|
||||
image: "docker-ci/docker-ci"
|
||||
release_name: "docker-ci-0.5.6"
|
||||
ports: ["80","2222:22","8011:8011"]
|
||||
register: "80"
|
||||
volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"]
|
||||
command: []
|
||||
env:
|
||||
- "DEPLOYMENT=production"
|
||||
- "IRC_CHANNEL=docker-testing"
|
||||
- "BACKUP_BUCKET=backup-ci"
|
||||
- "$WEB_USER"
|
||||
- "$WEB_IRC_PWD"
|
||||
- "$BUILDBOT_PWD"
|
||||
- "$AWS_ACCESS_KEY"
|
||||
- "$AWS_SECRET_KEY"
|
||||
- "$GPG_PASSPHRASE"
|
||||
- "$BACKUP_AWS_ID"
|
||||
- "$BACKUP_AWS_SECRET"
|
||||
- "$SMTP_USER"
|
||||
- "$SMTP_PWD"
|
||||
- "$EMAIL_RCP"
|
|
@ -1,5 +0,0 @@
|
|||
default:
|
||||
hipaches: ['192.168.100.67:6379']
|
||||
daemons: ['192.168.100.67:4243']
|
||||
use_ssh: False
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
docker-ci:
|
||||
image: "docker-ci/docker-ci"
|
||||
release_name: "docker-ci-stage"
|
||||
ports: ["80","2222:22","8011:8011"]
|
||||
register: "80"
|
||||
volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"]
|
||||
command: []
|
||||
env:
|
||||
- "DEPLOYMENT=staging"
|
||||
- "IRC_CHANNEL=docker-testing-staging"
|
||||
- "BACKUP_BUCKET=ci-backup-stage"
|
||||
- "$BACKUP_AWS_ID"
|
||||
- "$BACKUP_AWS_SECRET"
|
||||
- "$WEB_USER"
|
||||
- "$WEB_IRC_PWD"
|
||||
- "$BUILDBOT_PWD"
|
||||
- "$AWS_ACCESS_KEY"
|
||||
- "$AWS_SECRET_KEY"
|
||||
- "$GPG_PASSPHRASE"
|
||||
- "$SMTP_USER"
|
||||
- "$SMTP_PWD"
|
||||
- "$EMAIL_RCP"
|
|
@ -1,5 +0,0 @@
|
|||
default:
|
||||
hipaches: ['192.168.100.65:6379']
|
||||
daemons: ['192.168.100.65:4243']
|
||||
use_ssh: False
|
||||
|
|
@ -1,52 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
export PATH='/go/bin':$PATH
|
||||
export DOCKER_PATH='/go/src/github.com/dotcloud/docker'
|
||||
|
||||
# Signal coverage report name, parsed by docker-ci
|
||||
set -x
|
||||
COVERAGE_PATH=$(date +"docker-%Y%m%d%H%M%S")
|
||||
set +x
|
||||
|
||||
REPORTS="/data/$COVERAGE_PATH"
|
||||
INDEX="$REPORTS/index.html"
|
||||
|
||||
# Test docker
|
||||
cd $DOCKER_PATH
|
||||
./hack/make.sh test; exit_status=$?
|
||||
PROFILE_PATH="$(ls -d $DOCKER_PATH/bundles/* | sed -n '$ p')/test/coverprofiles"
|
||||
|
||||
if [ "$exit_status" -eq "0" ]; then
|
||||
# Download coverage dependencies
|
||||
go get github.com/axw/gocov/gocov
|
||||
go get -u github.com/matm/gocov-html
|
||||
|
||||
# Create coverage report
|
||||
mkdir -p $REPORTS
|
||||
cd $PROFILE_PATH
|
||||
cat > $INDEX << "EOF"
|
||||
<!DOCTYPE html><head><meta charset="utf-8">
|
||||
<script type="text/javascript" src="//tablesorter.com/jquery-latest.js"></script>
|
||||
<script type="text/javascript" src="//tablesorter.com/__jquery.tablesorter.min.js"></script>
|
||||
<script type="text/javascript">$(document).ready(function() {
|
||||
$("table").tablesorter({ sortForce: [[1,0]] }); });</script>
|
||||
<style>table,th,td{border:1px solid black;}</style>
|
||||
<title>Docker Coverage Report</title>
|
||||
</head><body>
|
||||
<h1><strong>Docker Coverage Report</strong></h1>
|
||||
<table class="tablesorter">
|
||||
<thead><tr><th>package</th><th>pct</th></tr></thead><tbody>
|
||||
EOF
|
||||
for profile in *; do
|
||||
gocov convert $profile | gocov-html >$REPORTS/$profile.html
|
||||
echo "<tr><td><a href=\"${profile}.html\">$profile</a></td><td>" >> $INDEX
|
||||
go tool cover -func=$profile | sed -En '$ s/.+\t(.+)/\1/p' >> $INDEX
|
||||
echo "</td></tr>" >> $INDEX
|
||||
done
|
||||
echo "</tbody></table></body></html>" >> $INDEX
|
||||
fi
|
||||
|
||||
# Signal test and coverage result, parsed by docker-ci
|
||||
set -x
|
||||
exit $exit_status
|
||||
|
|
@ -1 +0,0 @@
|
|||
project
|
|
@ -1 +0,0 @@
|
|||
project
|
|
@ -1,13 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
if [ "$DEPLOYMENT" == "production" ]; then
|
||||
AWS_S3_BUCKET='test.docker.io'
|
||||
else
|
||||
AWS_S3_BUCKET='get-staging.docker.io'
|
||||
fi
|
||||
|
||||
docker run --rm --privileged -v /run:/var/socket \
|
||||
-e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY \
|
||||
-e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE \
|
||||
-e DOCKER_RELEASE=1 -e DEPLOYMENT=$DEPLOYMENT docker-ci/testbuilder docker
|
||||
|
|
@ -1,8 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
set -x
|
||||
|
||||
PROJECT_NAME=$(basename $0)
|
||||
|
||||
docker run --rm -u sysadmin -e DEPLOYMENT=$DEPLOYMENT -v /run:/var/socket \
|
||||
-v /home/docker-ci/coverage/$PROJECT_NAME:/data docker-ci/testbuilder $PROJECT_NAME $1 $2 $3
|
||||
|
|
@ -1,61 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
import os
|
||||
username, password = os.environ['DOCKER_CREDS'].split(':')
|
||||
|
||||
from selenium import webdriver
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.common.keys import Keys
|
||||
from selenium.webdriver.support.ui import Select
|
||||
from selenium.common.exceptions import NoSuchElementException
|
||||
import unittest, time, re
|
||||
|
||||
class Docker(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.driver = webdriver.PhantomJS()
|
||||
self.driver.implicitly_wait(30)
|
||||
self.base_url = "http://www.docker.io/"
|
||||
self.verificationErrors = []
|
||||
self.accept_next_alert = True
|
||||
|
||||
def test_docker(self):
|
||||
driver = self.driver
|
||||
print "Login into {0} as login user {1} ...".format(self.base_url,username)
|
||||
driver.get(self.base_url + "/")
|
||||
driver.find_element_by_link_text("INDEX").click()
|
||||
driver.find_element_by_link_text("login").click()
|
||||
driver.find_element_by_id("id_username").send_keys(username)
|
||||
driver.find_element_by_id("id_password").send_keys(password)
|
||||
print "Checking login user ..."
|
||||
driver.find_element_by_css_selector("input[type=\"submit\"]").click()
|
||||
try: self.assertEqual("test", driver.find_element_by_css_selector("h3").text)
|
||||
except AssertionError as e: self.verificationErrors.append(str(e))
|
||||
print "Login user {0} found".format(username)
|
||||
|
||||
def is_element_present(self, how, what):
|
||||
try: self.driver.find_element(by=how, value=what)
|
||||
except NoSuchElementException, e: return False
|
||||
return True
|
||||
|
||||
def is_alert_present(self):
|
||||
try: self.driver.switch_to_alert()
|
||||
except NoAlertPresentException, e: return False
|
||||
return True
|
||||
|
||||
def close_alert_and_get_its_text(self):
|
||||
try:
|
||||
alert = self.driver.switch_to_alert()
|
||||
alert_text = alert.text
|
||||
if self.accept_next_alert:
|
||||
alert.accept()
|
||||
else:
|
||||
alert.dismiss()
|
||||
return alert_text
|
||||
finally: self.accept_next_alert = True
|
||||
|
||||
def tearDown(self):
|
||||
self.driver.quit()
|
||||
self.assertEqual([], self.verificationErrors)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
|
@ -1,27 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
set -x
|
||||
|
||||
# Cleanup
|
||||
rm -rf docker-registry
|
||||
|
||||
# Setup the environment
|
||||
export SETTINGS_FLAVOR=test
|
||||
export DOCKER_REGISTRY_CONFIG=config_test.yml
|
||||
export PYTHONPATH=$(pwd)/docker-registry/test
|
||||
|
||||
# Get latest docker registry
|
||||
git clone -q https://github.com/dotcloud/docker-registry.git
|
||||
cd docker-registry
|
||||
sed -Ei "s#(boto_bucket: ).+#\1_env:S3_BUCKET#" config_test.yml
|
||||
|
||||
# Get dependencies
|
||||
pip install -q -r requirements.txt
|
||||
pip install -q -r test-requirements.txt
|
||||
pip install -q tox
|
||||
|
||||
# Run registry tests
|
||||
tox || exit 1
|
||||
python -m unittest discover -p s3.py -s test || exit 1
|
||||
python -m unittest discover -p workflow.py -s test
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
server {
|
||||
listen 80;
|
||||
root /data/docker-ci;
|
||||
|
||||
location / {
|
||||
proxy_pass http://localhost:8000/;
|
||||
}
|
||||
|
||||
location /coverage {
|
||||
root /data/docker-ci;
|
||||
}
|
||||
}
|
|
@ -1,28 +0,0 @@
|
|||
# VERSION: 0.22
|
||||
# DOCKER-VERSION 0.6.3
|
||||
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
|
||||
# DESCRIPTION: Generate docker-ci daily report
|
||||
# COMMENTS: The build process is initiated by deployment.py
|
||||
Report configuration is passed through ./credentials.json at
|
||||
# deployment time.
|
||||
# TO_BUILD: docker build -t report .
|
||||
# TO_DEPLOY: docker run report
|
||||
|
||||
from ubuntu:12.04
|
||||
maintainer Daniel Mizyrycki <daniel@dotcloud.com>
|
||||
|
||||
env PYTHONPATH /report
|
||||
|
||||
|
||||
# Add report dependencies
|
||||
run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > \
|
||||
/etc/apt/sources.list
|
||||
run apt-get update; apt-get install -y python2.7 python-pip ssh rsync
|
||||
|
||||
# Set San Francisco timezone
|
||||
run echo "America/Los_Angeles" >/etc/timezone
|
||||
run dpkg-reconfigure --frontend noninteractive tzdata
|
||||
|
||||
# Add report code and set default container command
|
||||
add . /report
|
||||
cmd "/report/report.py"
|
|
@ -1,130 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
'''Deploy docker-ci report container on Digital Ocean.
|
||||
Usage:
|
||||
export CONFIG_JSON='
|
||||
{ "DROPLET_NAME": "Digital_Ocean_dropplet_name",
|
||||
"DO_CLIENT_ID": "Digital_Ocean_client_id",
|
||||
"DO_API_KEY": "Digital_Ocean_api_key",
|
||||
"DOCKER_KEY_ID": "Digital_Ocean_ssh_key_id",
|
||||
"DOCKER_CI_KEY_PATH": "docker-ci_private_key_path",
|
||||
"DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)",
|
||||
"DOCKER_CI_ADDRESS" "user@docker-ci_fqdn_server",
|
||||
"SMTP_USER": "SMTP_server_user",
|
||||
"SMTP_PWD": "SMTP_server_password",
|
||||
"EMAIL_SENDER": "Buildbot_mailing_sender",
|
||||
"EMAIL_RCP": "Buildbot_mailing_receipient" }'
|
||||
python deployment.py
|
||||
'''
|
||||
|
||||
import re, json, requests, base64
|
||||
from fabric import api
|
||||
from fabric.api import cd, run, put, sudo
|
||||
from os import environ as env
|
||||
from time import sleep
|
||||
from datetime import datetime
|
||||
|
||||
# Populate environment variables
|
||||
CONFIG = json.loads(env['CONFIG_JSON'])
|
||||
for key in CONFIG:
|
||||
env[key] = CONFIG[key]
|
||||
|
||||
# Load DOCKER_CI_KEY
|
||||
env['DOCKER_CI_KEY'] = open(env['DOCKER_CI_KEY_PATH']).read()
|
||||
|
||||
DROPLET_NAME = env.get('DROPLET_NAME','report')
|
||||
TIMEOUT = 120 # Seconds before timeout droplet creation
|
||||
IMAGE_ID = 1004145 # Docker on Ubuntu 13.04
|
||||
REGION_ID = 4 # New York 2
|
||||
SIZE_ID = 66 # memory 512MB
|
||||
DO_IMAGE_USER = 'root' # Image user on Digital Ocean
|
||||
API_URL = 'https://api.digitalocean.com/'
|
||||
|
||||
|
||||
class digital_ocean():
|
||||
|
||||
def __init__(self, key, client):
|
||||
'''Set default API parameters'''
|
||||
self.key = key
|
||||
self.client = client
|
||||
self.api_url = API_URL
|
||||
|
||||
def api(self, cmd_path, api_arg={}):
|
||||
'''Make api call'''
|
||||
api_arg.update({'api_key':self.key, 'client_id':self.client})
|
||||
resp = requests.get(self.api_url + cmd_path, params=api_arg).text
|
||||
resp = json.loads(resp)
|
||||
if resp['status'] != 'OK':
|
||||
raise Exception(resp['error_message'])
|
||||
return resp
|
||||
|
||||
def droplet_data(self, name):
|
||||
'''Get droplet data'''
|
||||
data = self.api('droplets')
|
||||
data = [droplet for droplet in data['droplets']
|
||||
if droplet['name'] == name]
|
||||
return data[0] if data else {}
|
||||
|
||||
def json_fmt(data):
|
||||
'''Format json output'''
|
||||
return json.dumps(data, sort_keys = True, indent = 2)
|
||||
|
||||
|
||||
do = digital_ocean(env['DO_API_KEY'], env['DO_CLIENT_ID'])
|
||||
|
||||
# Get DROPLET_NAME data
|
||||
data = do.droplet_data(DROPLET_NAME)
|
||||
|
||||
# Stop processing if DROPLET_NAME exists on Digital Ocean
|
||||
if data:
|
||||
print ('Droplet: {} already deployed. Not further processing.'
|
||||
.format(DROPLET_NAME))
|
||||
exit(1)
|
||||
|
||||
# Create droplet
|
||||
do.api('droplets/new', {'name':DROPLET_NAME, 'region_id':REGION_ID,
|
||||
'image_id':IMAGE_ID, 'size_id':SIZE_ID,
|
||||
'ssh_key_ids':[env['DOCKER_KEY_ID']]})
|
||||
|
||||
# Wait for droplet to be created.
|
||||
start_time = datetime.now()
|
||||
while (data.get('status','') != 'active' and (
|
||||
datetime.now()-start_time).seconds < TIMEOUT):
|
||||
data = do.droplet_data(DROPLET_NAME)
|
||||
print data['status']
|
||||
sleep(3)
|
||||
|
||||
# Wait for the machine to boot
|
||||
sleep(15)
|
||||
|
||||
# Get droplet IP
|
||||
ip = str(data['ip_address'])
|
||||
print 'droplet: {} ip: {}'.format(DROPLET_NAME, ip)
|
||||
|
||||
api.env.host_string = ip
|
||||
api.env.user = DO_IMAGE_USER
|
||||
api.env.key_filename = env['DOCKER_CI_KEY_PATH']
|
||||
|
||||
# Correct timezone
|
||||
sudo('echo "America/Los_Angeles" >/etc/timezone')
|
||||
sudo('dpkg-reconfigure --frontend noninteractive tzdata')
|
||||
|
||||
# Load JSON_CONFIG environment for Dockerfile
|
||||
CONFIG_JSON= base64.b64encode(
|
||||
'{{"DOCKER_CI_PUB": "{DOCKER_CI_PUB}",'
|
||||
' "DOCKER_CI_KEY": "{DOCKER_CI_KEY}",'
|
||||
' "DOCKER_CI_ADDRESS": "{DOCKER_CI_ADDRESS}",'
|
||||
' "SMTP_USER": "{SMTP_USER}",'
|
||||
' "SMTP_PWD": "{SMTP_PWD}",'
|
||||
' "EMAIL_SENDER": "{EMAIL_SENDER}",'
|
||||
' "EMAIL_RCP": "{EMAIL_RCP}"}}'.format(**env))
|
||||
|
||||
run('mkdir -p /data/report')
|
||||
put('./', '/data/report')
|
||||
with cd('/data/report'):
|
||||
run('chmod 700 report.py')
|
||||
run('echo "{}" > credentials.json'.format(CONFIG_JSON))
|
||||
run('docker build -t report .')
|
||||
run('rm credentials.json')
|
||||
run("echo -e '30 09 * * * /usr/bin/docker run report\n' |"
|
||||
" /usr/bin/crontab -")
|
|
@ -1,145 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
'''CONFIG_JSON is a json encoded string base64 environment variable. It is used
|
||||
to clone docker-ci database, generate docker-ci report and submit it by email.
|
||||
CONFIG_JSON data comes from the file /report/credentials.json inserted in this
|
||||
container by deployment.py:
|
||||
|
||||
{ "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)",
|
||||
"DOCKER_CI_KEY": "$(cat docker-ci_ssh_private_key.key)",
|
||||
"DOCKER_CI_ADDRESS": "user@docker-ci_fqdn_server",
|
||||
"SMTP_USER": "SMTP_server_user",
|
||||
"SMTP_PWD": "SMTP_server_password",
|
||||
"EMAIL_SENDER": "Buildbot_mailing_sender",
|
||||
"EMAIL_RCP": "Buildbot_mailing_receipient" } '''
|
||||
|
||||
import os, re, json, sqlite3, datetime, base64
|
||||
import smtplib
|
||||
from datetime import timedelta
|
||||
from subprocess import call
|
||||
from os import environ as env
|
||||
|
||||
TODAY = datetime.date.today()
|
||||
|
||||
# Load credentials to the environment
|
||||
env['CONFIG_JSON'] = base64.b64decode(open('/report/credentials.json').read())
|
||||
|
||||
# Remove SSH private key as it needs more processing
|
||||
CONFIG = json.loads(re.sub(r'("DOCKER_CI_KEY".+?"(.+?)",)','',
|
||||
env['CONFIG_JSON'], flags=re.DOTALL))
|
||||
|
||||
# Populate environment variables
|
||||
for key in CONFIG:
|
||||
env[key] = CONFIG[key]
|
||||
|
||||
# Load SSH private key
|
||||
env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1',
|
||||
env['CONFIG_JSON'],flags=re.DOTALL)
|
||||
|
||||
# Prevent rsync to validate host on first connection to docker-ci
|
||||
os.makedirs('/root/.ssh')
|
||||
open('/root/.ssh/id_rsa','w').write(env['DOCKER_CI_KEY'])
|
||||
os.chmod('/root/.ssh/id_rsa',0600)
|
||||
open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n')
|
||||
|
||||
|
||||
# Sync buildbot database from docker-ci
|
||||
call('rsync {}:/data/buildbot/master/state.sqlite .'.format(
|
||||
env['DOCKER_CI_ADDRESS']), shell=True)
|
||||
|
||||
class SQL:
|
||||
def __init__(self, database_name):
|
||||
sql = sqlite3.connect(database_name)
|
||||
# Use column names as keys for fetchall rows
|
||||
sql.row_factory = sqlite3.Row
|
||||
sql = sql.cursor()
|
||||
self.sql = sql
|
||||
|
||||
def query(self,query_statement):
|
||||
return self.sql.execute(query_statement).fetchall()
|
||||
|
||||
sql = SQL("state.sqlite")
|
||||
|
||||
|
||||
class Report():
|
||||
|
||||
def __init__(self,period='',date=''):
|
||||
self.data = []
|
||||
self.period = 'date' if not period else period
|
||||
self.date = str(TODAY) if not date else date
|
||||
self.compute()
|
||||
|
||||
def compute(self):
|
||||
'''Compute report'''
|
||||
if self.period == 'week':
|
||||
self.week_report(self.date)
|
||||
else:
|
||||
self.date_report(self.date)
|
||||
|
||||
|
||||
def date_report(self,date):
|
||||
'''Create a date test report'''
|
||||
builds = []
|
||||
# Get a queryset with all builds from date
|
||||
rows = sql.query('SELECT * FROM builds JOIN buildrequests'
|
||||
' WHERE builds.brid=buildrequests.id and'
|
||||
' date(start_time, "unixepoch", "localtime") = "{0}"'
|
||||
' GROUP BY number'.format(date))
|
||||
build_names = sorted(set([row['buildername'] for row in rows]))
|
||||
# Create a report build line for a given build
|
||||
for build_name in build_names:
|
||||
tried = len([row['buildername']
|
||||
for row in rows if row['buildername'] == build_name])
|
||||
fail_tests = [row['buildername'] for row in rows if (
|
||||
row['buildername'] == build_name and row['results'] != 0)]
|
||||
fail = len(fail_tests)
|
||||
fail_details = ''
|
||||
fail_pct = int(100.0*fail/tried) if tried != 0 else 100
|
||||
builds.append({'name': build_name, 'tried': tried, 'fail': fail,
|
||||
'fail_pct': fail_pct, 'fail_details':fail_details})
|
||||
if builds:
|
||||
self.data.append({'date': date, 'builds': builds})
|
||||
|
||||
|
||||
def week_report(self,date):
|
||||
'''Add the week's date test reports to report.data'''
|
||||
date = datetime.datetime.strptime(date,'%Y-%m-%d').date()
|
||||
last_monday = date - datetime.timedelta(days=date.weekday())
|
||||
week_dates = [last_monday + timedelta(days=x) for x in range(7,-1,-1)]
|
||||
for date in week_dates:
|
||||
self.date_report(str(date))
|
||||
|
||||
def render_text(self):
|
||||
'''Return rendered report in text format'''
|
||||
retval = ''
|
||||
fail_tests = {}
|
||||
for builds in self.data:
|
||||
retval += 'Test date: {0}\n'.format(builds['date'],retval)
|
||||
table = ''
|
||||
for build in builds['builds']:
|
||||
table += ('Build {name:15} Tried: {tried:4} '
|
||||
' Failures: {fail:4} ({fail_pct}%)\n'.format(**build))
|
||||
if build['name'] in fail_tests:
|
||||
fail_tests[build['name']] += build['fail_details']
|
||||
else:
|
||||
fail_tests[build['name']] = build['fail_details']
|
||||
retval += '{0}\n'.format(table)
|
||||
retval += '\n Builds failing'
|
||||
for fail_name in fail_tests:
|
||||
retval += '\n' + fail_name + '\n'
|
||||
for (fail_id,fail_url,rn_tests,nr_errors,log_errors,
|
||||
tracelog_errors) in fail_tests[fail_name]:
|
||||
retval += fail_url + '\n'
|
||||
retval += '\n\n'
|
||||
return retval
|
||||
|
||||
|
||||
# Send email
|
||||
smtp_from = env['EMAIL_SENDER']
|
||||
subject = '[docker-ci] Daily report for {}'.format(str(TODAY))
|
||||
msg = "From: {}\r\nTo: {}\r\nSubject: {}\r\n\r\n".format(
|
||||
smtp_from, env['EMAIL_RCP'], subject)
|
||||
msg = msg + Report('week').render_text()
|
||||
server = smtplib.SMTP_SSL('smtp.mailgun.org')
|
||||
server.login(env['SMTP_USER'], env['SMTP_PWD'])
|
||||
server.sendmail(smtp_from, env['EMAIL_RCP'], msg)
|
|
@ -1,54 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Set timezone
|
||||
echo "GMT" >/etc/timezone
|
||||
dpkg-reconfigure --frontend noninteractive tzdata
|
||||
|
||||
# Set ssh superuser
|
||||
mkdir -p /data/buildbot /var/run/sshd /run
|
||||
useradd -m -d /home/sysadmin -s /bin/bash -G sudo,docker -p '*' sysadmin
|
||||
sed -Ei 's/(\%sudo.*) ALL/\1 NOPASSWD:ALL/' /etc/sudoers
|
||||
cd /home/sysadmin
|
||||
mkdir .ssh
|
||||
chmod 700 .ssh
|
||||
cat > .ssh/authorized_keys << 'EOF'
|
||||
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7ALVhwQ68q1SjrKaAduOuOEAcWmb8kDZf5qA7T1fM8AP07EDC7nSKRJ8PXUBGTOQfxm89coJDuSJsTAZ+1PvglXhA0Mq6+knc6ZrZY+SuZlDIDAk4TOdVPoDZnmR1YW2McxHkhcGIOKeC8MMig5NeEjtgQwXzauUSPqeh8HMlLZRMooFYyyluIpn7NaCLzyWjwAQz2s3KyI7VE7hl+ncCrW86v+dciEdwqtzNoUMFb3iDpPxaiCl3rv+SB7co/5eUDTs1FZvUcYMXKQuf8R+2ZKzXOpwr0Zs8sKQXvXavCeWykwGgXLBjVkvrDcHuDD6UXCW63UKgmRECpLZaMBVIIRWLEEgTS5OSQTcxpMVe5zUW6sDvXHTcdPwWrcn1dE9F/0vLC0HJ4ADKelLX5zyTpmXGbuZuntIf1JO67D/K/P++uV1rmVIH+zgtOf23w5rX2zKb4BSTqP0sv61pmWV7MEVoEz6yXswcTjS92tb775v7XLU9vKAkt042ORFdE4/++hejhL/Lj52IRgjt1CJZHZsR9JywJZrz3kYuf8eU2J2FYh0Cpz5gmf0f+12Rt4HztnZxGPP4KuMa66e4+hpx1jynjMZ7D5QUnNYEmuvJByopn8HSluuY/kS5MMyZCZtJLEPGX4+yECX0Di/S0vCRl2NyqfCBqS+yXXT5SA1nFw== docker-test@docker.io
|
||||
EOF
|
||||
chmod 600 .ssh/authorized_keys
|
||||
chown -R sysadmin .ssh
|
||||
|
||||
# Fix docker group id for use of host dockerd by sysadmin
|
||||
sed -Ei 's/(docker:x:)[^:]+/\1999/' /etc/group
|
||||
|
||||
# Create buildbot configuration
|
||||
cd /data/buildbot; buildbot create-master master
|
||||
cp -a /data/buildbot/master/master.cfg.sample \
|
||||
/data/buildbot/master/master.cfg
|
||||
cd /data/buildbot; \
|
||||
buildslave create-slave slave localhost:9989 buildworker pass
|
||||
cp /docker-ci/buildbot/master.cfg /data/buildbot/master
|
||||
|
||||
# Patch github webstatus to capture pull requests
|
||||
cp /docker-ci/buildbot/github.py /usr/local/lib/python2.7/dist-packages/buildbot/status/web/hooks
|
||||
chown -R sysadmin.sysadmin /data
|
||||
|
||||
# Create nginx configuration
|
||||
rm /etc/nginx/sites-enabled/default
|
||||
cp /docker-ci/nginx/nginx.conf /etc/nginx/conf.d/buildbot.conf
|
||||
/bin/echo -e '\ndaemon off;\n' >> /etc/nginx/nginx.conf
|
||||
|
||||
# Set supervisord buildbot, nginx and sshd processes
|
||||
/bin/echo -e "\
|
||||
[program:buildmaster]\n\
|
||||
command=twistd --nodaemon --no_save -y buildbot.tac\n\
|
||||
directory=/data/buildbot/master\n\
|
||||
user=sysadmin\n\n\
|
||||
[program:buildworker]\n\
|
||||
command=twistd --nodaemon --no_save -y buildbot.tac\n\
|
||||
directory=/data/buildbot/slave\n\
|
||||
user=sysadmin\n" > \
|
||||
/etc/supervisor/conf.d/buildbot.conf
|
||||
/bin/echo -e "[program:nginx]\ncommand=/usr/sbin/nginx\n" > \
|
||||
/etc/supervisor/conf.d/nginx.conf
|
||||
/bin/echo -e "[program:sshd]\ncommand=/usr/sbin/sshd -D\n" > \
|
||||
/etc/supervisor/conf.d/sshd.conf
|
|
@ -1,12 +0,0 @@
|
|||
# TO_BUILD: docker build --no-cache -t docker-ci/testbuilder .
|
||||
# TO_RUN: docker run --rm -u sysadmin \
|
||||
# -v /run:/var/socket docker-ci/testbuilder docker-registry
|
||||
#
|
||||
|
||||
FROM docker-ci/docker-ci
|
||||
ENV HOME /home/sysadmin
|
||||
|
||||
RUN mkdir /testbuilder
|
||||
ADD . /testbuilder
|
||||
|
||||
ENTRYPOINT ["/testbuilder/testbuilder.sh"]
|
|
@ -1,12 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
set -x
|
||||
set -e
|
||||
PROJECT_PATH=$1
|
||||
|
||||
# Build the docker project
|
||||
cd /data/$PROJECT_PATH
|
||||
sg docker -c "docker build -q -t registry ."
|
||||
cd test; sg docker -c "docker build -q -t docker-registry-test ."
|
||||
|
||||
# Run the tests
|
||||
sg docker -c "docker run --rm -v /home/docker-ci/coverage/docker-registry:/data docker-registry-test"
|
|
@ -1,18 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
set -x
|
||||
set -e
|
||||
PROJECT_PATH=$1
|
||||
|
||||
# Build the docker project
|
||||
cd /data/$PROJECT_PATH
|
||||
sg docker -c "docker build -q -t docker ."
|
||||
|
||||
if [ "$DOCKER_RELEASE" == "1" ]; then
|
||||
# Do nightly release
|
||||
echo sg docker -c "docker run --rm --privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY= -e AWS_SECRET_KEY= -e GPG_PASSPHRASE= docker hack/release.sh"
|
||||
set +x
|
||||
sg docker -c "docker run --rm --privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh"
|
||||
else
|
||||
# Run the tests
|
||||
sg docker -c "docker run --rm --privileged -v /home/docker-ci/coverage/docker:/data docker ./hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh"
|
||||
fi
|
|
@ -1,40 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
# Download, build and run a docker project tests
|
||||
# Environment variables: DEPLOYMENT
|
||||
|
||||
cat $0
|
||||
set -e
|
||||
set -x
|
||||
|
||||
PROJECT=$1
|
||||
COMMIT=${2-HEAD}
|
||||
REPO=${3-https://github.com/dotcloud/$PROJECT}
|
||||
BRANCH=${4-master}
|
||||
REPO_PROJ="https://github.com/docker-test/$PROJECT"
|
||||
if [ "$DEPLOYMENT" == "production" ]; then
|
||||
REPO_PROJ="https://github.com/dotcloud/$PROJECT"
|
||||
fi
|
||||
set +x
|
||||
|
||||
# Generate a random string of $1 characters
|
||||
function random {
|
||||
cat /dev/urandom | tr -cd 'a-f0-9' | head -c $1
|
||||
}
|
||||
|
||||
PROJECT_PATH="$PROJECT-tmp-$(random 12)"
|
||||
|
||||
# Set docker-test git user
|
||||
set -x
|
||||
git config --global user.email "docker-test@docker.io"
|
||||
git config --global user.name "docker-test"
|
||||
|
||||
# Fetch project
|
||||
git clone -q $REPO_PROJ -b master /data/$PROJECT_PATH
|
||||
cd /data/$PROJECT_PATH
|
||||
echo "Git commit: $(git rev-parse HEAD)"
|
||||
git fetch -q $REPO $BRANCH
|
||||
git merge --no-edit $COMMIT
|
||||
|
||||
# Build the project dockertest
|
||||
/testbuilder/$PROJECT.sh $PROJECT_PATH
|
||||
rm -rf /data/$PROJECT_PATH
|
|
@ -1,47 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
import os,sys,json
|
||||
from datetime import datetime
|
||||
from filecmp import cmp
|
||||
from subprocess import check_call
|
||||
from boto.s3.key import Key
|
||||
from boto.s3.connection import S3Connection
|
||||
|
||||
def ENV(x):
|
||||
'''Promote an environment variable for global use returning its value'''
|
||||
retval = os.environ.get(x, '')
|
||||
globals()[x] = retval
|
||||
return retval
|
||||
|
||||
ROOT_PATH = '/data/backup/docker-ci'
|
||||
TODAY = str(datetime.today())[:10]
|
||||
BACKUP_FILE = '{}/docker-ci_{}.tgz'.format(ROOT_PATH, TODAY)
|
||||
BACKUP_LINK = '{}/docker-ci.tgz'.format(ROOT_PATH)
|
||||
ENV('BACKUP_BUCKET')
|
||||
ENV('BACKUP_AWS_ID')
|
||||
ENV('BACKUP_AWS_SECRET')
|
||||
|
||||
'''Create full master buildbot backup, avoiding duplicates'''
|
||||
# Ensure backup path exist
|
||||
if not os.path.exists(ROOT_PATH):
|
||||
os.makedirs(ROOT_PATH)
|
||||
# Make actual backups
|
||||
check_call('/bin/tar czf {} -C /data --exclude=backup --exclude=buildbot/slave'
|
||||
' . 1>/dev/null 2>&1'.format(BACKUP_FILE),shell=True)
|
||||
# remove previous dump if it is the same as the latest
|
||||
if (os.path.exists(BACKUP_LINK) and cmp(BACKUP_FILE, BACKUP_LINK) and
|
||||
os.path._resolve_link(BACKUP_LINK) != BACKUP_FILE):
|
||||
os.unlink(os.path._resolve_link(BACKUP_LINK))
|
||||
# Recreate backup link pointing to latest backup
|
||||
try:
|
||||
os.unlink(BACKUP_LINK)
|
||||
except:
|
||||
pass
|
||||
os.symlink(BACKUP_FILE, BACKUP_LINK)
|
||||
|
||||
# Make backup on S3
|
||||
bucket = S3Connection(BACKUP_AWS_ID,BACKUP_AWS_SECRET).get_bucket(BACKUP_BUCKET)
|
||||
k = Key(bucket)
|
||||
k.key = BACKUP_FILE
|
||||
k.set_contents_from_filename(BACKUP_FILE)
|
||||
bucket.copy_key(os.path.basename(BACKUP_LINK),BACKUP_BUCKET,BACKUP_FILE[1:])
|
|
@ -85,7 +85,7 @@ case "$lsb_dist" in
|
|||
fi
|
||||
}
|
||||
|
||||
# TODO remove this section once device-mapper lands
|
||||
# aufs is preferred over devicemapper; try to ensure the driver is available.
|
||||
if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
|
||||
kern_extras="linux-image-extra-$(uname -r)"
|
||||
|
||||
|
|
|
@ -89,7 +89,7 @@ LDFLAGS='
|
|||
'
|
||||
LDFLAGS_STATIC='-linkmode external'
|
||||
EXTLDFLAGS_STATIC='-static'
|
||||
BUILDFLAGS=( -a -tags "netgo $DOCKER_BUILDTAGS" )
|
||||
BUILDFLAGS=( -a -tags "netgo static_build $DOCKER_BUILDTAGS" )
|
||||
|
||||
# A few more flags that are specific just to building a completely-static binary (see hack/make/binary)
|
||||
# PLEASE do not use these anywhere else.
|
||||
|
|
|
@ -58,3 +58,6 @@ mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar
|
|||
rm -rf src/code.google.com/p/go
|
||||
mkdir -p src/code.google.com/p/go/src/pkg/archive
|
||||
mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar
|
||||
|
||||
clone git github.com/godbus/dbus cb98efbb933d8389ab549a060e880ea3c375d213
|
||||
clone git github.com/coreos/go-systemd 4c14ed39b8a643ac44b4f95b5a53c00e94261475
|
||||
|
|
|
@ -311,6 +311,16 @@ RUN [ "$(cat /testfile)" = 'test!' ]
|
|||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
`
|
||||
FROM {IMAGE}
|
||||
# what \
|
||||
RUN mkdir /testing
|
||||
RUN touch /testing/other
|
||||
`,
|
||||
nil,
|
||||
nil,
|
||||
},
|
||||
}
|
||||
|
||||
// FIXME: test building with 2 successive overlapping ADD commands
|
||||
|
@ -998,3 +1008,21 @@ func TestBuildOnBuildForbiddenMaintainerTrigger(t *testing.T) {
|
|||
t.Fatal("Error should not be nil")
|
||||
}
|
||||
}
|
||||
|
||||
// gh #2446
|
||||
func TestBuildAddToSymlinkDest(t *testing.T) {
|
||||
eng := NewTestEngine(t)
|
||||
defer nuke(mkRuntimeFromEngine(eng, t))
|
||||
|
||||
_, err := buildImage(testContextTemplate{`
|
||||
from {IMAGE}
|
||||
run mkdir /foo
|
||||
run ln -s /foo /bar
|
||||
add foo /bar/
|
||||
run stat /bar/foo
|
||||
`,
|
||||
[][2]string{{"foo", "HEYO"}}, nil}, t, eng, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -252,6 +252,25 @@ func TestRunWorkdirExists(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected
|
||||
func TestRunWorkdirExistsAndIsFile(t *testing.T) {
|
||||
|
||||
cli := api.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
|
||||
defer cleanup(globalEngine, t)
|
||||
|
||||
c := make(chan struct{})
|
||||
go func() {
|
||||
defer close(c)
|
||||
if err := cli.CmdRun("-w", "/bin/cat", unitTestImageID, "pwd"); err == nil {
|
||||
t.Fatal("should have failed to run when using /bin/cat as working dir.")
|
||||
}
|
||||
}()
|
||||
|
||||
setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
|
||||
<-c
|
||||
})
|
||||
}
|
||||
|
||||
func TestRunExit(t *testing.T) {
|
||||
stdin, stdinPipe := io.Pipe()
|
||||
stdout, stdoutPipe := io.Pipe()
|
||||
|
|
|
@ -1553,7 +1553,7 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
|
|||
runtime := mkRuntimeFromEngine(eng, t)
|
||||
defer nuke(runtime)
|
||||
|
||||
config, hc, _, err := runconfig.Parse([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil)
|
||||
config, hc, _, err := runconfig.Parse([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show", "up"}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
|
@ -416,7 +416,7 @@ func TestRestartKillWait(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestCreateStartRestartKillStartKillRm(t *testing.T) {
|
||||
func TestCreateStartRestartStopStartKillRm(t *testing.T) {
|
||||
eng := NewTestEngine(t)
|
||||
srv := mkServerFromEngine(eng, t)
|
||||
defer mkRuntimeFromEngine(eng, t).Nuke()
|
||||
|
@ -456,7 +456,8 @@ func TestCreateStartRestartKillStartKillRm(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
job = eng.Job("kill", id)
|
||||
job = eng.Job("stop", id)
|
||||
job.SetenvInt("t", 15)
|
||||
if err := job.Run(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
15
pkg/cgroups/apply_nosystemd.go
Normal file
15
pkg/cgroups/apply_nosystemd.go
Normal file
|
@ -0,0 +1,15 @@
|
|||
// +build !linux
|
||||
|
||||
package cgroups
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func useSystemd() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func systemdApply(c *Cgroup, pid int) (ActiveCgroup, error) {
|
||||
return nil, fmt.Errorf("Systemd not supported")
|
||||
}
|
189
pkg/cgroups/apply_raw.go
Normal file
189
pkg/cgroups/apply_raw.go
Normal file
|
@ -0,0 +1,189 @@
|
|||
package cgroups
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type rawCgroup struct {
|
||||
root string
|
||||
cgroup string
|
||||
}
|
||||
|
||||
func rawApply(c *Cgroup, pid int) (ActiveCgroup, error) {
|
||||
// We have two implementation of cgroups support, one is based on
|
||||
// systemd and the dbus api, and one is based on raw cgroup fs operations
|
||||
// following the pre-single-writer model docs at:
|
||||
// http://www.freedesktop.org/wiki/Software/systemd/PaxControlGroups/
|
||||
//
|
||||
// we can pick any subsystem to find the root
|
||||
|
||||
cgroupRoot, err := FindCgroupMountpoint("cpu")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cgroupRoot = filepath.Dir(cgroupRoot)
|
||||
|
||||
if _, err := os.Stat(cgroupRoot); err != nil {
|
||||
return nil, fmt.Errorf("cgroups fs not found")
|
||||
}
|
||||
|
||||
cgroup := c.Name
|
||||
if c.Parent != "" {
|
||||
cgroup = filepath.Join(c.Parent, cgroup)
|
||||
}
|
||||
|
||||
raw := &rawCgroup{
|
||||
root: cgroupRoot,
|
||||
cgroup: cgroup,
|
||||
}
|
||||
|
||||
if err := raw.setupDevices(c, pid); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := raw.setupMemory(c, pid); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := raw.setupCpu(c, pid); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return raw, nil
|
||||
}
|
||||
|
||||
func (raw *rawCgroup) path(subsystem string) (string, error) {
|
||||
initPath, err := GetInitCgroupDir(subsystem)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(raw.root, subsystem, initPath, raw.cgroup), nil
|
||||
}
|
||||
|
||||
func (raw *rawCgroup) join(subsystem string, pid int) (string, error) {
|
||||
path, err := raw.path(subsystem)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
|
||||
return "", err
|
||||
}
|
||||
if err := writeFile(path, "cgroup.procs", strconv.Itoa(pid)); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
func (raw *rawCgroup) setupDevices(c *Cgroup, pid int) (err error) {
|
||||
if !c.DeviceAccess {
|
||||
dir, err := raw.join("devices", pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
os.RemoveAll(dir)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := writeFile(dir, "devices.deny", "a"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
allow := []string{
|
||||
// /dev/null, zero, full
|
||||
"c 1:3 rwm",
|
||||
"c 1:5 rwm",
|
||||
"c 1:7 rwm",
|
||||
|
||||
// consoles
|
||||
"c 5:1 rwm",
|
||||
"c 5:0 rwm",
|
||||
"c 4:0 rwm",
|
||||
"c 4:1 rwm",
|
||||
|
||||
// /dev/urandom,/dev/random
|
||||
"c 1:9 rwm",
|
||||
"c 1:8 rwm",
|
||||
|
||||
// /dev/pts/ - pts namespaces are "coming soon"
|
||||
"c 136:* rwm",
|
||||
"c 5:2 rwm",
|
||||
|
||||
// tuntap
|
||||
"c 10:200 rwm",
|
||||
}
|
||||
|
||||
for _, val := range allow {
|
||||
if err := writeFile(dir, "devices.allow", val); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (raw *rawCgroup) setupMemory(c *Cgroup, pid int) (err error) {
|
||||
if c.Memory != 0 || c.MemorySwap != 0 {
|
||||
dir, err := raw.join("memory", pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
os.RemoveAll(dir)
|
||||
}
|
||||
}()
|
||||
|
||||
if c.Memory != 0 {
|
||||
if err := writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// By default, MemorySwap is set to twice the size of RAM.
|
||||
// If you want to omit MemorySwap, set it to `-1'.
|
||||
if c.MemorySwap != -1 {
|
||||
if err := writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(c.Memory*2, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (raw *rawCgroup) setupCpu(c *Cgroup, pid int) (err error) {
|
||||
// We always want to join the cpu group, to allow fair cpu scheduling
|
||||
// on a container basis
|
||||
dir, err := raw.join("cpu", pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c.CpuShares != 0 {
|
||||
if err := writeFile(dir, "cpu.shares", strconv.FormatInt(c.CpuShares, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (raw *rawCgroup) Cleanup() error {
|
||||
get := func(subsystem string) string {
|
||||
path, _ := raw.path(subsystem)
|
||||
return path
|
||||
}
|
||||
|
||||
for _, path := range []string{
|
||||
get("memory"),
|
||||
get("devices"),
|
||||
get("cpu"),
|
||||
} {
|
||||
if path != "" {
|
||||
os.RemoveAll(path)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
158
pkg/cgroups/apply_systemd.go
Normal file
158
pkg/cgroups/apply_systemd.go
Normal file
|
@ -0,0 +1,158 @@
|
|||
// +build linux
|
||||
|
||||
package cgroups
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
systemd1 "github.com/coreos/go-systemd/dbus"
|
||||
"github.com/dotcloud/docker/pkg/systemd"
|
||||
"github.com/godbus/dbus"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type systemdCgroup struct {
|
||||
}
|
||||
|
||||
var (
|
||||
connLock sync.Mutex
|
||||
theConn *systemd1.Conn
|
||||
hasStartTransientUnit bool
|
||||
)
|
||||
|
||||
func useSystemd() bool {
|
||||
if !systemd.SdBooted() {
|
||||
return false
|
||||
}
|
||||
|
||||
connLock.Lock()
|
||||
defer connLock.Unlock()
|
||||
|
||||
if theConn == nil {
|
||||
var err error
|
||||
theConn, err = systemd1.New()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Assume we have StartTransientUnit
|
||||
hasStartTransientUnit = true
|
||||
|
||||
// But if we get UnknownMethod error we don't
|
||||
if _, err := theConn.StartTransientUnit("test.scope", "invalid"); err != nil {
|
||||
if dbusError, ok := err.(dbus.Error); ok {
|
||||
if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" {
|
||||
hasStartTransientUnit = false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return hasStartTransientUnit
|
||||
}
|
||||
|
||||
type DeviceAllow struct {
|
||||
Node string
|
||||
Permissions string
|
||||
}
|
||||
|
||||
func getIfaceForUnit(unitName string) string {
|
||||
if strings.HasSuffix(unitName, ".scope") {
|
||||
return "Scope"
|
||||
}
|
||||
if strings.HasSuffix(unitName, ".service") {
|
||||
return "Service"
|
||||
}
|
||||
return "Unit"
|
||||
}
|
||||
|
||||
func systemdApply(c *Cgroup, pid int) (ActiveCgroup, error) {
|
||||
unitName := c.Parent + "-" + c.Name + ".scope"
|
||||
slice := "system.slice"
|
||||
|
||||
var properties []systemd1.Property
|
||||
|
||||
for _, v := range c.UnitProperties {
|
||||
switch v[0] {
|
||||
case "Slice":
|
||||
slice = v[1]
|
||||
default:
|
||||
return nil, fmt.Errorf("Unknown unit propery %s", v[0])
|
||||
}
|
||||
}
|
||||
|
||||
properties = append(properties,
|
||||
systemd1.Property{"Slice", dbus.MakeVariant(slice)},
|
||||
systemd1.Property{"Description", dbus.MakeVariant("docker container " + c.Name)},
|
||||
systemd1.Property{"PIDs", dbus.MakeVariant([]uint32{uint32(pid)})})
|
||||
|
||||
if !c.DeviceAccess {
|
||||
properties = append(properties,
|
||||
systemd1.Property{"DevicePolicy", dbus.MakeVariant("strict")},
|
||||
systemd1.Property{"DeviceAllow", dbus.MakeVariant([]DeviceAllow{
|
||||
{"/dev/null", "rwm"},
|
||||
{"/dev/zero", "rwm"},
|
||||
{"/dev/full", "rwm"},
|
||||
{"/dev/random", "rwm"},
|
||||
{"/dev/urandom", "rwm"},
|
||||
{"/dev/tty", "rwm"},
|
||||
{"/dev/console", "rwm"},
|
||||
{"/dev/tty0", "rwm"},
|
||||
{"/dev/tty1", "rwm"},
|
||||
{"/dev/pts/ptmx", "rwm"},
|
||||
// There is no way to add /dev/pts/* here atm, so we hack this manually below
|
||||
// /dev/pts/* (how to add this?)
|
||||
// Same with tuntap, which doesn't exist as a node most of the time
|
||||
})})
|
||||
}
|
||||
|
||||
if c.Memory != 0 {
|
||||
properties = append(properties,
|
||||
systemd1.Property{"MemoryLimit", dbus.MakeVariant(uint64(c.Memory))})
|
||||
}
|
||||
// TODO: MemorySwap not available in systemd
|
||||
|
||||
if c.CpuShares != 0 {
|
||||
properties = append(properties,
|
||||
systemd1.Property{"CPUShares", dbus.MakeVariant(uint64(c.CpuShares))})
|
||||
}
|
||||
|
||||
if _, err := theConn.StartTransientUnit(unitName, "replace", properties...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// To work around the lack of /dev/pts/* support above we need to manually add these
|
||||
// so, ask systemd for the cgroup used
|
||||
props, err := theConn.GetUnitTypeProperties(unitName, getIfaceForUnit(unitName))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cgroup := props["ControlGroup"].(string)
|
||||
|
||||
if !c.DeviceAccess {
|
||||
mountpoint, err := FindCgroupMountpoint("devices")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
path := filepath.Join(mountpoint, cgroup)
|
||||
|
||||
// /dev/pts/*
|
||||
if err := writeFile(path, "devices.allow", "c 136:* rwm"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// tuntap
|
||||
if err := writeFile(path, "devices.allow", "c 10:200 rwm"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &systemdCgroup{}, nil
|
||||
}
|
||||
|
||||
func (c *systemdCgroup) Cleanup() error {
|
||||
// systemd cleans up, we don't need to do anything
|
||||
return nil
|
||||
}
|
|
@ -8,7 +8,6 @@ import (
|
|||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
|
@ -21,6 +20,12 @@ type Cgroup struct {
|
|||
MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap
|
||||
CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. other containers)
|
||||
CpusetCpus string `json:"cpuset_cpus,omitempty"` // CPU to use
|
||||
|
||||
UnitProperties [][2]string `json:"unit_properties,omitempty"` // systemd unit properties
|
||||
}
|
||||
|
||||
type ActiveCgroup interface {
|
||||
Cleanup() error
|
||||
}
|
||||
|
||||
// https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt
|
||||
|
@ -63,49 +68,6 @@ func GetInitCgroupDir(subsystem string) (string, error) {
|
|||
return parseCgroupFile(subsystem, f)
|
||||
}
|
||||
|
||||
func (c *Cgroup) Path(root, subsystem string) (string, error) {
|
||||
cgroup := c.Name
|
||||
if c.Parent != "" {
|
||||
cgroup = filepath.Join(c.Parent, cgroup)
|
||||
}
|
||||
initPath, err := GetInitCgroupDir(subsystem)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(root, subsystem, initPath, cgroup), nil
|
||||
}
|
||||
|
||||
func (c *Cgroup) Join(root, subsystem string, pid int) (string, error) {
|
||||
path, err := c.Path(root, subsystem)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
|
||||
return "", err
|
||||
}
|
||||
if err := writeFile(path, "tasks", strconv.Itoa(pid)); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
func (c *Cgroup) Cleanup(root string) error {
|
||||
get := func(subsystem string) string {
|
||||
path, _ := c.Path(root, subsystem)
|
||||
return path
|
||||
}
|
||||
|
||||
for _, path := range []string{
|
||||
get("memory"),
|
||||
get("devices"),
|
||||
get("cpu"),
|
||||
get("cpuset"),
|
||||
} {
|
||||
os.RemoveAll(path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseCgroupFile(subsystem string, r io.Reader) (string, error) {
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
|
@ -127,131 +89,17 @@ func writeFile(dir, file, data string) error {
|
|||
return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
|
||||
}
|
||||
|
||||
func (c *Cgroup) Apply(pid int) error {
|
||||
func (c *Cgroup) Apply(pid int) (ActiveCgroup, error) {
|
||||
// We have two implementation of cgroups support, one is based on
|
||||
// systemd and the dbus api, and one is based on raw cgroup fs operations
|
||||
// following the pre-single-writer model docs at:
|
||||
// http://www.freedesktop.org/wiki/Software/systemd/PaxControlGroups/
|
||||
//
|
||||
// we can pick any subsystem to find the root
|
||||
cgroupRoot, err := FindCgroupMountpoint("cpu")
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
if useSystemd() {
|
||||
return systemdApply(c, pid)
|
||||
} else {
|
||||
return rawApply(c, pid)
|
||||
}
|
||||
cgroupRoot = filepath.Dir(cgroupRoot)
|
||||
|
||||
if _, err := os.Stat(cgroupRoot); err != nil {
|
||||
return fmt.Errorf("cgroups fs not found")
|
||||
}
|
||||
if err := c.setupDevices(cgroupRoot, pid); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.setupMemory(cgroupRoot, pid); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.setupCpu(cgroupRoot, pid); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.setupCpuset(cgroupRoot, pid); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cgroup) setupDevices(cgroupRoot string, pid int) (err error) {
|
||||
if !c.DeviceAccess {
|
||||
dir, err := c.Join(cgroupRoot, "devices", pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
os.RemoveAll(dir)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := writeFile(dir, "devices.deny", "a"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
allow := []string{
|
||||
// /dev/null, zero, full
|
||||
"c 1:3 rwm",
|
||||
"c 1:5 rwm",
|
||||
"c 1:7 rwm",
|
||||
|
||||
// consoles
|
||||
"c 5:1 rwm",
|
||||
"c 5:0 rwm",
|
||||
"c 4:0 rwm",
|
||||
"c 4:1 rwm",
|
||||
|
||||
// /dev/urandom,/dev/random
|
||||
"c 1:9 rwm",
|
||||
"c 1:8 rwm",
|
||||
|
||||
// /dev/pts/ - pts namespaces are "coming soon"
|
||||
"c 136:* rwm",
|
||||
"c 5:2 rwm",
|
||||
|
||||
// tuntap
|
||||
"c 10:200 rwm",
|
||||
}
|
||||
|
||||
for _, val := range allow {
|
||||
if err := writeFile(dir, "devices.allow", val); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cgroup) setupMemory(cgroupRoot string, pid int) (err error) {
|
||||
if c.Memory != 0 || c.MemorySwap != 0 {
|
||||
dir, err := c.Join(cgroupRoot, "memory", pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
os.RemoveAll(dir)
|
||||
}
|
||||
}()
|
||||
|
||||
if c.Memory != 0 {
|
||||
if err := writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// By default, MemorySwap is set to twice the size of RAM.
|
||||
// If you want to omit MemorySwap, set it to `-1'.
|
||||
if c.MemorySwap != -1 {
|
||||
if err := writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(c.Memory*2, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cgroup) setupCpu(cgroupRoot string, pid int) (err error) {
|
||||
// We always want to join the cpu group, to allow fair cpu scheduling
|
||||
// on a container basis
|
||||
dir, err := c.Join(cgroupRoot, "cpu", pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c.CpuShares != 0 {
|
||||
if err := writeFile(dir, "cpu.shares", strconv.FormatInt(c.CpuShares, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cgroup) setupCpuset(cgroupRoot string, pid int) (err error) {
|
||||
|
|
|
@ -66,7 +66,6 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str
|
|||
"-p", proto,
|
||||
"-d", daddr,
|
||||
"--dport", strconv.Itoa(port),
|
||||
"!", "-i", c.Bridge,
|
||||
"-j", "DNAT",
|
||||
"--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil {
|
||||
return err
|
||||
|
|
23
pkg/label/label.go
Normal file
23
pkg/label/label.go
Normal file
|
@ -0,0 +1,23 @@
|
|||
// +build !selinux !linux
|
||||
|
||||
package label
|
||||
|
||||
// GenLabels is a no-op on builds without SELinux support: no process
// or mount labels are generated.
func GenLabels(options string) (string, string, error) {
	return "", "", nil
}

// FormatMountLabel returns the mount source unchanged when SELinux
// support is compiled out.
func FormatMountLabel(src string, MountLabel string) string {
	return src
}

// SetProcessLabel is a no-op without SELinux support.
func SetProcessLabel(processLabel string) error {
	return nil
}

// GetProcessLabel reports an empty label without SELinux support.
// Added for API parity with the selinux build (label_selinux.go),
// which exports the same function; without it, callers using
// GetProcessLabel fail to compile on non-selinux builds.
func GetProcessLabel() (string, error) {
	return "", nil
}

// SetFileLabel is a no-op without SELinux support.
func SetFileLabel(path string, fileLabel string) error {
	return nil
}

// GetPidCon reports an empty security context without SELinux support.
func GetPidCon(pid int) (string, error) {
	return "", nil
}
|
69
pkg/label/label_selinux.go
Normal file
69
pkg/label/label_selinux.go
Normal file
|
@ -0,0 +1,69 @@
|
|||
// +build selinux,linux
|
||||
|
||||
package label
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/pkg/selinux"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func GenLabels(options string) (string, string, error) {
|
||||
processLabel, mountLabel := selinux.GetLxcContexts()
|
||||
var err error
|
||||
if processLabel == "" { // SELinux is disabled
|
||||
return "", "", err
|
||||
}
|
||||
s := strings.Fields(options)
|
||||
l := len(s)
|
||||
if l > 0 {
|
||||
pcon := selinux.NewContext(processLabel)
|
||||
for i := 0; i < l; i++ {
|
||||
o := strings.Split(s[i], "=")
|
||||
pcon[o[0]] = o[1]
|
||||
}
|
||||
processLabel = pcon.Get()
|
||||
mountLabel, err = selinux.CopyLevel(processLabel, mountLabel)
|
||||
}
|
||||
return processLabel, mountLabel, err
|
||||
}
|
||||
|
||||
// FormatMountLabel combines existing mount options in src with an
// SELinux context= option for MountLabel. With an empty MountLabel the
// options pass through untouched; with an empty src only the context
// option is returned.
func FormatMountLabel(src string, MountLabel string) string {
	if MountLabel == "" {
		return src
	}
	ctx := fmt.Sprintf("context=\"%s\"", MountLabel)
	if src == "" {
		return ctx
	}
	return src + "," + ctx
}
|
||||
|
||||
// SetProcessLabel tells the kernel which SELinux label the next exec'd
// process should run with. No-op when SELinux is disabled.
func SetProcessLabel(processLabel string) error {
	if selinux.SelinuxEnabled() {
		return selinux.Setexeccon(processLabel)
	}
	return nil
}

// GetProcessLabel returns the label the next exec'd process will run
// with, or "" when SELinux is disabled.
func GetProcessLabel() (string, error) {
	if selinux.SelinuxEnabled() {
		return selinux.Getexeccon()
	}
	return "", nil
}

// SetFileLabel applies fileLabel to path. No-op when SELinux is
// disabled or the label is empty.
func SetFileLabel(path string, fileLabel string) error {
	if selinux.SelinuxEnabled() && fileLabel != "" {
		return selinux.Setfilecon(path, fileLabel)
	}
	return nil
}

// GetPidCon returns the SELinux context of the process with the given
// pid. Unlike the wrappers above this does not short-circuit on
// disabled SELinux; the underlying read simply fails or returns "".
func GetPidCon(pid int) (string, error) {
	return selinux.Getpidcon(pid)
}
|
|
@ -7,6 +7,7 @@ import (
|
|||
"os/exec"
|
||||
"syscall"
|
||||
|
||||
"github.com/dotcloud/docker/pkg/cgroups"
|
||||
"github.com/dotcloud/docker/pkg/libcontainer"
|
||||
"github.com/dotcloud/docker/pkg/libcontainer/network"
|
||||
"github.com/dotcloud/docker/pkg/system"
|
||||
|
@ -62,10 +63,15 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [
|
|||
// Do this before syncing with child so that no children
|
||||
// can escape the cgroup
|
||||
ns.logger.Println("setting cgroups")
|
||||
if err := ns.SetupCgroups(container, command.Process.Pid); err != nil {
|
||||
activeCgroup, err := ns.SetupCgroups(container, command.Process.Pid)
|
||||
if err != nil {
|
||||
command.Process.Kill()
|
||||
return -1, err
|
||||
}
|
||||
if activeCgroup != nil {
|
||||
defer activeCgroup.Cleanup()
|
||||
}
|
||||
|
||||
ns.logger.Println("setting up network")
|
||||
if err := ns.InitializeNetworking(container, command.Process.Pid, syncPipe); err != nil {
|
||||
command.Process.Kill()
|
||||
|
@ -86,13 +92,11 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [
|
|||
return status, err
|
||||
}
|
||||
|
||||
func (ns *linuxNs) SetupCgroups(container *libcontainer.Container, nspid int) error {
|
||||
func (ns *linuxNs) SetupCgroups(container *libcontainer.Container, nspid int) (cgroups.ActiveCgroup, error) {
|
||||
if container.Cgroups != nil {
|
||||
if err := container.Cgroups.Apply(nspid); err != nil {
|
||||
return err
|
||||
}
|
||||
return container.Cgroups.Apply(nspid)
|
||||
}
|
||||
return nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (ns *linuxNs) InitializeNetworking(container *libcontainer.Container, nspid int, pipe *SyncPipe) error {
|
||||
|
|
|
@ -4,6 +4,7 @@ package nsinit
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/pkg/label"
|
||||
"github.com/dotcloud/docker/pkg/libcontainer"
|
||||
"github.com/dotcloud/docker/pkg/system"
|
||||
"os"
|
||||
|
@ -32,7 +33,11 @@ func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []s
|
|||
closeFds()
|
||||
return -1, err
|
||||
}
|
||||
|
||||
processLabel, err := label.GetPidCon(nspid)
|
||||
if err != nil {
|
||||
closeFds()
|
||||
return -1, err
|
||||
}
|
||||
// foreach namespace fd, use setns to join an existing container's namespaces
|
||||
for _, fd := range fds {
|
||||
if fd > 0 {
|
||||
|
@ -80,6 +85,10 @@ dropAndExec:
|
|||
if err := finalizeNamespace(container); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
err = label.SetProcessLabel(processLabel)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
if err := system.Execv(args[0], args[0:], container.Env); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
|
|
@ -5,8 +5,10 @@ package nsinit
|
|||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
|
||||
"github.com/dotcloud/docker/pkg/label"
|
||||
"github.com/dotcloud/docker/pkg/libcontainer"
|
||||
"github.com/dotcloud/docker/pkg/libcontainer/apparmor"
|
||||
"github.com/dotcloud/docker/pkg/libcontainer/capabilities"
|
||||
|
@ -61,7 +63,7 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol
|
|||
return fmt.Errorf("setup networking %s", err)
|
||||
}
|
||||
ns.logger.Println("setup mount namespace")
|
||||
if err := setupNewMountNamespace(rootfs, container.Mounts, console, container.ReadonlyFs, container.NoPivotRoot); err != nil {
|
||||
if err := setupNewMountNamespace(rootfs, container.Mounts, console, container.ReadonlyFs, container.NoPivotRoot, container.Context["mount_label"]); err != nil {
|
||||
return fmt.Errorf("setup mount namespace %s", err)
|
||||
}
|
||||
if err := system.Sethostname(container.Hostname); err != nil {
|
||||
|
@ -77,6 +79,10 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol
|
|||
return err
|
||||
}
|
||||
}
|
||||
runtime.LockOSThread()
|
||||
if err := label.SetProcessLabel(container.Context["process_label"]); err != nil {
|
||||
return fmt.Errorf("SetProcessLabel label %s", err)
|
||||
}
|
||||
ns.logger.Printf("execing %s\n", args[0])
|
||||
return system.Execv(args[0], args[0:], container.Env)
|
||||
}
|
||||
|
|
|
@ -4,6 +4,7 @@ package nsinit
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/pkg/label"
|
||||
"github.com/dotcloud/docker/pkg/libcontainer"
|
||||
"github.com/dotcloud/docker/pkg/system"
|
||||
"io/ioutil"
|
||||
|
@ -20,7 +21,7 @@ const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NOD
|
|||
//
|
||||
// There is no need to unmount the new mounts because as soon as the mount namespace
|
||||
// is no longer in use, the mounts will be removed automatically
|
||||
func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, console string, readonly, noPivotRoot bool) error {
|
||||
func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, console string, readonly, noPivotRoot bool, mountLabel string) error {
|
||||
flag := syscall.MS_PRIVATE
|
||||
if noPivotRoot {
|
||||
flag = syscall.MS_SLAVE
|
||||
|
@ -31,7 +32,7 @@ func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, cons
|
|||
if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
|
||||
return fmt.Errorf("mouting %s as bind %s", rootfs, err)
|
||||
}
|
||||
if err := mountSystem(rootfs); err != nil {
|
||||
if err := mountSystem(rootfs, mountLabel); err != nil {
|
||||
return fmt.Errorf("mount system %s", err)
|
||||
}
|
||||
|
||||
|
@ -59,7 +60,7 @@ func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, cons
|
|||
if err := setupDev(rootfs); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := setupPtmx(rootfs, console); err != nil {
|
||||
if err := setupPtmx(rootfs, console, mountLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := system.Chdir(rootfs); err != nil {
|
||||
|
@ -197,7 +198,7 @@ func setupDev(rootfs string) error {
|
|||
}
|
||||
|
||||
// setupConsole ensures that the container has a proper /dev/console setup
|
||||
func setupConsole(rootfs, console string) error {
|
||||
func setupConsole(rootfs, console string, mountLabel string) error {
|
||||
oldMask := system.Umask(0000)
|
||||
defer system.Umask(oldMask)
|
||||
|
||||
|
@ -221,6 +222,9 @@ func setupConsole(rootfs, console string) error {
|
|||
if err := system.Mknod(dest, (st.Mode&^07777)|0600, int(st.Rdev)); err != nil {
|
||||
return fmt.Errorf("mknod %s %s", dest, err)
|
||||
}
|
||||
if err := label.SetFileLabel(console, mountLabel); err != nil {
|
||||
return fmt.Errorf("SetFileLabel Failed %s %s", dest, err)
|
||||
}
|
||||
if err := system.Mount(console, dest, "bind", syscall.MS_BIND, ""); err != nil {
|
||||
return fmt.Errorf("bind %s to %s %s", console, dest, err)
|
||||
}
|
||||
|
@ -229,7 +233,7 @@ func setupConsole(rootfs, console string) error {
|
|||
|
||||
// mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts
|
||||
// inside the mount namespace
|
||||
func mountSystem(rootfs string) error {
|
||||
func mountSystem(rootfs string, mountLabel string) error {
|
||||
for _, m := range []struct {
|
||||
source string
|
||||
path string
|
||||
|
@ -239,8 +243,8 @@ func mountSystem(rootfs string) error {
|
|||
}{
|
||||
{source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags},
|
||||
{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags},
|
||||
{source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: "mode=1777,size=65536k"},
|
||||
{source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: "newinstance,ptmxmode=0666,mode=620,gid=5"},
|
||||
{source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1755,size=65536k", mountLabel)},
|
||||
{source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)},
|
||||
} {
|
||||
if err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) {
|
||||
return fmt.Errorf("mkdirall %s %s", m.path, err)
|
||||
|
@ -254,7 +258,7 @@ func mountSystem(rootfs string) error {
|
|||
|
||||
// setupPtmx adds a symlink to pts/ptmx for /dev/ptmx and
|
||||
// finishes setting up /dev/console
|
||||
func setupPtmx(rootfs, console string) error {
|
||||
func setupPtmx(rootfs, console string, mountLabel string) error {
|
||||
ptmx := filepath.Join(rootfs, "dev/ptmx")
|
||||
if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
|
@ -263,7 +267,7 @@ func setupPtmx(rootfs, console string) error {
|
|||
return fmt.Errorf("symlink dev ptmx %s", err)
|
||||
}
|
||||
if console != "" {
|
||||
if err := setupConsole(rootfs, console); err != nil {
|
||||
if err := setupConsole(rootfs, console, mountLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
387
pkg/selinux/selinux.go
Normal file
387
pkg/selinux/selinux.go
Normal file
|
@ -0,0 +1,387 @@
|
|||
package selinux
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/pkg/mount"
|
||||
"github.com/dotcloud/docker/pkg/system"
|
||||
"io"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
Enforcing = 1
|
||||
Permissive = 0
|
||||
Disabled = -1
|
||||
selinuxDir = "/etc/selinux/"
|
||||
selinuxConfig = selinuxDir + "config"
|
||||
selinuxTypeTag = "SELINUXTYPE"
|
||||
selinuxTag = "SELINUX"
|
||||
selinuxPath = "/sys/fs/selinux"
|
||||
xattrNameSelinux = "security.selinux"
|
||||
stRdOnly = 0x01
|
||||
)
|
||||
|
||||
var (
|
||||
assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
|
||||
spaceRegex = regexp.MustCompile(`^([^=]+) (.*)$`)
|
||||
mcsList = make(map[string]bool)
|
||||
selinuxfs = "unknown"
|
||||
selinuxEnabled = false
|
||||
selinuxEnabledChecked = false
|
||||
)
|
||||
|
||||
type SELinuxContext map[string]string
|
||||
|
||||
// GetSelinuxMountPoint returns the mount point of a writable selinuxfs
// filesystem, or "" if none is mounted read-write. The result is
// cached in the package-level selinuxfs variable ("unknown" is the
// sentinel for "not probed yet"), so the mount table is only scanned
// once per process. Not safe for concurrent first use.
func GetSelinuxMountPoint() string {
	if selinuxfs != "unknown" {
		return selinuxfs
	}
	selinuxfs = ""

	mounts, err := mount.GetMounts()
	if err != nil {
		// Cannot read the mount table; cache the negative result.
		return selinuxfs
	}
	for _, mount := range mounts {
		if mount.Fstype == "selinuxfs" {
			selinuxfs = mount.Mountpoint
			break
		}
	}
	if selinuxfs != "" {
		// A read-only selinuxfs is unusable for setting contexts;
		// treat it as absent. Statfs errors are ignored here, leaving
		// buf zeroed (flag test then passes).
		var buf syscall.Statfs_t
		syscall.Statfs(selinuxfs, &buf)
		if (buf.Flags & stRdOnly) == 1 {
			selinuxfs = ""
		}
	}
	return selinuxfs
}
|
||||
|
||||
// SelinuxEnabled reports whether SELinux is usable: a writable
// selinuxfs is mounted and the current process has a real security
// context (i.e. not the "kernel" placeholder). The answer is computed
// once and cached in package-level variables; not safe for concurrent
// first use.
func SelinuxEnabled() bool {
	if selinuxEnabledChecked {
		return selinuxEnabled
	}
	selinuxEnabledChecked = true
	if fs := GetSelinuxMountPoint(); fs != "" {
		// Error from Getcon is deliberately ignored: on failure con is
		// "" which still counts as enabled here.
		if con, _ := Getcon(); con != "kernel" {
			selinuxEnabled = true
		}
	}
	return selinuxEnabled
}
|
||||
|
||||
func ReadConfig(target string) (value string) {
|
||||
var (
|
||||
val, key string
|
||||
bufin *bufio.Reader
|
||||
)
|
||||
|
||||
in, err := os.Open(selinuxConfig)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
bufin = bufio.NewReader(in)
|
||||
|
||||
for done := false; !done; {
|
||||
var line string
|
||||
if line, err = bufin.ReadString('\n'); err != nil {
|
||||
if err != io.EOF {
|
||||
return ""
|
||||
}
|
||||
done = true
|
||||
}
|
||||
line = strings.TrimSpace(line)
|
||||
if len(line) == 0 {
|
||||
// Skip blank lines
|
||||
continue
|
||||
}
|
||||
if line[0] == ';' || line[0] == '#' {
|
||||
// Skip comments
|
||||
continue
|
||||
}
|
||||
if groups := assignRegex.FindStringSubmatch(line); groups != nil {
|
||||
key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
|
||||
if key == target {
|
||||
return strings.Trim(val, "\"")
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetSELinuxPolicyRoot returns the directory of the active SELinux
// policy, derived from the SELINUXTYPE setting in /etc/selinux/config
// (e.g. "/etc/selinux/targeted"). If the setting is missing the bare
// "/etc/selinux/" prefix is returned.
func GetSELinuxPolicyRoot() string {
	return selinuxDir + ReadConfig(selinuxTypeTag)
}
|
||||
|
||||
func readCon(name string) (string, error) {
|
||||
var val string
|
||||
|
||||
in, err := os.Open(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
_, err = fmt.Fscanf(in, "%s", &val)
|
||||
return val, err
|
||||
}
|
||||
|
||||
// Setfilecon sets the SELinux context of path by writing the
// security.selinux extended attribute. Requires a kernel with SELinux
// xattr support and sufficient privilege.
func Setfilecon(path string, scon string) error {
	return system.Lsetxattr(path, xattrNameSelinux, []byte(scon), 0)
}
|
||||
|
||||
func Getfilecon(path string) (string, error) {
|
||||
var scon []byte
|
||||
|
||||
cnt, err := syscall.Getxattr(path, xattrNameSelinux, scon)
|
||||
scon = make([]byte, cnt)
|
||||
cnt, err = syscall.Getxattr(path, xattrNameSelinux, scon)
|
||||
return string(scon), err
|
||||
}
|
||||
|
||||
// Setfscreatecon sets the SELinux context applied to files this
// process creates; an empty scon clears it.
func Setfscreatecon(scon string) error {
	return writeCon("/proc/self/attr/fscreate", scon)
}

// Getfscreatecon returns the context currently set for newly created
// files, or "" when none is set.
func Getfscreatecon() (string, error) {
	return readCon("/proc/self/attr/fscreate")
}

// Getcon returns the current process's SELinux context.
func Getcon() (string, error) {
	return readCon("/proc/self/attr/current")
}

// Getpidcon returns the SELinux context of the process with the given
// pid.
func Getpidcon(pid int) (string, error) {
	return readCon(fmt.Sprintf("/proc/%d/attr/current", pid))
}

// Getexeccon returns the context set for this process's next exec, if
// any.
func Getexeccon() (string, error) {
	return readCon("/proc/self/attr/exec")
}
|
||||
|
||||
func writeCon(name string, val string) error {
|
||||
if !SelinuxEnabled() {
|
||||
return nil
|
||||
}
|
||||
out, err := os.OpenFile(name, os.O_WRONLY, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
if val != "" {
|
||||
_, err = out.Write([]byte(val))
|
||||
} else {
|
||||
_, err = out.Write(nil)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Setexeccon sets the SELinux context the calling thread's next exec
// will run with. It targets the per-task attr file (via Gettid)
// because exec contexts are per-thread; callers should lock the OS
// thread around this and the exec.
func Setexeccon(scon string) error {
	return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), scon)
}
|
||||
|
||||
func (c SELinuxContext) Get() string {
|
||||
return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"])
|
||||
}
|
||||
|
||||
func NewContext(scon string) SELinuxContext {
|
||||
c := make(SELinuxContext)
|
||||
|
||||
if len(scon) != 0 {
|
||||
con := strings.SplitN(scon, ":", 4)
|
||||
c["user"] = con[0]
|
||||
c["role"] = con[1]
|
||||
c["type"] = con[2]
|
||||
c["level"] = con[3]
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func SelinuxGetEnforce() int {
|
||||
var enforce int
|
||||
|
||||
enforceS, err := readCon(fmt.Sprintf("%s/enforce", selinuxPath))
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
|
||||
enforce, err = strconv.Atoi(string(enforceS))
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
return enforce
|
||||
}
|
||||
|
||||
func SelinuxGetEnforceMode() int {
|
||||
switch ReadConfig(selinuxTag) {
|
||||
case "enforcing":
|
||||
return Enforcing
|
||||
case "permissive":
|
||||
return Permissive
|
||||
}
|
||||
return Disabled
|
||||
}
|
||||
|
||||
func mcsAdd(mcs string) {
|
||||
mcsList[mcs] = true
|
||||
}
|
||||
|
||||
func mcsDelete(mcs string) {
|
||||
mcsList[mcs] = false
|
||||
}
|
||||
|
||||
func mcsExists(mcs string) bool {
|
||||
return mcsList[mcs]
|
||||
}
|
||||
|
||||
// IntToMcs deterministically maps an integer id (1..523776) to an MCS
// label "s0:cA,cB" with two distinct categories below catRange, by
// walking a triangular enumeration of category pairs. Out-of-range ids
// yield the empty string.
func IntToMcs(id int, catRange uint32) string {
	if id < 1 || id > 523776 {
		return ""
	}

	setSize := int(catRange)
	tier := setSize
	ord := id
	// Descend through tiers (pairs sharing the same first category)
	// until ord indexes into the current tier.
	for ord > tier {
		ord -= tier
		tier--
	}
	first := setSize - tier
	return fmt.Sprintf("s0:c%d,c%d", first, ord+first)
}
|
||||
|
||||
// uniqMcs returns a random, currently unused MCS label "s0:cA,cB" with
// two distinct categories below catRange, registering it in mcsList
// before returning. Loops until an unused pair is found — with a very
// small catRange or a nearly full registry this can spin for a long
// time.
// NOTE(review): errors from binary.Read on crypto/rand are ignored —
// presumably rand.Reader never fails here; confirm.
func uniqMcs(catRange uint32) string {
	var (
		n      uint32
		c1, c2 uint32
		mcs    string
	)

	for {
		binary.Read(rand.Reader, binary.LittleEndian, &n)
		c1 = n % catRange
		binary.Read(rand.Reader, binary.LittleEndian, &n)
		c2 = n % catRange
		if c1 == c2 {
			// Categories must differ; draw a fresh pair.
			continue
		} else {
			if c1 > c2 {
				// Canonical order: smaller category first.
				t := c1
				c1 = c2
				c2 = t
			}
		}
		mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2)
		if mcsExists(mcs) {
			// Already handed out; try again.
			continue
		}
		mcsAdd(mcs)
		break
	}
	return mcs
}
|
||||
|
||||
func FreeContext(con string) {
|
||||
if con != "" {
|
||||
scon := NewContext(con)
|
||||
mcsDelete(scon["level"])
|
||||
}
|
||||
}
|
||||
|
||||
// GetLxcContexts returns the process and file labels to use for
// containers. It starts from hard-coded svirt defaults, optionally
// overrides them from the policy's lxc_contexts file ("process" and
// "file" keys), and finally stamps both labels with an MCS level
// derived deterministically from this process's PID. Returns empty
// labels when SELinux is disabled.
func GetLxcContexts() (processLabel string, fileLabel string) {
	var (
		val, key string
		bufin    *bufio.Reader
	)

	if !SelinuxEnabled() {
		return "", ""
	}
	// NOTE(review): "content/lxc_contexts" — the conventional policy
	// path is "contexts/lxc_contexts"; confirm this spelling is
	// intentional (if the file is missing we silently keep defaults).
	lxcPath := fmt.Sprintf("%s/content/lxc_contexts", GetSELinuxPolicyRoot())
	fileLabel = "system_u:object_r:svirt_sandbox_file_t:s0"
	processLabel = "system_u:system_r:svirt_lxc_net_t:s0"

	in, err := os.Open(lxcPath)
	if err != nil {
		// No policy file: fall through with the defaults above.
		goto exit
	}
	defer in.Close()

	bufin = bufio.NewReader(in)

	for done := false; !done; {
		var line string
		if line, err = bufin.ReadString('\n'); err != nil {
			if err == io.EOF {
				// Still process a final line without a trailing newline.
				done = true
			} else {
				goto exit
			}
		}
		line = strings.TrimSpace(line)
		if len(line) == 0 {
			// Skip blank lines
			continue
		}
		if line[0] == ';' || line[0] == '#' {
			// Skip comments
			continue
		}
		if groups := assignRegex.FindStringSubmatch(line); groups != nil {
			key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
			if key == "process" {
				processLabel = strings.Trim(val, "\"")
			}
			if key == "file" {
				fileLabel = strings.Trim(val, "\"")
			}
		}
	}
exit:
	// Give both labels the same PID-derived MCS level so a container's
	// process can access its own files but not other containers'.
	mcs := IntToMcs(os.Getpid(), 1024)
	scon := NewContext(processLabel)
	scon["level"] = mcs
	processLabel = scon.Get()
	scon = NewContext(fileLabel)
	scon["level"] = mcs
	fileLabel = scon.Get()
	return processLabel, fileLabel
}
|
||||
|
||||
func SecurityCheckContext(val string) error {
|
||||
return writeCon(fmt.Sprintf("%s.context", selinuxPath), val)
|
||||
}
|
||||
|
||||
// CopyLevel returns dest with its MCS level replaced by src's level,
// after asking the kernel to validate both input contexts. Returns
// ("", nil) when SELinux is disabled or src is empty.
func CopyLevel(src, dest string) (string, error) {
	if !SelinuxEnabled() {
		return "", nil
	}
	if src == "" {
		return "", nil
	}
	// Both contexts must be well-formed per the loaded policy before
	// we splice fields between them.
	if err := SecurityCheckContext(src); err != nil {
		return "", err
	}
	if err := SecurityCheckContext(dest); err != nil {
		return "", err
	}
	scon := NewContext(src)
	tcon := NewContext(dest)
	tcon["level"] = scon["level"]
	return tcon.Get(), nil
}
|
64
pkg/selinux/selinux_test.go
Normal file
64
pkg/selinux/selinux_test.go
Normal file
|
@ -0,0 +1,64 @@
|
|||
package selinux_test
|
||||
|
||||
import (
|
||||
"github.com/dotcloud/docker/pkg/selinux"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// testSetfilecon exercises Setfilecon/Getfilecon against a scratch
// file when SELinux is enabled.
// NOTE(review): the lowercase name means `go test` never runs this
// function; rename to TestSetfilecon if it is meant to execute.
func testSetfilecon(t *testing.T) {
	if selinux.SelinuxEnabled() {
		tmp := "selinux_test"
		// NOTE(review): O_WRONLY without O_CREATE will not create the
		// file — presumably it is expected to pre-exist; the open
		// error is ignored (Close on a nil *os.File is safe).
		out, _ := os.OpenFile(tmp, os.O_WRONLY, 0)
		out.Close()
		err := selinux.Setfilecon(tmp, "system_u:object_r:bin_t:s0")
		if err == nil {
			t.Log(selinux.Getfilecon(tmp))
		} else {
			t.Log("Setfilecon failed")
			t.Fatal(err)
		}
		os.Remove(tmp)
	}
}
|
||||
|
||||
func TestSELinux(t *testing.T) {
|
||||
var (
|
||||
err error
|
||||
plabel, flabel string
|
||||
)
|
||||
|
||||
if selinux.SelinuxEnabled() {
|
||||
t.Log("Enabled")
|
||||
plabel, flabel = selinux.GetLxcContexts()
|
||||
t.Log(plabel)
|
||||
t.Log(flabel)
|
||||
plabel, flabel = selinux.GetLxcContexts()
|
||||
t.Log(plabel)
|
||||
t.Log(flabel)
|
||||
t.Log("getenforce ", selinux.SelinuxGetEnforce())
|
||||
t.Log("getenforcemode ", selinux.SelinuxGetEnforceMode())
|
||||
pid := os.Getpid()
|
||||
t.Log("PID:%d MCS:%s\n", pid, selinux.IntToMcs(pid, 1023))
|
||||
t.Log(selinux.Getcon())
|
||||
t.Log(selinux.Getfilecon("/etc/passwd"))
|
||||
err = selinux.Setfscreatecon("unconfined_u:unconfined_r:unconfined_t:s0")
|
||||
if err == nil {
|
||||
t.Log(selinux.Getfscreatecon())
|
||||
} else {
|
||||
t.Log("setfscreatecon failed", err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = selinux.Setfscreatecon("")
|
||||
if err == nil {
|
||||
t.Log(selinux.Getfscreatecon())
|
||||
} else {
|
||||
t.Log("setfscreatecon failed", err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log(selinux.Getpidcon(1))
|
||||
t.Log(selinux.GetSelinuxMountPoint())
|
||||
} else {
|
||||
t.Log("Disabled")
|
||||
}
|
||||
}
|
15
pkg/systemd/booted.go
Normal file
15
pkg/systemd/booted.go
Normal file
|
@ -0,0 +1,15 @@
|
|||
package systemd
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// Conversion to Go of systemd's sd_booted()
|
||||
// SdBooted reports whether this system was booted with systemd as
// init, mirroring systemd's sd_booted(): it checks that
// /run/systemd/system exists and is a directory.
func SdBooted() bool {
	info, err := os.Stat("/run/systemd/system")
	return err == nil && info.IsDir()
}
|
|
@ -5,7 +5,7 @@ import (
|
|||
"net"
|
||||
"strconv"
|
||||
|
||||
"github.com/dotcloud/docker/pkg/systemd/activation"
|
||||
"github.com/coreos/go-systemd/activation"
|
||||
)
|
||||
|
||||
// ListenFD returns the specified socket activated files as a slice of
|
||||
|
|
|
@ -206,4 +206,8 @@ func TestValidRepositoryName(t *testing.T) {
|
|||
t.Log("Repository name should be invalid")
|
||||
t.Fail()
|
||||
}
|
||||
if err := validateRepositoryName("docker///docker"); err == nil {
|
||||
t.Log("Repository name should be invalid")
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
package runconfig
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/dotcloud/docker/engine"
|
||||
"github.com/dotcloud/docker/nat"
|
||||
"github.com/dotcloud/docker/runtime/execdriver"
|
||||
)
|
||||
|
||||
// Note: the Config structure should hold only portable information about the container.
|
||||
|
@ -34,9 +36,17 @@ type Config struct {
|
|||
Entrypoint []string
|
||||
NetworkDisabled bool
|
||||
OnBuild []string
|
||||
Context execdriver.Context
|
||||
}
|
||||
|
||||
func ContainerConfigFromJob(job *engine.Job) *Config {
|
||||
var context execdriver.Context
|
||||
val := job.Getenv("Context")
|
||||
if val != "" {
|
||||
if err := json.Unmarshal([]byte(val), &context); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
config := &Config{
|
||||
Hostname: job.Getenv("Hostname"),
|
||||
Domainname: job.Getenv("Domainname"),
|
||||
|
@ -54,6 +64,7 @@ func ContainerConfigFromJob(job *engine.Job) *Config {
|
|||
VolumesFrom: job.Getenv("VolumesFrom"),
|
||||
WorkingDir: job.Getenv("WorkingDir"),
|
||||
NetworkDisabled: job.GetenvBool("NetworkDisabled"),
|
||||
Context: context,
|
||||
}
|
||||
job.GetenvJson("ExposedPorts", &config.ExposedPorts)
|
||||
job.GetenvJson("Volumes", &config.Volumes)
|
||||
|
|
|
@ -247,7 +247,7 @@ func TestMerge(t *testing.T) {
|
|||
volumesUser := make(map[string]struct{})
|
||||
volumesUser["/test3"] = struct{}{}
|
||||
configUser := &Config{
|
||||
Dns: []string{"3.3.3.3"},
|
||||
Dns: []string{"2.2.2.2", "3.3.3.3"},
|
||||
PortSpecs: []string{"3333:2222", "3333:3333"},
|
||||
Env: []string{"VAR2=3", "VAR3=3"},
|
||||
Volumes: volumesUser,
|
||||
|
|
|
@ -3,12 +3,13 @@ package runconfig
|
|||
import (
|
||||
"github.com/dotcloud/docker/engine"
|
||||
"github.com/dotcloud/docker/nat"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
)
|
||||
|
||||
type HostConfig struct {
|
||||
Binds []string
|
||||
ContainerIDFile string
|
||||
LxcConf []KeyValuePair
|
||||
LxcConf []utils.KeyValuePair
|
||||
Privileged bool
|
||||
PortBindings nat.PortMap
|
||||
Links []string
|
||||
|
|
|
@ -97,8 +97,15 @@ func Merge(userConf, imageConf *Config) error {
|
|||
if userConf.Dns == nil || len(userConf.Dns) == 0 {
|
||||
userConf.Dns = imageConf.Dns
|
||||
} else {
|
||||
//duplicates aren't an issue here
|
||||
userConf.Dns = append(userConf.Dns, imageConf.Dns...)
|
||||
dnsSet := make(map[string]struct{}, len(userConf.Dns))
|
||||
for _, dns := range userConf.Dns {
|
||||
dnsSet[dns] = struct{}{}
|
||||
}
|
||||
for _, dns := range imageConf.Dns {
|
||||
if _, exists := dnsSet[dns]; !exists {
|
||||
userConf.Dns = append(userConf.Dns, dns)
|
||||
}
|
||||
}
|
||||
}
|
||||
if userConf.DnsSearch == nil || len(userConf.DnsSearch) == 0 {
|
||||
userConf.DnsSearch = imageConf.DnsSearch
|
||||
|
|
|
@ -4,8 +4,10 @@ import (
|
|||
"fmt"
|
||||
"github.com/dotcloud/docker/nat"
|
||||
"github.com/dotcloud/docker/opts"
|
||||
"github.com/dotcloud/docker/pkg/label"
|
||||
flag "github.com/dotcloud/docker/pkg/mflag"
|
||||
"github.com/dotcloud/docker/pkg/sysinfo"
|
||||
"github.com/dotcloud/docker/runtime/execdriver"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
|
@ -32,6 +34,10 @@ func ParseSubcommand(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo)
|
|||
}
|
||||
|
||||
func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) {
|
||||
var (
|
||||
processLabel string
|
||||
mountLabel string
|
||||
)
|
||||
var (
|
||||
// FIXME: use utils.ListOpts for attach and volumes?
|
||||
flAttach = opts.NewListOpts(opts.ValidateAttach)
|
||||
|
@ -61,6 +67,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
|
|||
flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID")
|
||||
flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container")
|
||||
flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
|
||||
flLabelOptions = cmd.String([]string{"Z", "-label"}, "", "Options to pass to underlying labeling system")
|
||||
|
||||
// For documentation purpose
|
||||
_ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)")
|
||||
|
@ -77,7 +84,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
|
|||
cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers")
|
||||
cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom dns search domains")
|
||||
cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)")
|
||||
cmd.Var(&flLxcOpts, []string{"#lxc-conf", "#-lxc-conf"}, "Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
|
||||
cmd.Var(&flLxcOpts, []string{"#lxc-conf", "#-lxc-conf"}, "(lxc exec-driver only) Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
|
||||
cmd.Var(&flDriverOpts, []string{"o", "-opt"}, "Add custom driver options")
|
||||
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
|
@ -152,7 +159,16 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
|
|||
entrypoint = []string{*flEntrypoint}
|
||||
}
|
||||
|
||||
lxcConf, err := parseLxcConfOpts(flLxcOpts)
|
||||
if !*flPrivileged {
|
||||
pLabel, mLabel, e := label.GenLabels(*flLabelOptions)
|
||||
if e != nil {
|
||||
return nil, nil, cmd, fmt.Errorf("Invalid security labels : %s", e)
|
||||
}
|
||||
processLabel = pLabel
|
||||
mountLabel = mLabel
|
||||
}
|
||||
|
||||
lxcConf, err := parseKeyValueOpts(flLxcOpts)
|
||||
if err != nil {
|
||||
return nil, nil, cmd, err
|
||||
}
|
||||
|
@ -206,6 +222,15 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
|
|||
VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","),
|
||||
Entrypoint: entrypoint,
|
||||
WorkingDir: *flWorkingDir,
|
||||
Context: execdriver.Context{
|
||||
"mount_label": mountLabel,
|
||||
"process_label": processLabel,
|
||||
},
|
||||
}
|
||||
|
||||
driverOptions, err := parseDriverOpts(flDriverOpts)
|
||||
if err != nil {
|
||||
return nil, nil, cmd, err
|
||||
}
|
||||
|
||||
pluginOptions, err := parseDriverOpts(flDriverOpts)
|
||||
|
@ -221,7 +246,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
|
|||
PortBindings: portBindings,
|
||||
Links: flLinks.GetAll(),
|
||||
PublishAllPorts: *flPublishAll,
|
||||
DriverOptions: pluginOptions,
|
||||
DriverOptions: driverOptions,
|
||||
}
|
||||
|
||||
if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit {
|
||||
|
@ -236,24 +261,33 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
|
|||
return config, hostConfig, cmd, nil
|
||||
}
|
||||
|
||||
func parseLxcConfOpts(opts opts.ListOpts) ([]KeyValuePair, error) {
|
||||
out := make([]KeyValuePair, opts.Len())
|
||||
for i, o := range opts.GetAll() {
|
||||
k, v, err := parseLxcOpt(o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// options will come in the format of name.key=value or name.option
|
||||
func parseDriverOpts(opts opts.ListOpts) (map[string][]string, error) {
|
||||
out := make(map[string][]string, len(opts.GetAll()))
|
||||
for _, o := range opts.GetAll() {
|
||||
parts := strings.SplitN(o, ".", 2)
|
||||
if len(parts) < 2 {
|
||||
return nil, fmt.Errorf("invalid opt format %s", o)
|
||||
}
|
||||
out[i] = KeyValuePair{Key: k, Value: v}
|
||||
values, exists := out[parts[0]]
|
||||
if !exists {
|
||||
values = []string{}
|
||||
}
|
||||
out[parts[0]] = append(values, parts[1])
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func parseLxcOpt(opt string) (string, string, error) {
|
||||
parts := strings.SplitN(opt, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
return "", "", fmt.Errorf("Unable to parse lxc conf option: %s", opt)
|
||||
func parseKeyValueOpts(opts opts.ListOpts) ([]utils.KeyValuePair, error) {
|
||||
out := make([]utils.KeyValuePair, opts.Len())
|
||||
for i, o := range opts.GetAll() {
|
||||
k, v, err := utils.ParseKeyValueOpt(o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out[i] = utils.KeyValuePair{Key: k, Value: v}
|
||||
}
|
||||
return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// options will come in the format of name.type=value
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package runconfig
|
||||
|
||||
import (
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"testing"
|
||||
)
|
||||
|
||||
|
@ -8,7 +9,7 @@ func TestParseLxcConfOpt(t *testing.T) {
|
|||
opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "}
|
||||
|
||||
for _, o := range opts {
|
||||
k, v, err := parseLxcOpt(o)
|
||||
k, v, err := utils.ParseKeyValueOpt(o)
|
||||
if err != nil {
|
||||
t.FailNow()
|
||||
}
|
||||
|
|
|
@ -404,6 +404,7 @@ func populateCommand(c *Container) {
|
|||
User: c.Config.User,
|
||||
Config: driverConfig,
|
||||
Resources: resources,
|
||||
Context: c.Config.Context,
|
||||
}
|
||||
c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
|
||||
}
|
||||
|
@ -537,8 +538,18 @@ func (container *Container) Start() (err error) {
|
|||
|
||||
if container.Config.WorkingDir != "" {
|
||||
container.Config.WorkingDir = path.Clean(container.Config.WorkingDir)
|
||||
if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil {
|
||||
return nil
|
||||
|
||||
pthInfo, err := os.Stat(path.Join(container.basefs, container.Config.WorkingDir))
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if pthInfo != nil && !pthInfo.IsDir() {
|
||||
return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -905,12 +916,20 @@ func (container *Container) Stop(seconds int) error {
|
|||
|
||||
// 1. Send a SIGTERM
|
||||
if err := container.KillSig(15); err != nil {
|
||||
return err
|
||||
utils.Debugf("Error sending kill SIGTERM: %s", err)
|
||||
log.Print("Failed to send SIGTERM to the process, force killing")
|
||||
if err := container.KillSig(9); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// 2. Wait for the process to exit on its own
|
||||
if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil {
|
||||
return err
|
||||
log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
|
||||
// 3. If it doesn't, then send SIGKILL
|
||||
if err := container.Kill(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -952,10 +971,11 @@ func (container *Container) ExportRw() (archive.Archive, error) {
|
|||
return nil, err
|
||||
}
|
||||
return utils.NewReadCloserWrapper(archive, func() error {
|
||||
err := archive.Close()
|
||||
container.Unmount()
|
||||
return err
|
||||
}), nil
|
||||
err := archive.Close()
|
||||
container.Unmount()
|
||||
return err
|
||||
}),
|
||||
nil
|
||||
}
|
||||
|
||||
func (container *Container) Export() (archive.Archive, error) {
|
||||
|
@ -969,10 +989,11 @@ func (container *Container) Export() (archive.Archive, error) {
|
|||
return nil, err
|
||||
}
|
||||
return utils.NewReadCloserWrapper(archive, func() error {
|
||||
err := archive.Close()
|
||||
container.Unmount()
|
||||
return err
|
||||
}), nil
|
||||
err := archive.Close()
|
||||
container.Unmount()
|
||||
return err
|
||||
}),
|
||||
nil
|
||||
}
|
||||
|
||||
func (container *Container) WaitTimeout(timeout time.Duration) error {
|
||||
|
@ -1121,10 +1142,11 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
|
|||
return nil, err
|
||||
}
|
||||
return utils.NewReadCloserWrapper(archive, func() error {
|
||||
err := archive.Close()
|
||||
container.Unmount()
|
||||
return err
|
||||
}), nil
|
||||
err := archive.Close()
|
||||
container.Unmount()
|
||||
return err
|
||||
}),
|
||||
nil
|
||||
}
|
||||
|
||||
// Returns true if the container exposes a certain port
|
||||
|
|
|
@ -7,6 +7,10 @@ import (
|
|||
"os/exec"
|
||||
)
|
||||
|
||||
// Context is a generic key value pair that allows
|
||||
// arbatrary data to be sent
|
||||
type Context map[string]string
|
||||
|
||||
var (
|
||||
ErrNotRunning = errors.New("Process could not be started")
|
||||
ErrWaitTimeoutReached = errors.New("Wait timeout reached")
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package lxc
|
||||
|
||||
import (
|
||||
"github.com/dotcloud/docker/pkg/label"
|
||||
"github.com/dotcloud/docker/runtime/execdriver"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
@ -29,6 +30,10 @@ lxc.pts = 1024
|
|||
|
||||
# disable the main console
|
||||
lxc.console = none
|
||||
{{if getProcessLabel .Context}}
|
||||
lxc.se_context = {{ getProcessLabel .Context}}
|
||||
{{$MOUNTLABEL := getMountLabel .Context}}
|
||||
{{end}}
|
||||
|
||||
# no controlling tty at all
|
||||
lxc.tty = 1
|
||||
|
@ -85,8 +90,8 @@ lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noe
|
|||
lxc.mount.entry = {{.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0
|
||||
{{end}}
|
||||
|
||||
lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0
|
||||
lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs size=65536k,nosuid,nodev,noexec 0 0
|
||||
lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts {{formatMountLabel "newinstance,ptmxmode=0666,nosuid,noexec" "$MOUNTLABEL"}} 0 0
|
||||
lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountLabel "size=65536k,nosuid,nodev,noexec" "$MOUNTLABEL"}} 0 0
|
||||
|
||||
{{range $value := .Mounts}}
|
||||
{{if $value.Writable}}
|
||||
|
@ -142,11 +147,22 @@ func getMemorySwap(v *execdriver.Resources) int64 {
|
|||
return v.Memory * 2
|
||||
}
|
||||
|
||||
func getProcessLabel(c execdriver.Context) string {
|
||||
return c["process_label"]
|
||||
}
|
||||
|
||||
func getMountLabel(c execdriver.Context) string {
|
||||
return c["mount_label"]
|
||||
}
|
||||
|
||||
func init() {
|
||||
var err error
|
||||
funcMap := template.FuncMap{
|
||||
"getMemorySwap": getMemorySwap,
|
||||
"getProcessLabel": getProcessLabel,
|
||||
"getMountLabel": getMountLabel,
|
||||
"escapeFstabSpaces": escapeFstabSpaces,
|
||||
"formatMountLabel": label.FormatMountLabel,
|
||||
}
|
||||
LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate)
|
||||
if err != nil {
|
||||
|
|
|
@ -75,10 +75,11 @@ func TestCustomLxcConfig(t *testing.T) {
|
|||
command := &execdriver.Command{
|
||||
ID: "1",
|
||||
Privileged: false,
|
||||
Config: map[string][]string{"lxc": {
|
||||
"lxc.utsname = docker",
|
||||
"lxc.cgroup.cpuset.cpus = 0,1",
|
||||
},
|
||||
Config: map[string][]string{
|
||||
"lxc": {
|
||||
"lxc.utsname = docker",
|
||||
"lxc.cgroup.cpuset.cpus = 0,1",
|
||||
},
|
||||
},
|
||||
Network: &execdriver.Network{
|
||||
Mtu: 1500,
|
||||
|
|
|
@ -134,7 +134,7 @@ func (a Driver) Exists(id string) bool {
|
|||
|
||||
// Three folders are created for each id
|
||||
// mnt, layers, and diff
|
||||
func (a *Driver) Create(id, parent string) error {
|
||||
func (a *Driver) Create(id, parent string, mountLabel string) error {
|
||||
if err := a.createDirsFor(id); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -90,7 +90,7 @@ func TestCreateNewDir(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
@ -99,7 +99,7 @@ func TestCreateNewDirStructure(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -120,7 +120,7 @@ func TestRemoveImage(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -145,7 +145,7 @@ func TestGetWithoutParent(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -172,7 +172,7 @@ func TestCleanupWithDir(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -185,7 +185,7 @@ func TestMountedFalseResponse(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -204,10 +204,10 @@ func TestMountedTrueReponse(t *testing.T) {
|
|||
defer os.RemoveAll(tmp)
|
||||
defer d.Cleanup()
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := d.Create("2", "1"); err != nil {
|
||||
if err := d.Create("2", "1", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -230,10 +230,10 @@ func TestMountWithParent(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := d.Create("2", "1"); err != nil {
|
||||
if err := d.Create("2", "1", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -261,10 +261,10 @@ func TestRemoveMountedDir(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := d.Create("2", "1"); err != nil {
|
||||
if err := d.Create("2", "1", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -300,7 +300,7 @@ func TestCreateWithInvalidParent(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", "docker"); err == nil {
|
||||
if err := d.Create("1", "docker", ""); err == nil {
|
||||
t.Fatalf("Error should not be nil with parent does not exist")
|
||||
}
|
||||
}
|
||||
|
@ -309,7 +309,7 @@ func TestGetDiff(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -343,10 +343,10 @@ func TestChanges(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := d.Create("2", "1"); err != nil {
|
||||
if err := d.Create("2", "1", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -392,7 +392,7 @@ func TestChanges(t *testing.T) {
|
|||
t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind)
|
||||
}
|
||||
|
||||
if err := d.Create("3", "2"); err != nil {
|
||||
if err := d.Create("3", "2", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mntPoint, err = d.Get("3")
|
||||
|
@ -437,7 +437,7 @@ func TestDiffSize(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -479,7 +479,7 @@ func TestChildDiffSize(t *testing.T) {
|
|||
defer os.RemoveAll(tmp)
|
||||
defer d.Cleanup()
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -515,7 +515,7 @@ func TestChildDiffSize(t *testing.T) {
|
|||
t.Fatalf("Expected size to be %d got %d", size, diffSize)
|
||||
}
|
||||
|
||||
if err := d.Create("2", "1"); err != nil {
|
||||
if err := d.Create("2", "1", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -534,7 +534,7 @@ func TestExists(t *testing.T) {
|
|||
defer os.RemoveAll(tmp)
|
||||
defer d.Cleanup()
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -552,7 +552,7 @@ func TestStatus(t *testing.T) {
|
|||
defer os.RemoveAll(tmp)
|
||||
defer d.Cleanup()
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -581,7 +581,7 @@ func TestApplyDiff(t *testing.T) {
|
|||
defer os.RemoveAll(tmp)
|
||||
defer d.Cleanup()
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -607,10 +607,10 @@ func TestApplyDiff(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := d.Create("2", ""); err != nil {
|
||||
if err := d.Create("2", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := d.Create("3", "2"); err != nil {
|
||||
if err := d.Create("3", "2", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -656,7 +656,7 @@ func TestMountMoreThan42Layers(t *testing.T) {
|
|||
}
|
||||
current = hash(current)
|
||||
|
||||
if err := d.Create(current, parent); err != nil {
|
||||
if err := d.Create(current, parent, ""); err != nil {
|
||||
t.Logf("Current layer %d", i)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
|
@ -77,7 +77,7 @@ func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) e
|
|||
}
|
||||
|
||||
initID := fmt.Sprintf("%s-init", id)
|
||||
if err := a.Create(initID, metadata.Image); err != nil {
|
||||
if err := a.Create(initID, metadata.Image, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -90,7 +90,7 @@ func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) e
|
|||
return err
|
||||
}
|
||||
|
||||
if err := a.Create(id, initID); err != nil {
|
||||
if err := a.Create(id, initID, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -144,7 +144,7 @@ func (a *Driver) migrateImage(m *metadata, pth string, migrated map[string]bool)
|
|||
return err
|
||||
}
|
||||
if !a.Exists(m.ID) {
|
||||
if err := a.Create(m.ID, m.ParentID); err != nil {
|
||||
if err := a.Create(m.ID, m.ParentID, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
|
@ -80,7 +80,7 @@ func getDirFd(dir *C.DIR) uintptr {
|
|||
return uintptr(C.dirfd(dir))
|
||||
}
|
||||
|
||||
func subvolCreate(path, name string) error {
|
||||
func subvolCreate(path, name string, mountLabel string) error {
|
||||
dir, err := openDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -155,13 +155,13 @@ func (d *Driver) subvolumesDirId(id string) string {
|
|||
return path.Join(d.subvolumesDir(), id)
|
||||
}
|
||||
|
||||
func (d *Driver) Create(id string, parent string) error {
|
||||
func (d *Driver) Create(id string, parent string, mountLabel string) error {
|
||||
subvolumes := path.Join(d.home, "subvolumes")
|
||||
if err := os.MkdirAll(subvolumes, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
if parent == "" {
|
||||
if err := subvolCreate(subvolumes, id); err != nil {
|
||||
if err := subvolCreate(subvolumes, id, mountLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/pkg/label"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
@ -14,6 +15,7 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
@ -677,6 +679,12 @@ func (devices *DeviceSet) deactivateDevice(hash string) error {
|
|||
utils.Debugf("[devmapper] deactivateDevice(%s)", hash)
|
||||
defer utils.Debugf("[devmapper] deactivateDevice END")
|
||||
|
||||
// Wait for the unmount to be effective,
|
||||
// by watching the value of Info.OpenCount for the device
|
||||
if err := devices.waitClose(hash); err != nil {
|
||||
utils.Errorf("Warning: error waiting for device %s to close: %s\n", hash, err)
|
||||
}
|
||||
|
||||
info := devices.Devices[hash]
|
||||
if info == nil {
|
||||
return fmt.Errorf("Unknown device %s", hash)
|
||||
|
@ -799,26 +807,20 @@ func (devices *DeviceSet) Shutdown() error {
|
|||
for _, info := range devices.Devices {
|
||||
info.lock.Lock()
|
||||
if info.mountCount > 0 {
|
||||
if err := sysUnmount(info.mountPath, 0); err != nil {
|
||||
// We use MNT_DETACH here in case it is still busy in some running
|
||||
// container. This means it'll go away from the global scope directly,
|
||||
// and the device will be released when that container dies.
|
||||
if err := sysUnmount(info.mountPath, syscall.MNT_DETACH); err != nil {
|
||||
utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err)
|
||||
}
|
||||
|
||||
if err := devices.deactivateDevice(info.Hash); err != nil {
|
||||
utils.Debugf("Shutdown deactivate %s , error: %s\n", info.Hash, err)
|
||||
}
|
||||
}
|
||||
info.lock.Unlock()
|
||||
}
|
||||
|
||||
for _, d := range devices.Devices {
|
||||
d.lock.Lock()
|
||||
|
||||
if err := devices.waitClose(d.Hash); err != nil {
|
||||
utils.Errorf("Warning: error waiting for device %s to unmount: %s\n", d.Hash, err)
|
||||
}
|
||||
if err := devices.deactivateDevice(d.Hash); err != nil {
|
||||
utils.Debugf("Shutdown deactivate %s , error: %s\n", d.Hash, err)
|
||||
}
|
||||
|
||||
d.lock.Unlock()
|
||||
}
|
||||
|
||||
if err := devices.deactivatePool(); err != nil {
|
||||
utils.Debugf("Shutdown deactivate pool , error: %s\n", err)
|
||||
}
|
||||
|
@ -826,7 +828,7 @@ func (devices *DeviceSet) Shutdown() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (devices *DeviceSet) MountDevice(hash, path string) error {
|
||||
func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) error {
|
||||
devices.Lock()
|
||||
defer devices.Unlock()
|
||||
|
||||
|
@ -858,9 +860,11 @@ func (devices *DeviceSet) MountDevice(hash, path string) error {
|
|||
|
||||
var flags uintptr = sysMsMgcVal
|
||||
|
||||
err := sysMount(info.DevName(), path, "ext4", flags, "discard")
|
||||
mountOptions := label.FormatMountLabel("discard", mountLabel)
|
||||
err := sysMount(info.DevName(), path, "ext4", flags, mountOptions)
|
||||
if err != nil && err == sysEInval {
|
||||
err = sysMount(info.DevName(), path, "ext4", flags, "")
|
||||
mountOptions = label.FormatMountLabel(mountLabel, "")
|
||||
err = sysMount(info.DevName(), path, "ext4", flags, mountOptions)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err)
|
||||
|
@ -920,14 +924,11 @@ func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error {
|
|||
return err
|
||||
}
|
||||
utils.Debugf("[devmapper] Unmount done")
|
||||
// Wait for the unmount to be effective,
|
||||
// by watching the value of Info.OpenCount for the device
|
||||
if err := devices.waitClose(hash); err != nil {
|
||||
|
||||
if err := devices.deactivateDevice(hash); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
devices.deactivateDevice(hash)
|
||||
|
||||
info.mountPath = ""
|
||||
|
||||
return nil
|
||||
|
|
|
@ -22,7 +22,8 @@ func init() {
|
|||
|
||||
type Driver struct {
|
||||
*DeviceSet
|
||||
home string
|
||||
home string
|
||||
MountLabel string
|
||||
}
|
||||
|
||||
var Init = func(home string) (graphdriver.Driver, error) {
|
||||
|
@ -60,13 +61,13 @@ func (d *Driver) Cleanup() error {
|
|||
return d.DeviceSet.Shutdown()
|
||||
}
|
||||
|
||||
func (d *Driver) Create(id, parent string) error {
|
||||
func (d *Driver) Create(id, parent string, mountLabel string) error {
|
||||
d.MountLabel = mountLabel
|
||||
if err := d.DeviceSet.AddDevice(id, parent); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mp := path.Join(d.home, "mnt", id)
|
||||
if err := d.mount(id, mp); err != nil {
|
||||
if err := d.mount(id, mp, d.MountLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -116,7 +117,7 @@ func (d *Driver) Remove(id string) error {
|
|||
|
||||
func (d *Driver) Get(id string) (string, error) {
|
||||
mp := path.Join(d.home, "mnt", id)
|
||||
if err := d.mount(id, mp); err != nil {
|
||||
if err := d.mount(id, mp, d.MountLabel); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
|
@ -129,13 +130,13 @@ func (d *Driver) Put(id string) {
|
|||
}
|
||||
}
|
||||
|
||||
func (d *Driver) mount(id, mountPoint string) error {
|
||||
func (d *Driver) mount(id, mountPoint string, mountLabel string) error {
|
||||
// Create the target directories if they don't exist
|
||||
if err := osMkdirAll(mountPoint, 0755); err != nil && !osIsExist(err) {
|
||||
return err
|
||||
}
|
||||
// Mount the device
|
||||
return d.DeviceSet.MountDevice(id, mountPoint)
|
||||
return d.DeviceSet.MountDevice(id, mountPoint, mountLabel)
|
||||
}
|
||||
|
||||
func (d *Driver) Exists(id string) bool {
|
||||
|
|
|
@ -494,7 +494,7 @@ func TestDriverCreate(t *testing.T) {
|
|||
"?ioctl.loopctlgetfree",
|
||||
)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
calls.Assert(t,
|
||||
|
@ -612,7 +612,7 @@ func TestDriverRemove(t *testing.T) {
|
|||
"?ioctl.loopctlgetfree",
|
||||
)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -668,7 +668,7 @@ func TestCleanup(t *testing.T) {
|
|||
|
||||
mountPoints := make([]string, 2)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Mount the id
|
||||
|
@ -678,7 +678,7 @@ func TestCleanup(t *testing.T) {
|
|||
}
|
||||
mountPoints[0] = p
|
||||
|
||||
if err := d.Create("2", "1"); err != nil {
|
||||
if err := d.Create("2", "1", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -731,7 +731,7 @@ func TestNotMounted(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer cleanup(d)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -749,7 +749,7 @@ func TestMounted(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer cleanup(d)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := d.Get("1"); err != nil {
|
||||
|
@ -769,7 +769,7 @@ func TestInitCleanedDriver(t *testing.T) {
|
|||
t.Skip("FIXME: not a unit test")
|
||||
d := newDriver(t)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := d.Get("1"); err != nil {
|
||||
|
@ -797,7 +797,7 @@ func TestMountMountedDriver(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer cleanup(d)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -816,7 +816,7 @@ func TestGetReturnsValidDevice(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer cleanup(d)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -844,7 +844,7 @@ func TestDriverGetSize(t *testing.T) {
|
|||
d := newDriver(t)
|
||||
defer cleanup(d)
|
||||
|
||||
if err := d.Create("1", ""); err != nil {
|
||||
if err := d.Create("1", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@ type InitFunc func(root string) (Driver, error)
|
|||
type Driver interface {
|
||||
String() string
|
||||
|
||||
Create(id, parent string) error
|
||||
Create(id, parent string, mountLabel string) error
|
||||
Remove(id string) error
|
||||
|
||||
Get(id string) (dir string, err error)
|
||||
|
@ -39,10 +39,9 @@ var (
|
|||
// Slice of drivers that should be used in an order
|
||||
priority = []string{
|
||||
"aufs",
|
||||
"btrfs",
|
||||
"devicemapper",
|
||||
"vfs",
|
||||
// experimental, has to be enabled manually for now
|
||||
"btrfs",
|
||||
}
|
||||
)
|
||||
|
||||
|
|
|
@ -42,7 +42,7 @@ func copyDir(src, dst string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d *Driver) Create(id string, parent string) error {
|
||||
func (d *Driver) Create(id string, parent string, mountLabel string) error {
|
||||
dir := d.dir(id)
|
||||
if err := os.MkdirAll(path.Dir(dir), 0700); err != nil {
|
||||
return err
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
package lxc
|
||||
package bridge
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
@ -93,6 +93,12 @@ func InitDriver(job *engine.Job) engine.Status {
|
|||
network = addr.(*net.IPNet)
|
||||
} else {
|
||||
network = addr.(*net.IPNet)
|
||||
// validate that the bridge ip matches the ip specified by BridgeIP
|
||||
if bridgeIP != "" {
|
||||
if !network.IP.Equal(net.ParseIP(bridgeIP)) {
|
||||
return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", network.IP, bridgeIP)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Configure iptables for link support
|
|
@ -100,22 +100,30 @@ func ReleaseAll() error {
|
|||
}
|
||||
|
||||
func registerDynamicPort(ip net.IP, proto string) (int, error) {
|
||||
allocated := defaultAllocatedPorts[proto]
|
||||
|
||||
port := nextPort(proto)
|
||||
if port > EndPortRange {
|
||||
return 0, ErrPortExceedsRange
|
||||
}
|
||||
|
||||
if !equalsDefault(ip) {
|
||||
registerIP(ip)
|
||||
|
||||
ipAllocated := otherAllocatedPorts[ip.String()][proto]
|
||||
|
||||
port, err := findNextPort(proto, ipAllocated)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
ipAllocated.Push(port)
|
||||
return port, nil
|
||||
|
||||
} else {
|
||||
|
||||
allocated := defaultAllocatedPorts[proto]
|
||||
|
||||
port, err := findNextPort(proto, allocated)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
allocated.Push(port)
|
||||
return port, nil
|
||||
}
|
||||
return port, nil
|
||||
}
|
||||
|
||||
func registerSetPort(ip net.IP, proto string, port int) error {
|
||||
|
@ -142,6 +150,17 @@ func equalsDefault(ip net.IP) bool {
|
|||
return ip == nil || ip.Equal(defaultIP)
|
||||
}
|
||||
|
||||
func findNextPort(proto string, allocated *collections.OrderedIntSet) (int, error) {
|
||||
port := nextPort(proto)
|
||||
for allocated.Exists(port) {
|
||||
port = nextPort(proto)
|
||||
}
|
||||
if port > EndPortRange {
|
||||
return 0, ErrPortExceedsRange
|
||||
}
|
||||
return port, nil
|
||||
}
|
||||
|
||||
func nextPort(proto string) int {
|
||||
c := currentDynamicPort[proto] + 1
|
||||
currentDynamicPort[proto] = c
|
||||
|
|
|
@ -181,4 +181,20 @@ func TestPortAllocation(t *testing.T) {
|
|||
if _, err := RequestPort(ip, "tcp", 80); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
port, err = RequestPort(ip, "tcp", 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
port2, err := RequestPort(ip, "tcp", port+1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
port3, err := RequestPort(ip, "tcp", 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if port3 == port2 {
|
||||
t.Fatal("Requesting a dynamic port should never allocate a used port")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@ import (
|
|||
"github.com/dotcloud/docker/runtime/execdriver/lxc"
|
||||
"github.com/dotcloud/docker/runtime/graphdriver"
|
||||
_ "github.com/dotcloud/docker/runtime/graphdriver/vfs"
|
||||
_ "github.com/dotcloud/docker/runtime/networkdriver/lxc"
|
||||
_ "github.com/dotcloud/docker/runtime/networkdriver/bridge"
|
||||
"github.com/dotcloud/docker/runtime/networkdriver/portallocator"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io"
|
||||
|
@ -467,7 +467,7 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe
|
|||
}
|
||||
|
||||
initID := fmt.Sprintf("%s-init", container.ID)
|
||||
if err := runtime.driver.Create(initID, img.ID); err != nil {
|
||||
if err := runtime.driver.Create(initID, img.ID, config.Context["mount_label"]); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
initPath, err := runtime.driver.Get(initID)
|
||||
|
@ -480,7 +480,7 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe
|
|||
return nil, nil, err
|
||||
}
|
||||
|
||||
if err := runtime.driver.Create(container.ID, initID); err != nil {
|
||||
if err := runtime.driver.Create(container.ID, initID, config.Context["mount_label"]); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
resolvConf, err := utils.GetResolvConf()
|
||||
|
|
|
@ -28,6 +28,9 @@ func (s *State) String() string {
|
|||
}
|
||||
return fmt.Sprintf("Up %s", utils.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
|
||||
}
|
||||
if s.FinishedAt.IsZero() {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, utils.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
|
||||
}
|
||||
|
||||
|
|
|
@ -395,9 +395,18 @@ func (b *buildFile) checkPathForAddition(orig string) error {
|
|||
|
||||
func (b *buildFile) addContext(container *runtime.Container, orig, dest string, remote bool) error {
|
||||
var (
|
||||
err error
|
||||
origPath = path.Join(b.contextPath, orig)
|
||||
destPath = path.Join(container.RootfsPath(), dest)
|
||||
)
|
||||
|
||||
if destPath != container.RootfsPath() {
|
||||
destPath, err = utils.FollowSymlinkInScope(destPath, container.RootfsPath())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Preserve the trailing '/'
|
||||
if strings.HasSuffix(dest, "/") {
|
||||
destPath = destPath + "/"
|
||||
|
@ -736,20 +745,19 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
|
|||
if len(fileBytes) == 0 {
|
||||
return "", ErrDockerfileEmpty
|
||||
}
|
||||
dockerfile := string(fileBytes)
|
||||
dockerfile = lineContinuation.ReplaceAllString(dockerfile, "")
|
||||
stepN := 0
|
||||
var (
|
||||
dockerfile = lineContinuation.ReplaceAllString(stripComments(fileBytes), "")
|
||||
stepN = 0
|
||||
)
|
||||
for _, line := range strings.Split(dockerfile, "\n") {
|
||||
line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n")
|
||||
// Skip comments and empty line
|
||||
if len(line) == 0 || line[0] == '#' {
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil {
|
||||
return "", err
|
||||
}
|
||||
stepN += 1
|
||||
|
||||
}
|
||||
if b.image != "" {
|
||||
fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image))
|
||||
|
@ -786,6 +794,20 @@ func (b *buildFile) BuildStep(name, expression string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func stripComments(raw []byte) string {
|
||||
var (
|
||||
out []string
|
||||
lines = strings.Split(string(raw), "\n")
|
||||
)
|
||||
for _, l := range lines {
|
||||
if len(l) == 0 || l[0] == '#' {
|
||||
continue
|
||||
}
|
||||
out = append(out, l)
|
||||
}
|
||||
return strings.Join(out, "\n")
|
||||
}
|
||||
|
||||
func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile {
|
||||
return &buildFile{
|
||||
runtime: srv.runtime,
|
||||
|
|
|
@ -222,6 +222,10 @@ func (srv *Server) Events(job *engine.Job) engine.Status {
|
|||
|
||||
listener := make(chan utils.JSONMessage)
|
||||
srv.Lock()
|
||||
if old, ok := srv.listeners[from]; ok {
|
||||
delete(srv.listeners, from)
|
||||
close(old)
|
||||
}
|
||||
srv.listeners[from] = listener
|
||||
srv.Unlock()
|
||||
job.Stdout.Write(nil) // flush
|
||||
|
|
|
@ -25,6 +25,11 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
type KeyValuePair struct {
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
|
||||
// A common interface to access the Fatal method of
|
||||
// both testing.B and testing.T.
|
||||
type Fataler interface {
|
||||
|
@ -1071,3 +1076,11 @@ func ReadSymlinkedDirectory(path string) (string, error) {
|
|||
}
|
||||
return realPath, nil
|
||||
}
|
||||
|
||||
func ParseKeyValueOpt(opt string) (string, string, error) {
|
||||
parts := strings.SplitN(opt, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
|
||||
}
|
||||
return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue