Merge pull request #21518 from tiborvass/cherrypicks-1.11.0-rc2

Cherrypicks 1.11.0 rc2
Tibor Vass committed on 2016-03-25 15:09:41 -04:00
commit 31755449e1
74 changed files with 750 additions and 497 deletions


@ -38,9 +38,11 @@ RUN echo deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty main > /etc/apt/s
# Packaged dependencies
RUN apt-get update && apt-get install -y \
apparmor \
apt-utils \
aufs-tools \
automake \
bash-completion \
bsdmainutils \
btrfs-tools \
build-essential \
clang-3.8 \
@ -64,12 +66,12 @@ RUN apt-get update && apt-get install -y \
python-mock \
python-pip \
python-websocket \
s3cmd=1.5.0* \
ubuntu-zfs \
xfsprogs \
libzfs-dev \
tar \
--no-install-recommends \
&& pip install awscli==1.10.15 \
&& ln -snf /usr/bin/clang-3.8 /usr/local/bin/clang \
&& ln -snf /usr/bin/clang++-3.8 /usr/local/bin/clang++
@ -119,7 +121,7 @@ RUN set -x \
# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
# will need updating, to avoid errors. Ping #docker-maintainers on IRC
# with a heads-up.
ENV GO_VERSION 1.6
ENV GO_VERSION 1.5.3
RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" \
| tar -xzC /usr/local
ENV PATH /go/bin:/usr/local/go/bin:$PATH
@ -170,12 +172,13 @@ RUN set -x \
# Install notary server
ENV NOTARY_VERSION docker-v1.11-3
RUN set -x \
&& export GO15VENDOREXPERIMENT=1 \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
&& rm -rf "$GOPATH"
@ -186,13 +189,6 @@ RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
&& pip install -r test-requirements.txt
# Setup s3cmd config
RUN { \
echo '[default]'; \
echo 'access_key=$AWS_ACCESS_KEY'; \
echo 'secret_key=$AWS_SECRET_KEY'; \
} > ~/.s3cfg
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
@ -202,7 +198,7 @@ RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
ENV DOCKER_BUILDTAGS apparmor seccomp selinux
# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc
@ -247,7 +243,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install runc
ENV RUNC_COMMIT bbde9c426ff363d813b8722f0744115c13b408b6
ENV RUNC_COMMIT d563bd134293c1026976a8f5764d5df5612f1dbf
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@ -257,7 +253,7 @@ RUN set -x \
&& cp runc /usr/local/bin/docker-runc
# Install containerd
ENV CONTAINERD_COMMIT 142e22a4dce86f3b8ce068a0b043489d21976bb8
ENV CONTAINERD_COMMIT c761085e92be09df9d5298f852c328b538f5dc2f
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \


@ -119,12 +119,13 @@ RUN set -x \
# Install notary server
ENV NOTARY_VERSION docker-v1.11-3
RUN set -x \
&& export GO15VENDOREXPERIMENT=1 \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
&& rm -rf "$GOPATH"
@ -135,13 +136,6 @@ RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
&& pip install -r test-requirements.txt
# Setup s3cmd config
RUN { \
echo '[default]'; \
echo 'access_key=$AWS_ACCESS_KEY'; \
echo 'secret_key=$AWS_SECRET_KEY'; \
} > ~/.s3cfg
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
@ -151,7 +145,7 @@ RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
ENV DOCKER_BUILDTAGS apparmor seccomp selinux
# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc
@ -187,7 +181,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install runc
ENV RUNC_COMMIT bbde9c426ff363d813b8722f0744115c13b408b6
ENV RUNC_COMMIT d563bd134293c1026976a8f5764d5df5612f1dbf
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@ -197,7 +191,7 @@ RUN set -x \
&& cp runc /usr/local/bin/docker-runc
# Install containerd
ENV CONTAINERD_COMMIT 142e22a4dce86f3b8ce068a0b043489d21976bb8
ENV CONTAINERD_COMMIT c761085e92be09df9d5298f852c328b538f5dc2f
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \


@ -65,8 +65,8 @@ RUN cd /usr/local/lvm2 \
# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
# Install Go
ENV GO_VERSION 1.6
RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-armv6l.tar.gz" \
ENV GO_VERSION 1.5.3
RUN curl -fsSL "http://dave.cheney.net/paste/go${GO_VERSION}.linux-arm.tar.gz" \
| tar -xzC /usr/local
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
@ -128,12 +128,13 @@ RUN set -x \
# Install notary server
ENV NOTARY_VERSION docker-v1.11-3
RUN set -x \
&& export GO15VENDOREXPERIMENT=1 \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
&& rm -rf "$GOPATH"
@ -153,7 +154,7 @@ RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
ENV DOCKER_BUILDTAGS apparmor seccomp selinux
# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc
@ -197,7 +198,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install runc
ENV RUNC_COMMIT bbde9c426ff363d813b8722f0744115c13b408b6
ENV RUNC_COMMIT d563bd134293c1026976a8f5764d5df5612f1dbf
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@ -207,7 +208,7 @@ RUN set -x \
&& cp runc /usr/local/bin/docker-runc
# Install containerd
ENV CONTAINERD_COMMIT 142e22a4dce86f3b8ce068a0b043489d21976bb8
ENV CONTAINERD_COMMIT c761085e92be09df9d5298f852c328b538f5dc2f
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \


@ -74,7 +74,7 @@ WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor seccomp selinux
# Install runc
ENV RUNC_COMMIT bbde9c426ff363d813b8722f0744115c13b408b6
ENV RUNC_COMMIT d563bd134293c1026976a8f5764d5df5612f1dbf
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@ -84,7 +84,7 @@ RUN set -x \
&& cp runc /usr/local/bin/docker-runc
# Install containerd
ENV CONTAINERD_COMMIT 142e22a4dce86f3b8ce068a0b043489d21976bb8
ENV CONTAINERD_COMMIT c761085e92be09df9d5298f852c328b538f5dc2f
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \


@ -74,8 +74,8 @@ RUN cd /usr/local/lvm2 \
# TODO install Go, using gccgo as GOROOT_BOOTSTRAP (Go 1.5+ supports ppc64le properly)
# possibly a ppc64le/golang image?
## BUILD GOLANG 1.6
ENV GO_VERSION 1.6
## BUILD GOLANG 1.5.3
ENV GO_VERSION 1.5.3
ENV GO_DOWNLOAD_URL https://golang.org/dl/go${GO_VERSION}.src.tar.gz
ENV GO_DOWNLOAD_SHA256 a96cce8ce43a9bf9b2a4c7d470bc7ee0cb00410da815980681c8353218dcf146
ENV GOROOT_BOOTSTRAP /usr/local
@ -129,12 +129,13 @@ RUN set -x \
# Install notary and notary-server
ENV NOTARY_VERSION docker-v1.11-3
RUN set -x \
&& export GO15VENDOREXPERIMENT=1 \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
&& rm -rf "$GOPATH"
@ -154,7 +155,7 @@ RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux
ENV DOCKER_BUILDTAGS apparmor selinux
# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc
@ -198,17 +199,17 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install runc
ENV RUNC_COMMIT bbde9c426ff363d813b8722f0744115c13b408b6
ENV RUNC_COMMIT d563bd134293c1026976a8f5764d5df5612f1dbf
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
&& cd "$GOPATH/src/github.com/opencontainers/runc" \
&& git checkout -q "$RUNC_COMMIT" \
&& make static BUILDTAGS="seccomp apparmor selinux" \
&& make static BUILDTAGS="apparmor selinux" \
&& cp runc /usr/local/bin/docker-runc
# Install containerd
ENV CONTAINERD_COMMIT 142e22a4dce86f3b8ce068a0b043489d21976bb8
ENV CONTAINERD_COMMIT c761085e92be09df9d5298f852c328b538f5dc2f
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \


@ -110,11 +110,12 @@ RUN set -x \
# Install notary server
ENV NOTARY_VERSION docker-v1.11-3
RUN set -x \
&& export GO15VENDOREXPERIMENT=1 \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
go build -gccgoflags=-lpthread -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& rm -rf "$GOPATH"
# Get the "docker-py" source so we can run their integration tests
@ -133,7 +134,7 @@ RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux
ENV DOCKER_BUILDTAGS apparmor selinux
# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc
@ -177,7 +178,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install runc
ENV RUNC_COMMIT bbde9c426ff363d813b8722f0744115c13b408b6
ENV RUNC_COMMIT d563bd134293c1026976a8f5764d5df5612f1dbf
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@ -187,7 +188,7 @@ RUN set -x \
&& cp runc /usr/local/bin/docker-runc
# Install containerd
ENV CONTAINERD_COMMIT 142e22a4dce86f3b8ce068a0b043489d21976bb8
ENV CONTAINERD_COMMIT c761085e92be09df9d5298f852c328b538f5dc2f
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \


@ -30,7 +30,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
&& rm -rf /var/lib/apt/lists/*
# Install runc
ENV RUNC_COMMIT bbde9c426ff363d813b8722f0744115c13b408b6
ENV RUNC_COMMIT d563bd134293c1026976a8f5764d5df5612f1dbf
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@ -40,7 +40,7 @@ RUN set -x \
&& cp runc /usr/local/bin/docker-runc
# Install containerd
ENV CONTAINERD_COMMIT 142e22a4dce86f3b8ce068a0b043489d21976bb8
ENV CONTAINERD_COMMIT c761085e92be09df9d5298f852c328b538f5dc2f
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \


@ -38,9 +38,9 @@
FROM windowsservercore
# Environment variable notes:
# - GOLANG_VERSION must consistent with 'Dockerfile' used by Linux'.
# - GO_VERSION must consistent with 'Dockerfile' used by Linux'.
# - FROM_DOCKERFILE is used for detection of building within a container.
ENV GOLANG_VERSION=1.6 \
ENV GO_VERSION=1.5.3 \
GIT_LOCATION=https://github.com/git-for-windows/git/releases/download/v2.7.2.windows.1/Git-2.7.2-64-bit.exe \
RSRC_COMMIT=ba14da1f827188454a4591717fff29999010887f \
GOPATH=C:/go;C:/go/src/github.com/docker/docker/vendor \
@ -63,7 +63,7 @@ RUN \
Download-File %GIT_LOCATION% gitsetup.exe; \
\
Write-Host INFO: Downloading go...; \
Download-File https://storage.googleapis.com/golang/go%GOLANG_VERSION%.windows-amd64.msi go.msi; \
Download-File https://storage.googleapis.com/golang/go%GO_VERSION%.windows-amd64.msi go.msi; \
\
Write-Host INFO: Downloading compiler 1 of 3...; \
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip gcc.zip; \


@ -13,3 +13,4 @@ ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS apparmor selinux
ENV RUNC_BUILDTAGS apparmor selinux


@ -13,3 +13,4 @@ ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS apparmor seccomp selinux
ENV RUNC_BUILDTAGS apparmor seccomp selinux


@ -14,3 +14,4 @@ ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS apparmor selinux
ENV RUNC_BUILDTAGS apparmor selinux


@ -42,6 +42,7 @@ for version in "${versions[@]}"; do
echo >> "$version/Dockerfile"
extraBuildTags=
runcBuildTags=
# this list is sorted alphabetically; please keep it that way
packages=(
@ -64,7 +65,7 @@ for version in "${versions[@]}"; do
# packaging for "sd-journal.h" and libraries varies
case "$suite" in
precise|wheezy) ;;
sid|stretch|wily) packages+=( libsystemd-dev );;
sid|stretch|wily|xenial) packages+=( libsystemd-dev );;
*) packages+=( libsystemd-journal-dev );;
esac
@ -73,9 +74,11 @@ for version in "${versions[@]}"; do
case "$suite" in
precise|wheezy|jessie|trusty)
packages=( "${packages[@]/libseccomp-dev}" )
runcBuildTags="apparmor selinux"
;;
*)
extraBuildTags+=' seccomp'
runcBuildTags="apparmor seccomp selinux"
;;
esac
@ -124,4 +127,5 @@ for version in "${versions[@]}"; do
buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' )
echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile"
echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile"
done


@ -13,3 +13,4 @@ ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS apparmor exclude_graphdriver_btrfs exclude_graphdriver_devicemapper selinux
ENV RUNC_BUILDTAGS apparmor selinux


@ -13,3 +13,4 @@ ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS apparmor selinux
ENV RUNC_BUILDTAGS apparmor selinux


@ -13,3 +13,4 @@ ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS apparmor seccomp selinux
ENV RUNC_BUILDTAGS apparmor seccomp selinux


@ -0,0 +1,16 @@
#
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"!
#
FROM ubuntu:xenial
RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
ENV GO_VERSION 1.5.3
RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS apparmor seccomp selinux
ENV RUNC_BUILDTAGS apparmor seccomp selinux


@ -6,7 +6,7 @@ FROM centos:7
RUN yum groupinstall -y "Development Tools"
RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs
RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar
RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git
ENV GO_VERSION 1.5.3
RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
@ -15,4 +15,5 @@ ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS selinux
ENV RUNC_BUILDTAGS selinux


@ -5,7 +5,7 @@
FROM fedora:22
RUN dnf install -y @development-tools fedora-packager
RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar
RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git
ENV GO_VERSION 1.5.3
RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
@ -14,4 +14,5 @@ ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS seccomp selinux
ENV RUNC_BUILDTAGS seccomp selinux


@ -5,7 +5,7 @@
FROM fedora:23
RUN dnf install -y @development-tools fedora-packager
RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar
RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git
ENV GO_VERSION 1.5.3
RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
@ -14,4 +14,5 @@ ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS seccomp selinux
ENV RUNC_BUILDTAGS seccomp selinux


@ -39,6 +39,7 @@ for version in "${versions[@]}"; do
echo >> "$version/Dockerfile"
extraBuildTags=
runcBuildTags=
case "$from" in
centos:*)
@ -77,6 +78,7 @@ for version in "${versions[@]}"; do
sqlite-devel # for "sqlite3.h"
systemd-devel # for "sd-journal.h" and libraries
tar # older versions of dev-tools do not have tar
git # required for containerd and runc clone
)
case "$from" in
@ -98,9 +100,11 @@ for version in "${versions[@]}"; do
case "$from" in
opensuse:*|oraclelinux:*|centos:7)
packages=( "${packages[@]/libseccomp-devel}" )
runcBuildTags="selinux"
;;
*)
extraBuildTags+=' seccomp'
runcBuildTags="seccomp selinux"
;;
esac
@ -148,6 +152,7 @@ for version in "${versions[@]}"; do
buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' )
echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile"
echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile"
echo >> "$version/Dockerfile"
case "$from" in


@ -5,7 +5,7 @@
FROM opensuse:13.2
RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build
RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar systemd-rpm-macros
RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git systemd-rpm-macros
ENV GO_VERSION 1.5.3
RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
@ -14,4 +14,5 @@ ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS selinux
ENV RUNC_BUILDTAGS selinux


@ -5,7 +5,7 @@
FROM oraclelinux:6
RUN yum groupinstall -y "Development Tools"
RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel tar
RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel tar git
RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4
RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek
@ -17,6 +17,7 @@ ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS selinux
ENV RUNC_BUILDTAGS selinux
ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \
-I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \


@ -5,7 +5,7 @@
FROM oraclelinux:7
RUN yum groupinstall -y "Development Tools"
RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar
RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git
ENV GO_VERSION 1.5.3
RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
@ -14,4 +14,5 @@ ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS selinux
ENV RUNC_BUILDTAGS selinux


@ -410,7 +410,7 @@ __docker_complete_log_drivers() {
__docker_complete_log_options() {
# see docs/reference/logging/index.md
local awslogs_options="awslogs-region awslogs-group awslogs-stream"
local fluentd_options="env fluentd-address labels tag"
local fluentd_options="env fluentd-address fluentd-async-connect fluentd-buffer-limit fluentd-retry-wait fluentd-max-retries labels tag"
local gcplogs_options="env gcp-log-cmd gcp-project labels"
local gelf_options="env gelf-address gelf-compression-level gelf-compression-type labels tag"
local journald_options="env labels tag"
@ -459,6 +459,10 @@ __docker_complete_log_options() {
__docker_complete_log_driver_options() {
local key=$(__docker_map_key_of_current_option '--log-opt')
case "$key" in
fluentd-async-connect)
COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) )
return
;;
gelf-address)
COMPREPLY=( $( compgen -W "udp" -S "://" -- "${cur##*=}" ) )
__docker_nospace
@ -1211,7 +1215,7 @@ _docker_load() {
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--help --input -i" -- "$cur" ) )
COMPREPLY=( $( compgen -W "--help --input -i --quiet -q" -- "$cur" ) )
;;
esac
}
@ -1476,6 +1480,11 @@ _docker_ps() {
COMPREPLY=( $( compgen -W "created dead exited paused restarting running" -- "${cur##*=}" ) )
return
;;
volume)
cur="${cur##*=}"
__docker_complete_volumes
return
;;
esac
case "$prev" in
@ -1483,7 +1492,7 @@ _docker_ps() {
__docker_complete_containers_all
;;
--filter|-f)
COMPREPLY=( $( compgen -S = -W "ancestor exited id label name status" -- "$cur" ) )
COMPREPLY=( $( compgen -S = -W "ancestor exited id label name status volume" -- "$cur" ) )
__docker_nospace
return
;;


@ -222,6 +222,7 @@ func (daemon *Daemon) exportContainerRw(container *container.Container) (archive
archive, err := container.RWLayer.TarStream()
if err != nil {
daemon.Unmount(container) // logging is already handled in the `Unmount` function
return nil, err
}
return ioutils.NewReadCloserWrapper(archive, func() error {


@ -295,7 +295,18 @@ func specDevice(d *configs.Device) specs.Device {
}
}
func getDevicesFromPath(deviceMapping containertypes.DeviceMapping) (devs []specs.Device, err error) {
func specDeviceCgroup(d *configs.Device) specs.DeviceCgroup {
t := string(d.Type)
return specs.DeviceCgroup{
Allow: true,
Type: &t,
Major: &d.Major,
Minor: &d.Minor,
Access: &d.Permissions,
}
}
func getDevicesFromPath(deviceMapping containertypes.DeviceMapping) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) {
resolvedPathOnHost := deviceMapping.PathOnHost
// check if it is a symbolic link
@ -309,7 +320,7 @@ func getDevicesFromPath(deviceMapping containertypes.DeviceMapping) (devs []spec
// if there was no error, return the device
if err == nil {
device.Path = deviceMapping.PathInContainer
return append(devs, specDevice(device)), nil
return append(devs, specDevice(device)), append(devPermissions, specDeviceCgroup(device)), nil
}
// if the device is not a device node
@ -330,6 +341,7 @@ func getDevicesFromPath(deviceMapping containertypes.DeviceMapping) (devs []spec
// add the device to userSpecified devices
childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, deviceMapping.PathInContainer, 1)
devs = append(devs, specDevice(childDevice))
devPermissions = append(devPermissions, specDeviceCgroup(childDevice))
return nil
})
@ -337,10 +349,10 @@ func getDevicesFromPath(deviceMapping containertypes.DeviceMapping) (devs []spec
}
if len(devs) > 0 {
return devs, nil
return devs, devPermissions, nil
}
return devs, fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err)
return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err)
}
func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Device {


@ -130,7 +130,7 @@ func getBlkioWeightDevices(config containertypes.Resources) ([]specs.WeightDevic
weight := weightDevice.Weight
d := specs.WeightDevice{Weight: &weight}
d.Major = int64(stat.Rdev / 256)
d.Major = int64(stat.Rdev % 256)
d.Minor = int64(stat.Rdev % 256)
blkioWeightDevices = append(blkioWeightDevices, d)
}
@ -187,7 +187,7 @@ func getBlkioReadIOpsDevices(config containertypes.Resources) ([]specs.ThrottleD
rate := iopsDevice.Rate
d := specs.ThrottleDevice{Rate: &rate}
d.Major = int64(stat.Rdev / 256)
d.Major = int64(stat.Rdev % 256)
d.Minor = int64(stat.Rdev % 256)
blkioReadIOpsDevice = append(blkioReadIOpsDevice, d)
}
@ -205,7 +205,7 @@ func getBlkioWriteIOpsDevices(config containertypes.Resources) ([]specs.Throttle
rate := iopsDevice.Rate
d := specs.ThrottleDevice{Rate: &rate}
d.Major = int64(stat.Rdev / 256)
d.Major = int64(stat.Rdev % 256)
d.Minor = int64(stat.Rdev % 256)
blkioWriteIOpsDevice = append(blkioWriteIOpsDevice, d)
}
@ -223,7 +223,7 @@ func getBlkioReadBpsDevices(config containertypes.Resources) ([]specs.ThrottleDe
rate := bpsDevice.Rate
d := specs.ThrottleDevice{Rate: &rate}
d.Major = int64(stat.Rdev / 256)
d.Major = int64(stat.Rdev % 256)
d.Minor = int64(stat.Rdev % 256)
blkioReadBpsDevice = append(blkioReadBpsDevice, d)
}
@ -241,7 +241,7 @@ func getBlkioWriteBpsDevices(config containertypes.Resources) ([]specs.ThrottleD
rate := bpsDevice.Rate
d := specs.ThrottleDevice{Rate: &rate}
d.Major = int64(stat.Rdev / 256)
d.Major = int64(stat.Rdev % 256)
d.Minor = int64(stat.Rdev % 256)
blkioWriteBpsDevice = append(blkioWriteBpsDevice, d)
}
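Note on the hunks directly above (illustration, not part of the commit): each getBlkio*Devices helper previously assigned d.Major twice, so the change makes the second assignment fill d.Minor with stat.Rdev % 256. The arithmetic follows the legacy device-number encoding where major = rdev/256 and minor = rdev%256. A minimal standalone Go sketch of that split, using an assumed rdev value for /dev/sda (device 8:0):

package main

import "fmt"

// splitDev mirrors the arithmetic used by the getBlkio*Devices helpers:
// with the legacy 8-bit encoding, the major number is rdev/256 and the
// minor number is rdev%256.
func splitDev(rdev uint64) (major, minor int64) {
	return int64(rdev / 256), int64(rdev % 256)
}

func main() {
	// 0x0800 (2048) is the classic rdev of /dev/sda, i.e. device 8:0.
	major, minor := splitDev(0x0800)
	fmt.Printf("major=%d minor=%d\n", major, minor) // prints: major=8 minor=0
}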


@ -29,6 +29,7 @@ import (
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"sync"
"syscall"
@ -64,21 +65,13 @@ func init() {
graphdriver.Register("aufs", Init)
}
type data struct {
referenceCount int
path string
}
// Driver contains information about the filesystem mounted.
// root of the filesystem
// sync.Mutex to protect against concurrent modifications
// active maps mount id to the count
type Driver struct {
root string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
sync.Mutex // Protects concurrent modification to active
active map[string]*data
root string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
pathCacheLock sync.Mutex
pathCache map[string]string
}
// Init returns a new AUFS driver.
@ -111,10 +104,10 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
}
a := &Driver{
root: root,
active: make(map[string]*data),
uidMaps: uidMaps,
gidMaps: gidMaps,
root: root,
uidMaps: uidMaps,
gidMaps: gidMaps,
pathCache: make(map[string]string),
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
@ -228,9 +221,7 @@ func (a *Driver) Create(id, parent, mountLabel string) error {
}
}
}
a.Lock()
a.active[id] = &data{}
a.Unlock()
return nil
}
@ -259,108 +250,91 @@ func (a *Driver) createDirsFor(id string) error {
// Remove will unmount and remove the given id.
func (a *Driver) Remove(id string) error {
// Protect the a.active from concurrent access
a.Lock()
defer a.Unlock()
m := a.active[id]
if m != nil {
if m.referenceCount > 0 {
return nil
}
// Make sure the dir is umounted first
if err := a.unmount(m); err != nil {
return err
}
a.pathCacheLock.Lock()
mountpoint, exists := a.pathCache[id]
a.pathCacheLock.Unlock()
if !exists {
mountpoint = a.getMountpoint(id)
}
tmpDirs := []string{
"mnt",
"diff",
if err := a.unmount(mountpoint); err != nil {
// no need to return here, we can still try to remove since the `Rename` will fail below if still mounted
logrus.Debugf("aufs: error while unmounting %s: %v", mountpoint, err)
}
// Atomically remove each directory in turn by first moving it out of the
// way (so that docker doesn't find it anymore) before doing removal of
// the whole tree.
for _, p := range tmpDirs {
realPath := path.Join(a.rootPath(), p, id)
tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id))
if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) {
return err
}
defer os.RemoveAll(tmpPath)
tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id))
if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) {
return err
}
defer os.RemoveAll(tmpMntPath)
tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id))
if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) {
return err
}
defer os.RemoveAll(tmpDiffpath)
// Remove the layers file for the id
if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
return err
}
if m != nil {
delete(a.active, id)
}
a.pathCacheLock.Lock()
delete(a.pathCache, id)
a.pathCacheLock.Unlock()
return nil
}
// Get returns the rootfs path for the id.
// This will mount the dir at it's given path
func (a *Driver) Get(id, mountLabel string) (string, error) {
// Protect the a.active from concurrent access
a.Lock()
defer a.Unlock()
m := a.active[id]
if m == nil {
m = &data{}
a.active[id] = m
}
parents, err := a.getParentLayerPaths(id)
if err != nil && !os.IsNotExist(err) {
return "", err
}
// If a dir does not have a parent ( no layers )do not try to mount
// just return the diff path to the data
m.path = path.Join(a.rootPath(), "diff", id)
if len(parents) > 0 {
m.path = path.Join(a.rootPath(), "mnt", id)
if m.referenceCount == 0 {
if err := a.mount(id, m, mountLabel, parents); err != nil {
return "", err
}
a.pathCacheLock.Lock()
m, exists := a.pathCache[id]
a.pathCacheLock.Unlock()
if !exists {
m = a.getDiffPath(id)
if len(parents) > 0 {
m = a.getMountpoint(id)
}
}
m.referenceCount++
return m.path, nil
// If a dir does not have a parent ( no layers )do not try to mount
// just return the diff path to the data
if len(parents) > 0 {
if err := a.mount(id, m, mountLabel, parents); err != nil {
return "", err
}
}
a.pathCacheLock.Lock()
a.pathCache[id] = m
a.pathCacheLock.Unlock()
return m, nil
}
// Put unmounts and updates list of active mounts.
func (a *Driver) Put(id string) error {
// Protect the a.active from concurrent access
a.Lock()
defer a.Unlock()
a.pathCacheLock.Lock()
m, exists := a.pathCache[id]
if !exists {
m = a.getMountpoint(id)
a.pathCache[id] = m
}
a.pathCacheLock.Unlock()
m := a.active[id]
if m == nil {
// but it might be still here
if a.Exists(id) {
path := path.Join(a.rootPath(), "mnt", id)
err := Unmount(path)
if err != nil {
logrus.Debugf("Failed to unmount %s aufs: %v", id, err)
}
}
return nil
err := a.unmount(m)
if err != nil {
logrus.Debugf("Failed to unmount %s aufs: %v", id, err)
}
if count := m.referenceCount; count > 1 {
m.referenceCount = count - 1
} else {
ids, _ := getParentIds(a.rootPath(), id)
// We only mounted if there are any parents
if ids != nil && len(ids) > 0 {
a.unmount(m)
}
delete(a.active, id)
}
return nil
return err
}
// Diff produces an archive of the changes between the specified
@ -443,16 +417,13 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
return layers, nil
}
func (a *Driver) mount(id string, m *data, mountLabel string, layers []string) error {
func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
// If the id is mounted or we get an error return
if mounted, err := a.mounted(m); err != nil || mounted {
if mounted, err := a.mounted(target); err != nil || mounted {
return err
}
var (
target = m.path
rw = path.Join(a.rootPath(), "diff", id)
)
rw := a.getDiffPath(id)
if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
@ -460,26 +431,39 @@ func (a *Driver) mount(id string, m *data, mountLabel string, layers []string) e
return nil
}
func (a *Driver) unmount(m *data) error {
if mounted, err := a.mounted(m); err != nil || !mounted {
func (a *Driver) unmount(mountPath string) error {
if mounted, err := a.mounted(mountPath); err != nil || !mounted {
return err
}
return Unmount(m.path)
if err := Unmount(mountPath); err != nil {
return err
}
return nil
}
func (a *Driver) mounted(m *data) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(m.path, &buf); err != nil {
return false, nil
}
return graphdriver.FsMagic(buf.Type) == graphdriver.FsMagicAufs, nil
func (a *Driver) mounted(mountpoint string) (bool, error) {
return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint)
}
// Cleanup aufs and unmount all mountpoints
func (a *Driver) Cleanup() error {
for id, m := range a.active {
var dirs []string
if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return nil
}
dirs = append(dirs, path)
return nil
}); err != nil {
return err
}
for _, m := range dirs {
if err := a.unmount(m); err != nil {
logrus.Errorf("Unmounting %s: %s", stringid.TruncateID(id), err)
logrus.Debugf("aufs error unmounting %s: %s", stringid.TruncateID(m), err)
}
}
return mountpk.Unmount(a.root)


@ -200,7 +200,7 @@ func TestMountedFalseResponse(t *testing.T) {
t.Fatal(err)
}
response, err := d.mounted(d.active["1"])
response, err := d.mounted(d.getDiffPath("1"))
if err != nil {
t.Fatal(err)
}
@ -227,7 +227,7 @@ func TestMountedTrueReponse(t *testing.T) {
t.Fatal(err)
}
response, err := d.mounted(d.active["2"])
response, err := d.mounted(d.pathCache["2"])
if err != nil {
t.Fatal(err)
}
@ -293,7 +293,7 @@ func TestRemoveMountedDir(t *testing.T) {
t.Fatal("mntPath should not be empty string")
}
mounted, err := d.mounted(d.active["2"])
mounted, err := d.mounted(d.pathCache["2"])
if err != nil {
t.Fatal(err)
}


@ -46,3 +46,19 @@ func getParentIds(root, id string) ([]string, error) {
}
return out, s.Err()
}
func (a *Driver) getMountpoint(id string) string {
return path.Join(a.mntPath(), id)
}
func (a *Driver) mntPath() string {
return path.Join(a.rootPath(), "mnt")
}
func (a *Driver) getDiffPath(id string) string {
return path.Join(a.diffPath(), id)
}
func (a *Driver) diffPath() string {
return path.Join(a.rootPath(), "diff")
}


@ -69,9 +69,6 @@ type devInfo struct {
Deleted bool `json:"deleted"`
devices *DeviceSet
mountCount int
mountPath string
// The global DeviceSet lock guarantees that we serialize all
// the calls to libdevmapper (which is not threadsafe), but we
// sometimes release that lock while sleeping. In that case
@ -1991,13 +1988,6 @@ func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
devices.Lock()
defer devices.Unlock()
// If mountcount is not zero, that means devices is still in use
// or has not been Put() properly. Fail device deletion.
if info.mountCount != 0 {
return fmt.Errorf("devmapper: Can't delete device %v as it is still mounted. mntCount=%v", info.Hash, info.mountCount)
}
return devices.deleteDevice(info, syncDelete)
}
@ -2116,13 +2106,11 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
}
// Shutdown shuts down the device by unmounting the root.
func (devices *DeviceSet) Shutdown() error {
func (devices *DeviceSet) Shutdown(home string) error {
logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)
var devs []*devInfo
// Stop deletion worker. This should start delivering new events to
// ticker channel. That means no new instance of cleanupDeletedDevice()
// will run after this call. If one instance is already running at
@ -2139,30 +2127,46 @@ func (devices *DeviceSet) Shutdown() error {
// metadata. Hence save this early before trying to deactivate devices.
devices.saveDeviceSetMetaData()
for _, info := range devices.Devices {
devs = append(devs, info)
// ignore the error since it's just a best effort to not try to unmount something that's mounted
mounts, _ := mount.GetMounts()
mounted := make(map[string]bool, len(mounts))
for _, mnt := range mounts {
mounted[mnt.Mountpoint] = true
}
devices.Unlock()
for _, info := range devs {
info.lock.Lock()
if info.mountCount > 0 {
if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return nil
}
if mounted[p] {
// We use MNT_DETACH here in case it is still busy in some running
// container. This means it'll go away from the global scope directly,
// and the device will be released when that container dies.
if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", info.mountPath, err)
if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil {
logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
}
devices.Lock()
if err := devices.deactivateDevice(info); err != nil {
logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", info.Hash, err)
}
devices.Unlock()
}
info.lock.Unlock()
if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil {
logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err)
} else {
if err := devices.deactivateDevice(devInfo); err != nil {
logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err)
}
}
return nil
}); err != nil && !os.IsNotExist(err) {
devices.Unlock()
return err
}
devices.Unlock()
info, _ := devices.lookupDeviceWithLock("")
if info != nil {
info.lock.Lock()
@ -2202,15 +2206,6 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
devices.Lock()
defer devices.Unlock()
if info.mountCount > 0 {
if path != info.mountPath {
return fmt.Errorf("devmapper: Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path)
}
info.mountCount++
return nil
}
if err := devices.activateDeviceIfNeeded(info, false); err != nil {
return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err)
}
@ -2234,9 +2229,6 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err)
}
info.mountCount = 1
info.mountPath = path
return nil
}
@ -2256,20 +2248,6 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
devices.Lock()
defer devices.Unlock()
// If there are running containers when daemon crashes, during daemon
// restarting, it will kill running containers and will finally call
// Put() without calling Get(). So info.MountCount may become negative.
// if info.mountCount goes negative, we do the unmount and assign
// it to 0.
info.mountCount--
if info.mountCount > 0 {
return nil
} else if info.mountCount < 0 {
logrus.Warnf("devmapper: Mount count of device went negative. Put() called without matching Get(). Resetting count to 0")
info.mountCount = 0
}
logrus.Debugf("devmapper: Unmount(%s)", mountPath)
if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil {
return err
@ -2280,8 +2258,6 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
return err
}
info.mountPath = ""
return nil
}


@ -108,7 +108,7 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) {
// Cleanup unmounts a device.
func (d *Driver) Cleanup() error {
err := d.DeviceSet.Shutdown()
err := d.DeviceSet.Shutdown(d.home)
if err2 := mount.Unmount(d.home); err == nil {
err = err2


@ -1,8 +1,19 @@
package graphdriver
import "syscall"
var (
// Slice of drivers that should be used in an order
priority = []string{
"zfs",
}
)
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil
}


@ -42,6 +42,8 @@ const (
FsMagicXfs = FsMagic(0x58465342)
// FsMagicZfs filesystem id for Zfs
FsMagicZfs = FsMagic(0x2fc12fc1)
// FsMagicOverlay filesystem id for overlay
FsMagicOverlay = FsMagic(0x794C7630)
)
var (
@ -86,3 +88,12 @@ func GetFSMagic(rootpath string) (FsMagic, error) {
}
return FsMagic(buf.Type), nil
}
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil
}
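The shared Mounted helper added above is what the aufs, overlay and zfs hunks in this commit switch to in place of their per-driver reference counts. A condensed sketch of that Put-side pattern follows; the putLayer name and the simplified error handling are illustrative only (the real drivers also maintain a path cache), while graphdriver.Mounted, graphdriver.FsMagicAufs and syscall.Unmount come from the hunks themselves:

package example

import (
	"syscall"

	"github.com/docker/docker/daemon/graphdriver"
)

// putLayer asks the kernel, via the shared Mounted helper, whether the layer
// is actually mounted before unmounting it, instead of trusting an in-memory
// mount counter. FsMagicAufs is used here as the aufs case.
func putLayer(mountpoint string) error {
	mounted, err := graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint)
	if err != nil || !mounted {
		return err
	}
	return syscall.Unmount(mountpoint, 0)
}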


@ -88,21 +88,13 @@ func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Rea
// of that. This means all child images share file (but not directory)
// data with the parent.
// ActiveMount contains information about the count, path and whether is mounted or not.
// This information is part of the Driver, that contains list of active mounts that are part of this overlay.
type ActiveMount struct {
count int
path string
mounted bool
}
// Driver contains information about the home directory and the list of active mounts that are created using this driver.
type Driver struct {
home string
sync.Mutex // Protects concurrent modification to active
active map[string]*ActiveMount
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
home string
pathCacheLock sync.Mutex
pathCache map[string]string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
}
var backingFs = "<unknown>"
@ -151,10 +143,10 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
}
d := &Driver{
home: home,
active: make(map[string]*ActiveMount),
uidMaps: uidMaps,
gidMaps: gidMaps,
home: home,
pathCache: make(map[string]string),
uidMaps: uidMaps,
gidMaps: gidMaps,
}
return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil
@ -325,23 +317,14 @@ func (d *Driver) Remove(id string) error {
if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
return err
}
d.pathCacheLock.Lock()
delete(d.pathCache, id)
d.pathCacheLock.Unlock()
return nil
}
// Get creates and mounts the required file system for the given id and returns the mount path.
func (d *Driver) Get(id string, mountLabel string) (string, error) {
// Protect the d.active from concurrent access
d.Lock()
defer d.Unlock()
mount := d.active[id]
if mount != nil {
mount.count++
return mount.path, nil
}
mount = &ActiveMount{count: 1}
dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
return "", err
@ -350,9 +333,10 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) {
// If id has a root, just return it
rootDir := path.Join(dir, "root")
if _, err := os.Stat(rootDir); err == nil {
mount.path = rootDir
d.active[id] = mount
return mount.path, nil
d.pathCacheLock.Lock()
d.pathCache[id] = rootDir
d.pathCacheLock.Unlock()
return rootDir, nil
}
lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
@ -365,6 +349,16 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) {
mergedDir := path.Join(dir, "merged")
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
// if it's mounted already, just return
mounted, err := d.mounted(mergedDir)
if err != nil {
return "", err
}
if mounted {
return mergedDir, nil
}
if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
}
@ -378,42 +372,38 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) {
if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
return "", err
}
mount.path = mergedDir
mount.mounted = true
d.active[id] = mount
return mount.path, nil
d.pathCacheLock.Lock()
d.pathCache[id] = mergedDir
d.pathCacheLock.Unlock()
return mergedDir, nil
}
func (d *Driver) mounted(dir string) (bool, error) {
return graphdriver.Mounted(graphdriver.FsMagicOverlay, dir)
}
// Put unmounts the mount path created for the give id.
func (d *Driver) Put(id string) error {
// Protect the d.active from concurrent access
d.Lock()
defer d.Unlock()
d.pathCacheLock.Lock()
mountpoint, exists := d.pathCache[id]
d.pathCacheLock.Unlock()
mount := d.active[id]
if mount == nil {
if !exists {
logrus.Debugf("Put on a non-mounted device %s", id)
// but it might be still here
if d.Exists(id) {
mergedDir := path.Join(d.dir(id), "merged")
err := syscall.Unmount(mergedDir, 0)
if err != nil {
logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
}
mountpoint = path.Join(d.dir(id), "merged")
}
return nil
d.pathCacheLock.Lock()
d.pathCache[id] = mountpoint
d.pathCacheLock.Unlock()
}
mount.count--
if mount.count > 0 {
return nil
}
defer delete(d.active, id)
if mount.mounted {
err := syscall.Unmount(mount.path, 0)
if err != nil {
if mounted, err := d.mounted(mountpoint); mounted || err != nil {
if err = syscall.Unmount(mountpoint, 0); err != nil {
logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
}
return err


@ -13,7 +13,6 @@ import (
"path"
"path/filepath"
"strings"
"sync"
"syscall"
"time"
@ -47,10 +46,6 @@ const (
type Driver struct {
// info stores the shim driver information
info hcsshim.DriverInfo
// Mutex protects concurrent modification to active
sync.Mutex
// active stores references to the activated layers
active map[string]int
}
var _ graphdriver.DiffGetterDriver = &Driver{}
@ -63,7 +58,6 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap)
HomeDir: home,
Flavour: filterDriver,
},
active: make(map[string]int),
}
return d, nil
}
@ -76,7 +70,6 @@ func InitDiff(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (
HomeDir: home,
Flavour: diffDriver,
},
active: make(map[string]int),
}
return d, nil
}
@ -189,9 +182,6 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
var dir string
d.Lock()
defer d.Unlock()
rID, err := d.resolveID(id)
if err != nil {
return "", err
@ -203,16 +193,14 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
return "", err
}
if d.active[rID] == 0 {
if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
return "", err
}
if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
logrus.Warnf("Failed to Deactivate %s: %s", id, err)
}
return "", err
if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
return "", err
}
if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
logrus.Warnf("Failed to Deactivate %s: %s", id, err)
}
return "", err
}
mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
@ -223,8 +211,6 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
return "", err
}
d.active[rID]++
// If the layer has a mount path, use that. Otherwise, use the
// folder path.
if mountPath != "" {
@ -245,22 +231,10 @@ func (d *Driver) Put(id string) error {
return err
}
d.Lock()
defer d.Unlock()
if d.active[rID] > 1 {
d.active[rID]--
} else if d.active[rID] == 1 {
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
return err
}
if err := hcsshim.DeactivateLayer(d.info, rID); err != nil {
return err
}
delete(d.active, rID)
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
return err
}
return nil
return hcsshim.DeactivateLayer(d.info, rID)
}
// Cleanup ensures the information the driver stores is properly removed.
@ -270,62 +244,40 @@ func (d *Driver) Cleanup() error {
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
// The layer should be mounted when calling this function
func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) {
rID, err := d.resolveID(id)
if err != nil {
return
}
// Getting the layer paths must be done outside of the lock.
layerChain, err := d.getLayerChain(rID)
if err != nil {
return
}
var undo func()
d.Lock()
// To support export, a layer must be activated but not prepared.
if d.info.Flavour == filterDriver {
if d.active[rID] == 0 {
if err = hcsshim.ActivateLayer(d.info, rID); err != nil {
d.Unlock()
return
}
undo = func() {
if err := hcsshim.DeactivateLayer(d.info, rID); err != nil {
logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
}
}
} else {
if err = hcsshim.UnprepareLayer(d.info, rID); err != nil {
d.Unlock()
return
}
undo = func() {
if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err)
}
}
}
// this is assuming that the layer is unmounted
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
return nil, err
}
d.Unlock()
defer func() {
if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
}
}()
arch, err := d.exportLayer(rID, layerChain)
if err != nil {
undo()
return
}
return ioutils.NewReadCloserWrapper(arch, func() error {
defer undo()
return arch.Close()
}), nil
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
// The layer should be mounted when calling this function
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
rID, err := d.resolveID(id)
if err != nil {
@ -336,31 +288,15 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
return nil, err
}
d.Lock()
if d.info.Flavour == filterDriver {
if d.active[rID] == 0 {
if err = hcsshim.ActivateLayer(d.info, rID); err != nil {
d.Unlock()
return nil, err
}
defer func() {
if err := hcsshim.DeactivateLayer(d.info, rID); err != nil {
logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
}
}()
} else {
if err = hcsshim.UnprepareLayer(d.info, rID); err != nil {
d.Unlock()
return nil, err
}
defer func() {
if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil {
logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err)
}
}()
}
// this is assuming that the layer is unmounted
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
return nil, err
}
d.Unlock()
defer func() {
if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil {
logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
}
}()
r, err := hcsshim.NewLayerReader(d.info, id, parentChain)
if err != nil {
@ -391,6 +327,7 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The layer should not be mounted when calling this function
func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
rPId, err := d.resolveID(parent)
if err != nil {


@ -22,12 +22,6 @@ import (
"github.com/opencontainers/runc/libcontainer/label"
)
type activeMount struct {
count int
path string
mounted bool
}
type zfsOptions struct {
fsName string
mountPath string
@ -109,7 +103,6 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
dataset: rootDataset,
options: options,
filesystemsCache: filesystemsCache,
active: make(map[string]*activeMount),
uidMaps: uidMaps,
gidMaps: gidMaps,
}
@ -166,7 +159,6 @@ type Driver struct {
options zfsOptions
sync.Mutex // protects filesystem cache against concurrent access
filesystemsCache map[string]bool
active map[string]*activeMount
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
}
@ -302,17 +294,6 @@ func (d *Driver) Remove(id string) error {
// Get returns the mountpoint for the given id after creating the target directories if necessary.
func (d *Driver) Get(id, mountLabel string) (string, error) {
d.Lock()
defer d.Unlock()
mnt := d.active[id]
if mnt != nil {
mnt.count++
return mnt.path, nil
}
mnt = &activeMount{count: 1}
mountpoint := d.mountPath(id)
filesystem := d.zfsPath(id)
options := label.FormatMountLabel("", mountLabel)
@ -335,48 +316,29 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
}
mnt.path = mountpoint
mnt.mounted = true
d.active[id] = mnt
return mountpoint, nil
}
// Put removes the existing mountpoint for the given id if it exists.
func (d *Driver) Put(id string) error {
d.Lock()
defer d.Unlock()
mnt := d.active[id]
if mnt == nil {
logrus.Debugf("[zfs] Put on a non-mounted device %s", id)
// but it might be still here
if d.Exists(id) {
err := mount.Unmount(d.mountPath(id))
if err != nil {
logrus.Debugf("[zfs] Failed to unmount %s zfs fs: %v", id, err)
}
}
return nil
mountpoint := d.mountPath(id)
mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint)
if err != nil || !mounted {
return err
}
mnt.count--
if mnt.count > 0 {
return nil
}
logrus.Debugf(`[zfs] unmount("%s")`, mountpoint)
defer delete(d.active, id)
if mnt.mounted {
logrus.Debugf(`[zfs] unmount("%s")`, mnt.path)
if err := mount.Unmount(mnt.path); err != nil {
return fmt.Errorf("error unmounting to %s: %v", mnt.path, err)
}
if err := mount.Unmount(mountpoint); err != nil {
return fmt.Errorf("error unmounting to %s: %v", mountpoint, err)
}
return nil
}
// Exists checks to see if the cache entry exists for the given id.
func (d *Driver) Exists(id string) bool {
d.Lock()
defer d.Unlock()
return d.filesystemsCache[d.zfsPath(id)] == true
}


@ -78,6 +78,7 @@ func setResources(s *specs.Spec, r containertypes.Resources) error {
func setDevices(s *specs.Spec, c *container.Container) error {
// Build lists of devices allowed and created within the container.
var devs []specs.Device
devPermissions := s.Linux.Resources.Devices
if c.HostConfig.Privileged {
hostDevices, err := devices.HostDevices()
if err != nil {
@ -86,18 +87,26 @@ func setDevices(s *specs.Spec, c *container.Container) error {
for _, d := range hostDevices {
devs = append(devs, specDevice(d))
}
rwm := "rwm"
devPermissions = []specs.DeviceCgroup{
{
Allow: true,
Access: &rwm,
},
}
} else {
for _, deviceMapping := range c.HostConfig.Devices {
d, err := getDevicesFromPath(deviceMapping)
d, dPermissions, err := getDevicesFromPath(deviceMapping)
if err != nil {
return err
}
devs = append(devs, d...)
devPermissions = append(devPermissions, dPermissions...)
}
}
s.Linux.Devices = append(s.Linux.Devices, devs...)
s.Linux.Resources.Devices = devPermissions
return nil
}

View file
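
The setDevices change above gives privileged containers a single allow-all device cgroup rule ("a *:* rwm") and, for non-privileged containers, collects per-device permissions alongside the device nodes. A self-contained sketch of the allow-all rule's shape follows, using local stand-in types with the same field names as the runtime-spec structs in the diff.

package main

import "fmt"

// deviceCgroup is a local stand-in for specs.DeviceCgroup (illustrative only).
type deviceCgroup struct {
	Allow  bool
	Access *string
}

// allowAllDevices builds the single rule a privileged container gets:
// allow read, write and mknod on every device.
func allowAllDevices() []deviceCgroup {
	rwm := "rwm"
	return []deviceCgroup{{Allow: true, Access: &rwm}}
}

func main() {
	rules := allowAllDevices()
	fmt.Printf("allow=%v access=%s\n", rules[0].Allow, *rules[0].Access)
}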

@ -16,6 +16,7 @@ parent = "smn_cli"
--help Print usage
-i, --input="" Read from a tar archive file, instead of STDIN. The tarball may be compressed with gzip, bzip, or xz
-q, --quiet Suppress the load output. Without this option, a progress bar is displayed.
Loads a tarred repository from a file or the standard input stream.
Restores both images and tags.

View file

@ -20,7 +20,7 @@ set -e
# To update this script on https://get.docker.com,
# use hack/release.sh during a normal release,
# or the following one-liner for script hotfixes:
# s3cmd put --acl-public -P hack/install.sh s3://get.docker.com/index
# aws s3 cp --acl public-read hack/install.sh s3://get.docker.com/index
#
url="https://get.docker.com/"

View file

@ -121,7 +121,7 @@ fi
if [ "$DOCKER_EXPERIMENTAL" ]; then
echo >&2 '# WARNING! DOCKER_EXPERIMENTAL is set: building experimental features'
echo >&2
DOCKER_BUILDTAGS+=" experimental"
DOCKER_BUILDTAGS+=" experimental pkcs11"
fi
if [ -z "$DOCKER_CLIENTONLY" ]; then

View file

@ -5,8 +5,8 @@ VERSION = $(shell cat VERSION)
override_dh_gencontrol:
# if we're on Ubuntu, we need to Recommends: apparmor
echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars
# recommend yubico-piv-tool since we include pkcs11 by default
echo 'yubico:Recommends=yubico-piv-tool (>= 1.1.0~)' >> debian/docker-engine.substvars
# if we are building experimental we recommend yubico-piv-tool
echo 'yubico:Recommends=$(shell [ "$DOCKER_EXPERIMENTAL" ] && echo "yubico-piv-tool (>= 1.1.0~)")' >> debian/docker-engine.substvars
dh_gencontrol
override_dh_auto_build:
@ -22,6 +22,10 @@ override_dh_strip:
override_dh_auto_install:
mkdir -p debian/docker-engine/usr/bin
cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary/docker)" debian/docker-engine/usr/bin/docker
cp -aT /usr/local/bin/containerd debian/docker-engine/usr/bin/docker-containerd
cp -aT /usr/local/bin/containerd-shim debian/docker-engine/usr/bin/docker-containerd-shim
cp -aT /usr/local/bin/ctr debian/docker-engine/usr/bin/docker-containerd-ctr
cp -aT /usr/local/bin/runc debian/docker-engine/usr/bin/docker-runc
mkdir -p debian/docker-engine/usr/lib/docker
override_dh_installinit:
@ -36,5 +40,8 @@ override_dh_install:
dh_install
dh_apparmor --profile-name=docker-engine -pdocker-engine
override_dh_shlibdeps:
dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info
%:
dh $@ --with=bash-completion $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo --with=systemd)

View file

@ -60,10 +60,12 @@ Requires: device-mapper >= 1.02.90-2
%global with_selinux 1
%endif
# yubico-piv-tool recommends
%if 0%{?_experimental}
# yubico-piv-tool conditional
%if 0%{?fedora} >= 20 || 0%{?centos} >= 7 || 0%{?rhel} >= 7
Requires: yubico-piv-tool >= 1.1.0
%endif
%endif
# start if with_selinux
%if 0%{?with_selinux}
@ -124,6 +126,14 @@ export DOCKER_GITCOMMIT=%{_gitcommit}
install -d $RPM_BUILD_ROOT/%{_bindir}
install -p -m 755 bundles/%{_origversion}/dynbinary/docker-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker
# install containerd
install -p -m 755 /usr/local/bin/containerd $RPM_BUILD_ROOT/%{_bindir}/docker-containerd
install -p -m 755 /usr/local/bin/containerd-shim $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-shim
install -p -m 755 /usr/local/bin/ctr $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-ctr
# install runc
install -p -m 755 /usr/local/bin/runc $RPM_BUILD_ROOT/%{_bindir}/docker-runc
# install udev rules
install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d
install -p -m 644 contrib/udev/80-docker.rules $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules
@ -171,6 +181,10 @@ install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc $RPM_BUILD_ROOT/usr/shar
%files
%doc AUTHORS CHANGELOG.md CONTRIBUTING.md LICENSE MAINTAINERS NOTICE README.md
/%{_bindir}/docker
/%{_bindir}/docker-containerd
/%{_bindir}/docker-containerd-shim
/%{_bindir}/docker-containerd-ctr
/%{_bindir}/docker-runc
/%{_sysconfdir}/udev/rules.d/80-docker.rules
%if 0%{?is_systemd}
/%{_unitdir}/docker.service

View file

@ -9,6 +9,13 @@ if ! command -v docker &> /dev/null; then
false
fi
if [ -z "$DOCKER_TEST_HOST" ]; then
if docker version &> /dev/null; then
echo >&2 'skipping daemon start, since daemon appears to be already started'
return
fi
fi
# intentionally open a couple of bogus file descriptors to help test that they get scrubbed in containers
exec 41>&1 42>&2

View file

@ -36,7 +36,7 @@ if [ "$(go env GOOS)" == "linux" ] ; then
esac
fi
if [ "$IAMSTATIC" == "true" ] && [ "$(go env GOHOSTOS)" == "linux" ]; then
if [ "$IAMSTATIC" == "true" ] && [ "$(go env GOHOSTOS)" == "linux" ] && [ "$DOCKER_EXPERIMENTAL" ]; then
if [ "${GOOS}/${GOARCH}" == "darwin/amd64" ]; then
export CGO_ENABLED=1
export CC=o64-clang

View file

@ -58,14 +58,33 @@ set -e
FROM $image
WORKDIR /usr/src/docker
COPY . /usr/src/docker
RUN mkdir -p /go/src/github.com/docker \
RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers \
&& ln -snf /usr/src/docker /go/src/github.com/docker/docker
EOF
# get the RUNC and CONTAINERD commits from the root Dockerfile; this keeps the commits in sync
awk '$1 == "ENV" && $2 == "RUNC_COMMIT" { print; exit }' Dockerfile >> "$DEST/$version/Dockerfile.build"
awk '$1 == "ENV" && $2 == "CONTAINERD_COMMIT" { print; exit }' Dockerfile >> "$DEST/$version/Dockerfile.build"
# add runc and containerd compile and install
cat >> "$DEST/$version/Dockerfile.build" <<-EOF
# Install runc
RUN git clone git://github.com/opencontainers/runc.git "/go/src/github.com/opencontainers/runc" \
&& cd "/go/src/github.com/opencontainers/runc" \
&& git checkout -q "\$RUNC_COMMIT"
RUN set -x && export GOPATH="/go" && cd "/go/src/github.com/opencontainers/runc" \
&& make BUILDTAGS="\$RUNC_BUILDTAGS" && make install
# Install containerd
RUN git clone git://github.com/docker/containerd.git "/go/src/github.com/docker/containerd" \
&& cd "/go/src/github.com/docker/containerd" \
&& git checkout -q "\$CONTAINERD_COMMIT"
RUN set -x && export GOPATH="/go" && cd "/go/src/github.com/docker/containerd" && make && make install
EOF
if [ "$DOCKER_EXPERIMENTAL" ]; then
echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build"
fi
cat >> "$DEST/$version/Dockerfile.build" <<-EOF
RUN mv -v hack/make/.build-deb debian
RUN cp -aL hack/make/.build-deb debian
RUN { echo '$debSource (${debVersion}-0~${suite}) $suite; urgency=low'; echo; echo ' * Version: $VERSION'; echo; echo " -- $debMaintainer $debDate"; } > debian/changelog && cat >&2 debian/changelog
RUN dpkg-buildpackage -uc -us
EOF

View file

@ -87,6 +87,26 @@ set -e
cat > "$DEST/$version/Dockerfile.build" <<-EOF
FROM $image
COPY . /usr/src/${rpmName}
RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers
EOF
# get the RUNC and CONTAINERD commits from the root Dockerfile; this keeps the commits in sync
awk '$1 == "ENV" && $2 == "RUNC_COMMIT" { print; exit }' Dockerfile >> "$DEST/$version/Dockerfile.build"
awk '$1 == "ENV" && $2 == "CONTAINERD_COMMIT" { print; exit }' Dockerfile >> "$DEST/$version/Dockerfile.build"
# add runc and containerd compile and install
cat >> "$DEST/$version/Dockerfile.build" <<-EOF
# Install runc
RUN git clone git://github.com/opencontainers/runc.git "/go/src/github.com/opencontainers/runc" \
&& cd "/go/src/github.com/opencontainers/runc" \
&& git checkout -q "\$RUNC_COMMIT"
RUN set -x && export GOPATH="/go" && cd "/go/src/github.com/opencontainers/runc" \
&& make BUILDTAGS="\$RUNC_BUILDTAGS" && make install
# Install containerd
RUN git clone git://github.com/docker/containerd.git "/go/src/github.com/docker/containerd" \
&& cd "/go/src/github.com/docker/containerd" \
&& git checkout -q "\$CONTAINERD_COMMIT"
RUN set -x && export GOPATH="/go" && cd "/go/src/github.com/docker/containerd" && make && make install
EOF
if [ "$DOCKER_EXPERIMENTAL" ]; then
echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build"
@ -97,7 +117,10 @@ set -e
WORKDIR /root/rpmbuild
RUN ln -sfv /usr/src/${rpmName}/hack/make/.build-rpm SPECS
WORKDIR /root/rpmbuild/SPECS
RUN tar -cz -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar.gz ${rpmName}
RUN tar -r -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar ${rpmName}
RUN tar -r -C /go/src/github.com/docker -f /root/rpmbuild/SOURCES/${rpmName}.tar containerd
RUN tar -r -C /go/src/github.com/opencontainers -f /root/rpmbuild/SOURCES/${rpmName}.tar runc
RUN gzip /root/rpmbuild/SOURCES/${rpmName}.tar
RUN { cat /usr/src/${rpmName}/contrib/builder/rpm/${PACKAGE_ARCH}/changelog; } >> ${rpmName}.spec && tail >&2 ${rpmName}.spec
RUN rpmbuild -ba \
--define '_gitcommit $DOCKER_GITCOMMIT' \

View file

@ -14,6 +14,7 @@ set -e
#
# ... and so on and so forth for the builds created by hack/make/build-deb
source "$(dirname "$BASH_SOURCE")/.integration-daemon-start"
source "$(dirname "$BASH_SOURCE")/.detect-daemon-osarch"
: ${DOCKER_RELEASE_DIR:=$DEST}

View file

@ -14,6 +14,7 @@ set -e
#
# ... and so on and so forth for the builds created by hack/make/build-rpm
source "$(dirname "$BASH_SOURCE")/.integration-daemon-start"
source "$(dirname "$BASH_SOURCE")/.detect-daemon-osarch"
: ${DOCKER_RELEASE_DIR:=$DEST}

View file

@ -19,8 +19,6 @@ for d in "$CROSS/"*/*; do
mkdir -p "$DEST/$GOOS/$GOARCH"
TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME.tgz"
mkdir -p "$DEST/build"
mkdir -p "$DEST/build/usr/local/bin"
cp -L "$d/$BINARY_FULLNAME" "$DEST/build/usr/local/bin/docker$BINARY_EXTENSION"
copy_containerd "$DEST/build/usr/local/bin/"

View file

@ -43,7 +43,7 @@ cd /go/src/github.com/docker/docker
[ -x hack/make.sh ] || usage
export AWS_DEFAULT_REGION
: ${AWS_DEFAULT_REGION:=us-west-2}
: ${AWS_DEFAULT_REGION:=us-west-1}
RELEASE_BUNDLES=(
binary
@ -79,8 +79,6 @@ fi
setup_s3() {
echo "Setting up S3"
# TODO: Move to Dockerfile
pip install awscli==1.10.15
# Try creating the bucket. Ignore errors (it might already exist).
aws s3 mb "s3://$BUCKET" 2>/dev/null || true
# Check access to the bucket.
@ -104,8 +102,7 @@ s3_url() {
echo "https://$BUCKET_PATH"
;;
*)
# TODO: remove s3cmd dependency
BASE_URL=$( s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }' )
BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com"
if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then
echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
else

View file

@ -24,7 +24,7 @@ clone git golang.org/x/net 47990a1ba55743e6ef1affd3a14e5bac8553615d https://gith
clone git golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git
clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3
clone git github.com/docker/go-connections v0.2.0
clone git github.com/docker/engine-api v0.3.0
clone git github.com/docker/engine-api v0.3.1
clone git github.com/RackSec/srslog 259aed10dfa74ea2961eddd1d9847619f6e98837
clone git github.com/imdario/mergo 0.2.1

View file

@ -0,0 +1,95 @@
package main
import (
"fmt"
"io/ioutil"
"os"
"runtime"
"strings"
"sync"
"github.com/docker/docker/pkg/integration/checker"
"github.com/go-check/check"
)
func (s *DockerSuite) BenchmarkConcurrentContainerActions(c *check.C) {
maxConcurrency := runtime.GOMAXPROCS(0)
numIterations := c.N
outerGroup := &sync.WaitGroup{}
outerGroup.Add(maxConcurrency)
chErr := make(chan error, numIterations*2*maxConcurrency)
for i := 0; i < maxConcurrency; i++ {
go func() {
defer outerGroup.Done()
innerGroup := &sync.WaitGroup{}
innerGroup.Add(2)
go func() {
defer innerGroup.Done()
for i := 0; i < numIterations; i++ {
args := []string{"run", "-d", defaultSleepImage}
args = append(args, defaultSleepCommand...)
out, _, err := dockerCmdWithError(args...)
if err != nil {
chErr <- fmt.Errorf(out)
return
}
id := strings.TrimSpace(out)
tmpDir, err := ioutil.TempDir("", "docker-concurrent-test-"+id)
if err != nil {
chErr <- err
return
}
defer os.RemoveAll(tmpDir)
out, _, err = dockerCmdWithError("cp", id+":/tmp", tmpDir)
if err != nil {
chErr <- fmt.Errorf(out)
return
}
out, _, err = dockerCmdWithError("kill", id)
if err != nil {
chErr <- fmt.Errorf(out)
}
out, _, err = dockerCmdWithError("start", id)
if err != nil {
chErr <- fmt.Errorf(out)
}
out, _, err = dockerCmdWithError("kill", id)
if err != nil {
chErr <- fmt.Errorf(out)
}
// don't do an rm -f here since it can potentially ignore errors from the graphdriver
out, _, err = dockerCmdWithError("rm", id)
if err != nil {
chErr <- fmt.Errorf(out)
}
}
}()
go func() {
defer innerGroup.Done()
for i := 0; i < numIterations; i++ {
out, _, err := dockerCmdWithError("ps")
if err != nil {
chErr <- fmt.Errorf(out)
}
}
}()
innerGroup.Wait()
}()
}
outerGroup.Wait()
close(chErr)
for err := range chErr {
c.Assert(err, checker.IsNil)
}
}

View file
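
The new benchmark above fans work out across GOMAXPROCS goroutines and funnels failures into one buffered error channel that is drained only after every goroutine has finished. A minimal sketch of that pattern, with an invented worker callback and counts for illustration:

package main

import (
	"fmt"
	"sync"
)

// runConcurrently starts `workers` goroutines, each running `iterations` calls of
// work, and returns every error they produced. The channel is buffered for the
// worst case so sends can never block.
func runConcurrently(workers, iterations int, work func(i int) error) []error {
	var wg sync.WaitGroup
	errCh := make(chan error, workers*iterations)
	for w := 0; w < workers; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < iterations; i++ {
				if err := work(i); err != nil {
					errCh <- err
				}
			}
		}()
	}
	wg.Wait()
	close(errCh)
	var errs []error
	for err := range errCh {
		errs = append(errs, err)
	}
	return errs
}

func main() {
	errs := runConcurrently(4, 10, func(i int) error {
		if i == 7 {
			return fmt.Errorf("iteration %d failed", i)
		}
		return nil
	})
	fmt.Println(len(errs), "errors collected")
}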

@ -485,6 +485,17 @@ func (s *DockerSuite) TestExecOnReadonlyContainer(c *check.C) {
dockerCmd(c, "exec", "parent", "true")
}
func (s *DockerSuite) TestExecUlimits(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testexeculimits"
runSleepingContainer(c, "-d", "--ulimit", "nproc=21", "--name", name)
c.Assert(waitRun(name), checker.IsNil)
out, _, err := dockerCmdWithError("exec", name, "sh", "-c", "ulimit -p")
c.Assert(err, checker.IsNil)
c.Assert(strings.TrimSpace(out), checker.Equals, "21")
}
// #15750
func (s *DockerSuite) TestExecStartFails(c *check.C) {
// TODO Windows CI. This test should be portable. Figure out why it fails

View file

@ -1452,3 +1452,16 @@ func (s *DockerSuite) TestDockerNetworkInternalMode(c *check.C) {
_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
c.Assert(err, check.IsNil)
}
// Test for #21401
func (s *DockerNetworkSuite) TestDockerNetworkCreateDeleteSpecialCharacters(c *check.C) {
dockerCmd(c, "network", "create", "test@#$")
assertNwIsAvailable(c, "test@#$")
dockerCmd(c, "network", "rm", "test@#$")
assertNwNotAvailable(c, "test@#$")
dockerCmd(c, "network", "create", "kiwl$%^")
assertNwIsAvailable(c, "kiwl$%^")
dockerCmd(c, "network", "rm", "kiwl$%^")
assertNwNotAvailable(c, "kiwl$%^")
}

View file

@ -653,7 +653,6 @@ func (s *DockerSuite) TestPushToCentralRegistryUnauthorized(c *check.C) {
out, _, err := dockerCmdWithError("push", repoName)
c.Assert(err, check.NotNil, check.Commentf(out))
c.Assert(out, check.Not(checker.Contains), "Retrying")
c.Assert(out, checker.Contains, "unauthorized: authentication required")
}
func getTestTokenService(status int, body string) *httptest.Server {

View file

@ -4290,3 +4290,21 @@ func (s *DockerSuite) TestRunVolumeCopyFlag(c *check.C) {
out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:nocopy", "busybox", "true")
c.Assert(err, checker.NotNil, check.Commentf(out))
}
func (s *DockerSuite) TestRunTooLongHostname(c *check.C) {
// Test case in #21445
hostname1 := "this-is-a-way-too-long-hostname-but-it-should-give-a-nice-error.local"
out, _, err := dockerCmdWithError("run", "--hostname", hostname1, "busybox", "echo", "test")
c.Assert(err, checker.NotNil, check.Commentf("Expected docker run to fail!"))
c.Assert(out, checker.Contains, "invalid hostname format for --hostname:", check.Commentf("Expected to have 'invalid hostname format for --hostname:' in the output, got: %s!", out))
// HOST_NAME_MAX=64 so 65 bytes will fail
hostname2 := "this-is-a-hostname-with-65-bytes-so-it-should-give-an-error.local"
out, _, err = dockerCmdWithError("run", "--hostname", hostname2, "busybox", "echo", "test")
c.Assert(err, checker.NotNil, check.Commentf("Expected docker run to fail!"))
c.Assert(out, checker.Contains, "invalid hostname format for --hostname:", check.Commentf("Expected to have 'invalid hostname format for --hostname:' in the output, got: %s!", out))
// 64 bytes will be OK
hostname3 := "this-is-a-hostname-with-64-bytes-so-will-not-give-an-error.local"
dockerCmd(c, "run", "--hostname", hostname3, "busybox", "echo", "test")
}

View file

@ -13,6 +13,7 @@ import (
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/docker/docker/pkg/homedir"
@ -980,3 +981,29 @@ func (s *DockerSuite) TestRunPidsLimit(c *check.C) {
out = inspectField(c, "skittles", "HostConfig.PidsLimit")
c.Assert(out, checker.Equals, "2", check.Commentf("setting the pids limit failed"))
}
func (s *DockerSuite) TestRunPrivilegedAllowedDevices(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace)
file := "/sys/fs/cgroup/devices/devices.list"
out, _ := dockerCmd(c, "run", "--privileged", "busybox", "cat", file)
c.Logf("out: %q", out)
c.Assert(strings.TrimSpace(out), checker.Equals, "a *:* rwm")
}
func (s *DockerSuite) TestRunUserDeviceAllowed(c *check.C) {
testRequires(c, DaemonIsLinux)
fi, err := os.Stat("/dev/snd/timer")
if err != nil {
c.Skip("Host does not have /dev/snd/timer")
}
stat, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
c.Skip("Could not stat /dev/snd/timer")
}
file := "/sys/fs/cgroup/devices/devices.list"
out, _ := dockerCmd(c, "run", "--device", "/dev/snd/timer:w", "busybox", "cat", file)
c.Assert(out, checker.Contains, fmt.Sprintf("c %d:%d w", stat.Rdev/256, stat.Rdev%256))
}

View file
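
TestRunUserDeviceAllowed above checks the device cgroup whitelist by deriving the device's major and minor numbers from st_rdev, using the small-device convention major = rdev/256 and minor = rdev%256, and matching that against /sys/fs/cgroup/devices/devices.list. A minimal sketch of that derivation, using /dev/null so it runs on any Linux host:

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	fi, err := os.Stat("/dev/null") // any character device works for illustration
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	stat, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		fmt.Println("not a syscall.Stat_t")
		return
	}
	// Matches the "c MAJOR:MINOR PERMS" lines in devices.list for small device numbers.
	major, minor := stat.Rdev/256, stat.Rdev%256
	fmt.Printf("c %d:%d w\n", major, minor) // e.g. "c 1:3 w" for /dev/null
}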

@ -157,8 +157,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpvlanOverlapParent(c *check.C) {
func (s *DockerNetworkSuite) TestDockerNetworkMacvlanMultiSubnet(c *check.C) {
// create a dual stack multi-subnet Macvlan bridge mode network and validate connectivity between four containers, two on each subnet
testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm)
dockerCmd(c, "network", "create", "--driver=macvlan", "--subnet=172.28.100.0/24", "--subnet=172.28.102.0/24", "--gateway=172.28.102.254",
testRequires(c, DaemonIsLinux, IPv6, MacvlanKernelSupport, NotUserNamespace, NotArm)
dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.100.0/24", "--subnet=172.28.102.0/24", "--gateway=172.28.102.254",
"--subnet=2001:db8:abc2::/64", "--subnet=2001:db8:abc4::/64", "--gateway=2001:db8:abc4::254", "dualstackbridge")
// Ensure the network was created
assertNwIsAvailable(c, "dualstackbridge")
@ -212,8 +212,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkMacvlanMultiSubnet(c *check.C) {
func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL2MultiSubnet(c *check.C) {
// create a dual stack multi-subnet Ipvlan L2 network and validate connectivity within the subnets, two on each subnet
testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm)
dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.200.0/24", "--subnet=172.28.202.0/24", "--gateway=172.28.202.254",
testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm)
dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.200.0/24", "--subnet=172.28.202.0/24", "--gateway=172.28.202.254",
"--subnet=2001:db8:abc8::/64", "--subnet=2001:db8:abc6::/64", "--gateway=2001:db8:abc6::254", "dualstackl2")
// Ensure the network was created
assertNwIsAvailable(c, "dualstackl2")
@ -266,8 +266,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL2MultiSubnet(c *check.C) {
func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL3MultiSubnet(c *check.C) {
// create a dual stack multi-subnet Ipvlan L3 network and validate connectivity between all four containers per L3 mode
testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm)
dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.10.0/24", "--subnet=172.28.12.0/24", "--gateway=172.28.12.254",
testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm)
dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.10.0/24", "--subnet=172.28.12.0/24", "--gateway=172.28.12.254",
"--subnet=2001:db8:abc9::/64", "--subnet=2001:db8:abc7::/64", "--gateway=2001:db8:abc7::254", "-o", "ipvlan_mode=l3", "dualstackl3")
// Ensure the network was created
assertNwIsAvailable(c, "dualstackl3")
@ -325,8 +325,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL3MultiSubnet(c *check.C) {
func (s *DockerNetworkSuite) TestDockerNetworkIpvlanAddressing(c *check.C) {
// Ensure the default gateways, next-hops and default dev devices are properly set
testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm)
dockerCmd(c, "network", "create", "--driver=macvlan", "--subnet=172.28.130.0/24",
testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm)
dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.130.0/24",
"--subnet=2001:db8:abca::/64", "--gateway=2001:db8:abca::254", "-o", "macvlan_mode=bridge", "dualstackbridge")
assertNwIsAvailable(c, "dualstackbridge")
dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "busybox", "top")
@ -341,7 +341,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpvlanAddressing(c *check.C) {
// Verify ipvlan l2 mode sets the proper default gateway routes via netlink
// for a route either explicitly set by the user or inferred via default IPAM
dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.140.0/24", "--gateway=172.28.140.254",
dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.140.0/24", "--gateway=172.28.140.254",
"--subnet=2001:db8:abcb::/64", "-o", "ipvlan_mode=l2", "dualstackl2")
assertNwIsAvailable(c, "dualstackl2")
dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "busybox", "top")
@ -355,7 +355,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpvlanAddressing(c *check.C) {
c.Assert(out, checker.Contains, "default via 2001:db8:abcb::1 dev eth0")
// Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops
dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.160.0/24", "--gateway=172.28.160.254",
dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.160.0/24", "--gateway=172.28.160.254",
"--subnet=2001:db8:abcd::/64", "--gateway=2001:db8:abcd::254", "-o", "ipvlan_mode=l3", "dualstackl3")
assertNwIsAvailable(c, "dualstackl3")
dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "busybox", "top")

View file

@ -49,6 +49,10 @@ var (
// to be created which would result in a layer depth
// greater than the 125 max.
ErrMaxDepthExceeded = errors.New("max depth exceeded")
// ErrNotSupported is used when the action is not supported
// on the current platform
ErrNotSupported = errors.New("not supported on this platform")
)
// ChainID is the content-addressable ID of a layer.

View file

@ -12,6 +12,7 @@ type mountedLayer struct {
mountID string
initID string
parent *roLayer
path string
layerStore *layerStore
references map[RWLayer]*referencedRWLayer
@ -131,10 +132,21 @@ func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
return "", ErrLayerNotRetained
}
rl.activityCount++
return rl.mountedLayer.Mount(mountLabel)
if rl.activityCount > 0 {
rl.activityCount++
return rl.path, nil
}
m, err := rl.mountedLayer.Mount(mountLabel)
if err == nil {
rl.activityCount++
rl.path = m
}
return m, err
}
// Unmount decrements the activity count and unmounts the underlying layer
// Callers should only call `Unmount` once per call to `Mount`, even on error.
func (rl *referencedRWLayer) Unmount() error {
rl.activityL.Lock()
defer rl.activityL.Unlock()
@ -145,7 +157,11 @@ func (rl *referencedRWLayer) Unmount() error {
if rl.activityCount == -1 {
return ErrLayerNotRetained
}
rl.activityCount--
if rl.activityCount > 0 {
return nil
}
return rl.mountedLayer.Unmount()
}

View file
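
The mountedLayer hunk above replaces "always forward Mount/Unmount to the driver" with reference counting: the first Mount does the real work and caches the returned path, later calls only bump the count, and the driver is only asked to unmount once the count drops back to zero. A minimal, generic sketch of that idea follows; the types and callbacks are invented for illustration and are not the layer package's API.

package main

import (
	"fmt"
	"sync"
)

// refMount wraps an expensive mount/unmount pair with a reference count.
type refMount struct {
	mu      sync.Mutex
	count   int
	path    string
	mount   func() (string, error)
	unmount func(path string) error
}

func (r *refMount) Mount() (string, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.count > 0 {
		r.count++ // already mounted: reuse the cached path
		return r.path, nil
	}
	p, err := r.mount()
	if err != nil {
		return "", err
	}
	r.count, r.path = 1, p
	return p, nil
}

func (r *refMount) Unmount() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.count == 0 {
		return fmt.Errorf("not mounted")
	}
	r.count--
	if r.count > 0 {
		return nil // other users still hold the mount
	}
	return r.unmount(r.path)
}

func main() {
	m := &refMount{
		mount:   func() (string, error) { fmt.Println("mounting"); return "/mnt/layer", nil },
		unmount: func(p string) error { fmt.Println("unmounting", p); return nil },
	}
	m.Mount()
	m.Mount()   // second user: no second mount
	m.Unmount() // count 2 -> 1
	m.Unmount() // count 1 -> 0: real unmount happens here
}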

@ -79,6 +79,7 @@ func (clnt *client) AddProcess(containerID, processFriendlyName string, specp Pr
ApparmorProfile: sp.ApparmorProfile,
SelinuxLabel: sp.SelinuxLabel,
NoNewPrivileges: sp.NoNewPrivileges,
Rlimits: convertRlimits(sp.Rlimits),
}
iopipe, err := p.openFifos(sp.Terminal)

View file

@ -3,6 +3,8 @@ package libcontainerd
import (
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
"os/exec"
@ -18,6 +20,7 @@ import (
"github.com/docker/docker/utils"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/grpclog"
)
const (
@ -77,6 +80,8 @@ func New(stateDir string, options ...RemoteOption) (_ Remote, err error) {
}
}
// don't output the grpc reconnect logging
grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
dialOpts := append([]grpc.DialOption{grpc.WithInsecure()},
grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout("unix", addr, timeout)

View file

@ -39,3 +39,14 @@ func systemPid(ctr *containerd.Container) uint32 {
}
return pid
}
func convertRlimits(sr []specs.Rlimit) (cr []*containerd.Rlimit) {
for _, r := range sr {
cr = append(cr, &containerd.Rlimit{
Type: r.Type,
Hard: r.Hard,
Soft: r.Soft,
})
}
return
}

View file

@ -8,7 +8,7 @@ docker-load - Load an image from a tar archive or STDIN
**docker load**
[**--help**]
[**-i**|**--input**[=*INPUT*]]
[**-q**|**--quiet**]
# DESCRIPTION
@ -22,6 +22,9 @@ Restores both images and tags.
**-i**, **--input**=""
Read from a tar archive file, instead of STDIN. The tarball may be compressed with gzip, bzip, or xz.
**-q**, **--quiet**
Suppress the load output. Without this option, a progress bar is displayed.
# EXAMPLES
$ docker images

View file

@ -130,7 +130,7 @@ func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool)
return nil, err
}
retries++
logrus.Warnf("Unable to connect to plugin: %s, retrying in %v", req.URL, timeOff)
logrus.Warnf("Unable to connect to plugin: %s:%s, retrying in %v", req.URL.Host, req.URL.Path, timeOff)
time.Sleep(timeOff)
continue
}

View file

@ -65,23 +65,36 @@ type Plugin struct {
// Manifest of the plugin (see above)
Manifest *Manifest `json:"-"`
activatErr error
activateOnce sync.Once
// error produced by activation
activateErr error
// specifies if the activation sequence is completed (not whether it was successful)
activated bool
// wait for activation to finish
activateWait *sync.Cond
}
func newLocalPlugin(name, addr string) *Plugin {
return &Plugin{
Name: name,
Addr: addr,
TLSConfig: tlsconfig.Options{InsecureSkipVerify: true},
Name: name,
Addr: addr,
TLSConfig: tlsconfig.Options{InsecureSkipVerify: true},
activateWait: sync.NewCond(&sync.Mutex{}),
}
}
func (p *Plugin) activate() error {
p.activateOnce.Do(func() {
p.activatErr = p.activateWithLock()
})
return p.activatErr
p.activateWait.L.Lock()
if p.activated {
p.activateWait.L.Unlock()
return p.activateErr
}
p.activateErr = p.activateWithLock()
p.activated = true
p.activateWait.L.Unlock()
p.activateWait.Broadcast()
return p.activateErr
}
func (p *Plugin) activateWithLock() error {
@ -108,7 +121,19 @@ func (p *Plugin) activateWithLock() error {
return nil
}
func (p *Plugin) waitActive() error {
p.activateWait.L.Lock()
for !p.activated {
p.activateWait.Wait()
}
p.activateWait.L.Unlock()
return p.activateErr
}
func (p *Plugin) implements(kind string) bool {
if err := p.waitActive(); err != nil {
return false
}
for _, driver := range p.Manifest.Implements {
if driver == kind {
return true
@ -221,7 +246,7 @@ func GetAll(imp string) ([]*Plugin, error) {
var out []*Plugin
for pl := range chPl {
if pl.err != nil {
logrus.Error(err)
logrus.Error(pl.err)
continue
}
if pl.pl.implements(imp) {

View file
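
The plugins.go rewrite above swaps sync.Once for a sync.Cond so that callers of implements() can block in waitActive() until activation has finished (successfully or not) instead of racing a half-activated plugin. A minimal sketch of the same wait-for-completion pattern, with invented names:

package main

import (
	"fmt"
	"sync"
	"time"
)

// activator runs an activation function once and lets other goroutines wait for it.
type activator struct {
	cond *sync.Cond
	done bool
	err  error
}

func newActivator() *activator {
	return &activator{cond: sync.NewCond(&sync.Mutex{})}
}

// activate performs run() the first time it is called; later calls just return the
// stored result. The broadcast wakes every goroutine parked in wait().
func (a *activator) activate(run func() error) error {
	a.cond.L.Lock()
	if a.done {
		a.cond.L.Unlock()
		return a.err
	}
	a.err = run()
	a.done = true
	a.cond.L.Unlock()
	a.cond.Broadcast()
	return a.err
}

// wait blocks until activate has completed, then reports its error.
func (a *activator) wait() error {
	a.cond.L.Lock()
	for !a.done {
		a.cond.Wait()
	}
	a.cond.L.Unlock()
	return a.err
}

func main() {
	a := newActivator()
	go a.activate(func() error { time.Sleep(10 * time.Millisecond); return nil })
	fmt.Println("activation error:", a.wait())
}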

@ -60,7 +60,7 @@ To build the Docker daemon, you will additionally need:
* btrfs-progs version 3.16.1 or later (unless using an older version is
absolutely necessary, in which case 3.8 is the minimum)
* libseccomp version 2.2.1 or later (for build tag seccomp)
* yubico-piv-tool version 1.1.0 or later
* yubico-piv-tool version 1.1.0 or later (for experimental)
Be sure to also check out Docker's Dockerfile for the most up-to-date list of
these build-time dependencies.

View file

@ -270,8 +270,9 @@ docker build -t docker .
# static binaries are still pushed to s3
docker run \
-e AWS_S3_BUCKET=test.docker.com \
-e AWS_ACCESS_KEY \
-e AWS_SECRET_KEY \
-e AWS_ACCESS_KEY_ID \
-e AWS_SECRET_ACCESS_KEY \
-e AWS_DEFAULT_REGION \
-i -t --privileged \
docker \
hack/release.sh
@ -434,8 +435,9 @@ docker build -t docker .
# static binaries are still pushed to s3
docker run \
-e AWS_S3_BUCKET=get.docker.com \
-e AWS_ACCESS_KEY \
-e AWS_SECRET_KEY \
-e AWS_ACCESS_KEY_ID \
-e AWS_SECRET_ACCESS_KEY \
-e AWS_DEFAULT_REGION \
-i -t --privileged \
docker \
hack/release.sh

View file

@ -244,8 +244,9 @@ func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.Host
// Validate if the given hostname is RFC 1123 (https://tools.ietf.org/html/rfc1123) compliant.
hostname := *flHostname
if hostname != "" {
// Linux hostname is limited to HOST_NAME_MAX=64, not including the terminating null byte.
matched, _ := regexp.MatchString("^(([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])\\.)*([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])$", hostname)
if !matched {
if len(hostname) > 64 || !matched {
return nil, nil, nil, cmd, fmt.Errorf("invalid hostname format for --hostname: %s", hostname)
}
}

View file
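
The opts.go change above rejects hostnames longer than Linux's HOST_NAME_MAX (64 bytes) before applying the RFC 1123 pattern, which is what the new TestRunTooLongHostname cases exercise. A minimal sketch of that validation, reusing the pattern shown in the diff:

package main

import (
	"fmt"
	"regexp"
)

// hostnamePattern is the RFC 1123 host name pattern used in the diff.
var hostnamePattern = regexp.MustCompile(`^(([[:alnum:]]|[[:alnum:]][[:alnum:]\-]*[[:alnum:]])\.)*([[:alnum:]]|[[:alnum:]][[:alnum:]\-]*[[:alnum:]])$`)

// validHostname enforces both the 64-byte limit and the RFC 1123 format.
func validHostname(hostname string) error {
	if len(hostname) > 64 || !hostnamePattern.MatchString(hostname) {
		return fmt.Errorf("invalid hostname format for --hostname: %s", hostname)
	}
	return nil
}

func main() {
	fmt.Println(validHostname("hostname123")) // nil
	fmt.Println(validHostname("host_name"))   // error: underscore not allowed
	fmt.Println(validHostname("this-is-a-way-too-long-hostname-but-it-should-give-a-nice-error.local")) // error: longer than 64 bytes
}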

@ -390,6 +390,7 @@ func TestParseHostname(t *testing.T) {
"host-name": "host-name",
"hostname123": "hostname123",
"123hostname": "123hostname",
"hostname-of-64-bytes-long-should-be-valid-and-without-any-errors": "hostname-of-64-bytes-long-should-be-valid-and-without-any-errors",
}
invalidHostnames := map[string]string{
"^hostname": "invalid hostname format for --hostname: ^hostname",
@ -397,6 +398,7 @@ func TestParseHostname(t *testing.T) {
"host&name": "invalid hostname format for --hostname: host&name",
"-hostname": "invalid hostname format for --hostname: -hostname",
"host_name": "invalid hostname format for --hostname: host_name",
"hostname-of-65-bytes-long-should-be-invalid-and-be-given-an-error": "invalid hostname format for --hostname: hostname-of-65-bytes-long-should-be-invalid-and-be-given-an-error",
}
hostnameWithDomain := "--hostname=hostname.domainname"
hostnameWithDomainTld := "--hostname=hostname.domainname.tld"

View file

@ -97,10 +97,14 @@ func (cli *Client) getAPIPath(p string, query url.Values) string {
} else {
apiPath = fmt.Sprintf("%s%s", cli.basePath, p)
}
if len(query) > 0 {
apiPath += "?" + query.Encode()
u := &url.URL{
Path: apiPath,
}
return apiPath
if len(query) > 0 {
u.RawQuery = query.Encode()
}
return u.String()
}
// ClientVersion returns the version string associated with this

View file
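
getAPIPath above switches from string concatenation to url.URL so that characters like those in the new special-character network names are escaped properly when they appear in the request path. A minimal sketch of the same construction; the base path and query values below are made up.

package main

import (
	"fmt"
	"net/url"
)

// apiPath builds the request URI the way the diff does: put the raw path into a
// url.URL so String() escapes it, and attach the query only when it is non-empty.
func apiPath(base, p string, query url.Values) string {
	u := &url.URL{Path: base + p}
	if len(query) > 0 {
		u.RawQuery = query.Encode()
	}
	return u.String()
}

func main() {
	q := url.Values{}
	q.Set("filters", `{"name":["test@#$"]}`)
	fmt.Println(apiPath("/v1.23", "/networks/test@#$", q))
	// the '#' in the path is percent-encoded instead of truncating the URL at a fragment
}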

@ -127,9 +127,8 @@ func (s *VolumeStore) List() ([]volume.Volume, []string, error) {
s.locks.Lock(name)
storedV, exists := s.getNamed(name)
if !exists {
s.setNamed(v, "")
}
// Note: it's not safe to populate the cache here because the volume may have been
// deleted before we acquire a lock on its name
if exists && storedV.DriverName() != v.DriverName() {
logrus.Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), storedV.DriverName(), v.DriverName())
s.locks.Unlock(v.Name())

View file

@ -26,19 +26,20 @@ func (NoopVolume) Unmount() error { return nil }
// FakeVolume is a fake volume with a random name
type FakeVolume struct {
name string
name string
driverName string
}
// NewFakeVolume creates a new fake volume for testing
func NewFakeVolume(name string) volume.Volume {
return FakeVolume{name: name}
func NewFakeVolume(name string, driverName string) volume.Volume {
return FakeVolume{name: name, driverName: driverName}
}
// Name is the name of the volume
func (f FakeVolume) Name() string { return f.name }
// DriverName is the name of the driver
func (FakeVolume) DriverName() string { return "fake" }
func (f FakeVolume) DriverName() string { return f.driverName }
// Path is the filesystem path to the volume
func (FakeVolume) Path() string { return "fake" }
@ -72,7 +73,7 @@ func (d *FakeDriver) Create(name string, opts map[string]string) (volume.Volume,
if opts != nil && opts["error"] != "" {
return nil, fmt.Errorf(opts["error"])
}
v := NewFakeVolume(name)
v := NewFakeVolume(name, d.name)
d.vols[name] = v
return v, nil
}