Merge pull request #328 from thaJeztah/19.03_backport_jenkinsfile
[19.03 backport] Jenkinsfile and related test-changes
commit 9ae801cfd1
52 changed files with 977 additions and 1992 deletions
@@ -3,5 +3,4 @@ bundles
vendor/pkg
.go-pkg-cache
.git
hack/integration-cli-on-swarm/integration-cli-on-swarm
1 .github/CODEOWNERS vendored
@@ -12,6 +12,5 @@ daemon/graphdriver/overlay2/** @dmcgowan
daemon/graphdriver/windows/** @johnstep @jhowardmsft
daemon/logger/awslogs/** @samuelkarp
hack/** @tianon
hack/integration-cli-on-swarm/** @AkihiroSuda
plugin/** @cpuguy83
project/** @thaJeztah
5 .gitignore vendored
@@ -3,6 +3,7 @@
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
*.exe
*.exe~
*.gz
*.orig
test.main
.*.swp
@@ -19,6 +20,6 @@ contrib/builder/rpm/*/changelog
dockerversion/version_autogen.go
dockerversion/version_autogen_unix.go
vendor/pkg/
hack/integration-cli-on-swarm/integration-cli-on-swarm
coverage.txt
go-test-report.json
profile.out
junit-report.xml
47 Dockerfile
@@ -73,17 +73,6 @@ RUN set -x \
    esac \
    && rm -rf "$GOPATH"

FROM base AS docker-py
# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT ac922192959870774ad8428344d9faa0555f7ba6
RUN git clone https://github.com/docker/docker-py.git /build \
    && cd /build \
    && git checkout -q $DOCKER_PY_COMMIT

FROM base AS swagger
# Install go-swagger for validating swagger.yaml
ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb
@@ -94,7 +83,6 @@ RUN set -x \
    && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger \
    && rm -rf "$GOPATH"

FROM base AS frozen-images
RUN apt-get update && apt-get install -y jq ca-certificates --no-install-recommends
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
@@ -181,6 +169,12 @@ COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

FROM base AS gotestsum
ENV INSTALL_BINARY_NAME=gotestsum
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

FROM dev-base AS dockercli
ENV INSTALL_BINARY_NAME=dockercli
COPY hack/dockerfile/install/install.sh ./install.sh
@@ -228,25 +222,14 @@ RUN apt-get update && apt-get install -y \
    jq \
    libcap2-bin \
    libdevmapper-dev \
    # libffi-dev and libssl-dev appear to be required for compiling paramiko on s390x/ppc64le
    libffi-dev \
    libssl-dev \
    libudev-dev \
    libsystemd-dev \
    binutils-mingw-w64 \
    g++-mingw-w64-x86-64 \
    net-tools \
    pigz \
    python-backports.ssl-match-hostname \
    python-dev \
    # python-cffi appears to be required for compiling paramiko on s390x/ppc64le
    python-cffi \
    python-mock \
    python-pip \
    python-requests \
    python-setuptools \
    python-websocket \
    python-wheel \
    python3-pip \
    python3-setuptools \
    thin-provisioning-tools \
    vim \
    vim-common \
@@ -258,9 +241,13 @@ RUN apt-get update && apt-get install -y \
    libnet1 \
    libnl-3-200 \
    --no-install-recommends

RUN pip3 install yamllint==1.16.0

COPY --from=swagger /build/swagger* /usr/local/bin/
COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=gometalinter /build/ /usr/local/bin/
COPY --from=gotestsum /build/ /usr/local/bin/
COPY --from=tomlv /build/ /usr/local/bin/
COPY --from=vndr /build/ /usr/local/bin/
COPY --from=tini /build/ /usr/local/bin/
@@ -270,16 +257,6 @@ COPY --from=proxy /build/ /usr/local/bin/
COPY --from=dockercli /build/ /usr/local/cli
COPY --from=registry /build/registry* /usr/local/bin/
COPY --from=criu /build/ /usr/local/
COPY --from=docker-py /build/ /docker-py
# TODO: This is for the docker-py tests, which shouldn't really be needed for
# this image, but currently CI is expecting to run this image. This should be
# split out into a separate image, including all the `python-*` deps installed
# above.
RUN cd /docker-py \
    && pip install docker-pycreds==0.4.0 \
    && pip install paramiko==2.4.2 \
    && pip install yamllint==1.5.0 \
    && pip install -r test-requirements.txt
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=djs55/vpnkit@sha256:e508a17cfacc8fd39261d5b4e397df2b953690da577e2c987a47630cd0c42f8e /vpnkit /usr/local/bin/vpnkit.x86_64
759 Jenkinsfile vendored
@@ -1,154 +1,318 @@
def withGithubStatus(String context, Closure cl) {
  def setGithubStatus = { String state ->
    try {
      def backref = "${BUILD_URL}flowGraphTable/"
      def reposSourceURL = scm.repositories[0].getURIs()[0].toString()
      step(
        $class: 'GitHubCommitStatusSetter',
        contextSource: [$class: "ManuallyEnteredCommitContextSource", context: context],
        errorHandlers: [[$class: 'ShallowAnyErrorHandler']],
        reposSource: [$class: 'ManuallyEnteredRepositorySource', url: reposSourceURL],
        statusBackrefSource: [$class: 'ManuallyEnteredBackrefSource', backref: backref],
        statusResultSource: [$class: 'ConditionalStatusResultSource', results: [[$class: 'AnyBuildResult', state: state]]],
      )
    } catch (err) {
      echo "Exception from GitHubCommitStatusSetter for $context: $err"
    }
  }

  setGithubStatus 'PENDING'

  try {
    cl()
  } catch (err) {
    // AbortException signals a "normal" build failure.
    if (!(err instanceof hudson.AbortException)) {
      echo "Exception in withGithubStatus for $context: $err"
    }
    setGithubStatus 'FAILURE'
    throw err
  }
  setGithubStatus 'SUCCESS'
}

#!groovy
pipeline {
  agent none

  options {
    buildDiscarder(logRotator(daysToKeepStr: '30'))
    timeout(time: 3, unit: 'HOURS')
    timeout(time: 2, unit: 'HOURS')
    timestamps()
  }
  parameters {
    booleanParam(name: 'unit_validate', defaultValue: true, description: 'x86 unit tests and vendor check')
    booleanParam(name: 'janky', defaultValue: true, description: 'x86 Build/Test')
    booleanParam(name: 'experimental', defaultValue: true, description: 'x86 Experimental Build/Test ')
    booleanParam(name: 'z', defaultValue: true, description: 'IBM Z (s390x) Build/Test')
    booleanParam(name: 'powerpc', defaultValue: true, description: 'PowerPC (ppc64le) Build/Test')
    booleanParam(name: 'vendor', defaultValue: true, description: 'Vendor')
    booleanParam(name: 'windowsRS1', defaultValue: true, description: 'Windows 2016 (RS1) Build/Test')
    booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
    booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
    booleanParam(name: 'windowsRS5', defaultValue: false, description: 'Windows 2019 (RS5) Build/Test')
  }
  environment {
    DOCKER_BUILDKIT = '1'
    DOCKER_EXPERIMENTAL = '1'
    DOCKER_GRAPHDRIVER = 'overlay2'
    APT_MIRROR = 'cdn-fastly.deb.debian.org'
    CHECK_CONFIG_COMMIT = '78405559cfe5987174aa2cb6463b9b2c1b917255'
    TIMEOUT = '120m'
  }
  stages {
    stage('Build') {
      parallel {
        stage('unit-validate') {
          when {
            beforeAgent true
            expression { params.unit_validate }
          }
          agent { label 'amd64 && ubuntu-1804 && overlay2' }

          stages {
            stage("Print info") {
              steps {
                sh 'docker version'
                sh 'docker info'
                sh '''
                  echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
                  curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
                  && bash ${WORKSPACE}/check-config.sh || true
                '''
              }
            }
            stage("Build dev image") {
              steps {
                sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
              }
            }
            stage("Validate") {
              steps {
                sh '''
                  docker run --rm -t --privileged \
                    -v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
                    -v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
                    --name docker-pr$BUILD_NUMBER \
                    -e DOCKER_EXPERIMENTAL \
                    -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
                    -e DOCKER_GRAPHDRIVER \
                    docker:${GIT_COMMIT} \
                    hack/validate/default
                '''
              }
            }
stage("Docker-py") {
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh \
|
||||
dynbinary-daemon \
|
||||
test-docker-py
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/test-docker-py/junit-report.xml', allowEmptyResults: true
|
||||
|
||||
sh '''
|
||||
echo "Ensuring container killed."
|
||||
docker rm -vf docker-pr$BUILD_NUMBER || true
|
||||
'''
|
||||
|
||||
sh '''
|
||||
echo 'Chowning /workspace to jenkins user'
|
||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||
'''
|
||||
|
||||
sh '''
|
||||
echo 'Creating docker-py-bundles.tar.gz'
|
||||
tar -czf docker-py-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: 'docker-py-bundles.tar.gz'
|
||||
}
|
||||
}
|
||||
}
|
||||
stage("Static") {
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh binary-daemon
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage("Cross") {
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh cross
|
||||
'''
|
||||
}
|
||||
}
|
||||
// needs to be last stage that calls make.sh for the junit report to work
|
||||
stage("Unit tests") {
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/test/unit
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
|
||||
}
|
||||
}
|
||||
}
|
||||
stage("Validate vendor") {
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/validate/vendor
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage("Build e2e image") {
|
||||
steps {
|
||||
sh '''
|
||||
echo "Building e2e image"
|
||||
docker build --build-arg DOCKER_GITCOMMIT=${GIT_COMMIT} -t moby-e2e-test -f Dockerfile.e2e .
|
||||
'''
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
post {
|
||||
always {
|
||||
sh '''
|
||||
echo 'Ensuring container killed.'
|
||||
docker rm -vf docker-pr$BUILD_NUMBER || true
|
||||
'''
|
||||
|
||||
sh '''
|
||||
echo 'Chowning /workspace to jenkins user'
|
||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||
'''
|
||||
|
||||
sh '''
|
||||
echo 'Creating unit-bundles.tar.gz'
|
||||
tar -czvf unit-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: 'unit-bundles.tar.gz'
|
||||
}
|
||||
cleanup {
|
||||
sh 'make clean'
|
||||
deleteDir()
|
||||
}
|
||||
}
|
||||
}
|
||||
        stage('janky') {
          when {
            beforeAgent true
            expression { params.janky }
          }
          agent {
            node {
              label 'ubuntu-1604-overlay2-stable'
            }
          }
          agent { label 'amd64 && ubuntu-1804 && overlay2' }

          stages {
            stage("Print info") {
              steps {
                sh 'docker version'
                sh 'docker info'
                sh '''
                  echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
                  curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
                  && bash ${WORKSPACE}/check-config.sh || true
                '''
              }
            }
            stage("Build dev image") {
              steps {
                withCredentials([string(credentialsId: '52af932f-f13f-429e-8467-e7ff8b965cdb', variable: 'CODECOV_TOKEN')]) {
                  withGithubStatus('janky') {
                    sh '''
                      # todo: include ip_vs in base image
                      sudo modprobe ip_vs

                      GITCOMMIT=$(git rev-parse --short HEAD)
                      docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t docker:$GITCOMMIT .
                      docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
                    '''
                  }
                }
stage("Run tests") {
|
||||
steps {
|
||||
sh '''#!/bin/bash
|
||||
# bash is needed so 'jobs -p' works properly
|
||||
# it also accepts setting inline envvars for functions without explicitly exporting
|
||||
|
||||
run_tests() {
|
||||
[ -n "$TESTDEBUG" ] && rm= || rm=--rm;
|
||||
docker run $rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
|
||||
--name "$CONTAINER_NAME" \
|
||||
-e KEEPBUNDLE=1 \
|
||||
-e TESTDEBUG \
|
||||
-e TESTFLAGS \
|
||||
-e TEST_INTEGRATION_DEST \
|
||||
-e TEST_SKIP_INTEGRATION \
|
||||
-e TEST_SKIP_INTEGRATION_CLI \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e TIMEOUT \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh \
|
||||
"$1" \
|
||||
test-integration
|
||||
}
|
||||
|
||||
trap "exit" INT TERM
|
||||
trap 'pids=$(jobs -p); echo "Remaining pids to kill: [$pids]"; [ -z "$pids" ] || kill $pids' EXIT
|
||||
|
||||
CONTAINER_NAME=docker-pr$BUILD_NUMBER
|
||||
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_GITCOMMIT=${GITCOMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER=vfs \
|
||||
-e DOCKER_EXECDRIVER=native \
|
||||
-e CODECOV_TOKEN \
|
||||
-e GIT_SHA1=${GIT_COMMIT} \
|
||||
docker:$GITCOMMIT \
|
||||
hack/ci/janky
|
||||
'''
|
||||
sh '''
|
||||
GITCOMMIT=$(git rev-parse --short HEAD)
|
||||
echo "Building e2e image"
|
||||
docker build --build-arg DOCKER_GITCOMMIT=$GITCOMMIT -t moby-e2e-test -f Dockerfile.e2e .
|
||||
--name ${CONTAINER_NAME}-build \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh \
|
||||
dynbinary-daemon
|
||||
|
||||
# flaky + integration
|
||||
TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &
|
||||
|
||||
# integration-cli first set
|
||||
TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-check.f ^(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)" run_tests &
|
||||
|
||||
# integration-cli second set
|
||||
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-check.f ^(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)" run_tests &
|
||||
|
||||
set +x
|
||||
c=0
|
||||
for job in $(jobs -p); do
|
||||
wait ${job} || c=$?
|
||||
done
|
||||
exit $c
|
||||
'''
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
          post {
            always {
              sh '''
                echo "Ensuring container killed."
                docker rm -vf docker-pr$BUILD_NUMBER || true
              '''

              sh '''
                echo "Chowning /workspace to jenkins user"
                docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
              '''
              sh '''
                echo "Creating bundles.tar.gz"
                (find bundles -name '*.log' -o -name '*.prof' -o -name integration.test | xargs tar -czf bundles.tar.gz) || true
              '''
              archiveArtifacts artifacts: 'bundles.tar.gz'
            }
          }
        }
        stage('experimental') {
          when {
            beforeAgent true
            expression { params.experimental }
          }
          agent {
            node {
              label 'ubuntu-1604-aufs-stable'
            }
          }
          steps {
            withGithubStatus('experimental') {
              sh '''
                GITCOMMIT=$(git rev-parse --short HEAD)
                docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t docker:${GITCOMMIT}-exp .

                docker run --rm -t --privileged \
                  -v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
                  -e DOCKER_EXPERIMENTAL=y \
                  --name docker-pr-exp$BUILD_NUMBER \
                  -e DOCKER_GITCOMMIT=${GITCOMMIT} \
                  -e DOCKER_GRAPHDRIVER=vfs \
                  -e DOCKER_EXECDRIVER=native \
                  docker:${GITCOMMIT}-exp \
                  hack/ci/experimental
              '''
            }
          }
          post {
            always {
              sh '''
                echo "Ensuring container killed."
                docker rm -vf docker-pr-exp$BUILD_NUMBER || true
                echo "Creating janky-bundles.tar.gz"
                # exclude overlay2 directories
                find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf janky-bundles.tar.gz
              '''

                echo "Chowning /workspace to jenkins user"
                docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
              '''
              sh '''
                echo "Creating bundles.tar.gz"
                (find bundles -name '*.log' -o -name '*.prof' -o -name integration.test | xargs tar -czf bundles.tar.gz) || true
              '''
              archiveArtifacts artifacts: 'bundles.tar.gz'
              archiveArtifacts artifacts: 'janky-bundles.tar.gz'
            }
            cleanup {
              sh 'make clean'
              deleteDir()
            }
          }
        }
@@ -157,46 +321,166 @@ pipeline {
            beforeAgent true
            expression { params.z }
          }
          agent {
            node {
              label 's390x-ubuntu-1604'
            }
          }
          agent { label 's390x-ubuntu-1604' }
          // s390x machines run on Docker 18.06, and buildkit has some bugs on that version
          environment { DOCKER_BUILDKIT = '0' }

          stages {
            stage("Print info") {
              steps {
                withGithubStatus('z') {
                  sh 'docker version'
                  sh 'docker info'
                  sh '''
                    GITCOMMIT=$(git rev-parse --short HEAD)

                    test -f Dockerfile.s390x && \
                      docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t docker-s390x:$GITCOMMIT -f Dockerfile.s390x . || \
                      docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t docker-s390x:$GITCOMMIT -f Dockerfile .

                    docker run --rm -t --privileged \
                      -v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
                      --name docker-pr-s390x$BUILD_NUMBER \
                      -e DOCKER_GRAPHDRIVER=vfs \
                      -e DOCKER_EXECDRIVER=native \
                      -e TIMEOUT="300m" \
                      -e DOCKER_GITCOMMIT=${GITCOMMIT} \
                      docker-s390x:$GITCOMMIT \
                      hack/ci/z
                    echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
                    curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
                    && bash ${WORKSPACE}/check-config.sh || true
                  '''
                }
              }
stage("Build dev image") {
|
||||
steps {
|
||||
sh '''
|
||||
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} -f Dockerfile .
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage("Unit tests") {
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/test/unit
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
|
||||
}
|
||||
}
|
||||
}
|
||||
stage("Integration tests") {
|
||||
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e TEST_SKIP_INTEGRATION_CLI \
|
||||
-e TIMEOUT \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh \
|
||||
dynbinary \
|
||||
test-integration
|
||||
'''
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
          post {
            always {
              sh '''
                echo "Ensuring container killed."
                docker rm -vf docker-pr-s390x$BUILD_NUMBER || true
                docker rm -vf docker-pr$BUILD_NUMBER || true
              '''

                echo "Chowning /workspace to jenkins user"
                docker run --rm -v "$WORKSPACE:/workspace" s390x/busybox chown -R "$(id -u):$(id -g)" /workspace
              '''
              sh '''
                echo "Creating bundles.tar.gz"
                find bundles -name '*.log' | xargs tar -czf bundles.tar.gz
                echo "Chowning /workspace to jenkins user"
                docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
              '''
              archiveArtifacts artifacts: 'bundles.tar.gz'

              sh '''
                echo "Creating s390x-integration-bundles.tar.gz"
                # exclude overlay2 directories
                find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf s390x-integration-bundles.tar.gz
              '''

              archiveArtifacts artifacts: 's390x-integration-bundles.tar.gz'
            }
            cleanup {
              sh 'make clean'
              deleteDir()
            }
          }
        }
        stage('z-master') {
          when {
            beforeAgent true
            branch 'master'
            expression { params.z }
          }
          agent { label 's390x-ubuntu-1604' }
          // s390x machines run on Docker 18.06, and buildkit has some bugs on that version
          environment { DOCKER_BUILDKIT = '0' }

          stages {
            stage("Print info") {
              steps {
                sh 'docker version'
                sh 'docker info'
                sh '''
                  echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
                  curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
                  && bash ${WORKSPACE}/check-config.sh || true
                '''
              }
            }
            stage("Build dev image") {
              steps {
                sh '''
                  docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} -f Dockerfile .
                '''
              }
            }
            stage("Integration-cli tests") {
              environment { TEST_SKIP_INTEGRATION = '1' }
              steps {
                sh '''
                  docker run --rm -t --privileged \
                    -v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
                    --name docker-pr$BUILD_NUMBER \
                    -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
                    -e DOCKER_GRAPHDRIVER \
                    -e TEST_SKIP_INTEGRATION \
                    -e TIMEOUT \
                    docker:${GIT_COMMIT} \
                    hack/make.sh \
                    dynbinary \
                    test-integration
                '''
              }
            }
          }

          post {
            always {
              sh '''
                echo "Ensuring container killed."
                docker rm -vf docker-pr$BUILD_NUMBER || true
              '''

              sh '''
                echo "Chowning /workspace to jenkins user"
                docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
              '''

              sh '''
                echo "Creating s390x-integration-cli-bundles.tar.gz"
                find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf s390x-integration-cli-bundles.tar.gz
              '''

              archiveArtifacts artifacts: 's390x-integration-cli-bundles.tar.gz'
            }
            cleanup {
              sh 'make clean'
              deleteDir()
            }
          }
        }
@@ -205,77 +489,164 @@ pipeline {
            beforeAgent true
            expression { params.powerpc }
          }
          agent {
            node {
              label 'ppc64le-ubuntu-1604'
            }
          }
          agent { label 'ppc64le-ubuntu-1604' }
          // power machines run on Docker 18.06, and buildkit has some bugs on that version
          environment { DOCKER_BUILDKIT = '0' }

          stages {
            stage("Print info") {
              steps {
                withGithubStatus('powerpc') {
                  sh 'docker version'
                  sh 'docker info'
                  sh '''
                    GITCOMMIT=$(git rev-parse --short HEAD)

                    test -f Dockerfile.ppc64le && \
                      docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t docker-powerpc:$GITCOMMIT -f Dockerfile.ppc64le . || \
                      docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t docker-powerpc:$GITCOMMIT -f Dockerfile .

                    docker run --rm -t --privileged \
                      -v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
                      --name docker-pr-power$BUILD_NUMBER \
                      -e DOCKER_GRAPHDRIVER=vfs \
                      -e DOCKER_EXECDRIVER=native \
                      -e DOCKER_GITCOMMIT=${GITCOMMIT} \
                      -e TIMEOUT="180m" \
                      docker-powerpc:$GITCOMMIT \
                      hack/ci/powerpc
                    echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
                    curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
                    && bash ${WORKSPACE}/check-config.sh || true
                  '''
                }
              }
stage("Build dev image") {
|
||||
steps {
|
||||
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} -f Dockerfile .'
|
||||
}
|
||||
}
|
||||
stage("Unit tests") {
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/test/unit
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
|
||||
}
|
||||
}
|
||||
}
|
||||
stage("Integration tests") {
|
||||
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e TEST_SKIP_INTEGRATION_CLI \
|
||||
-e TIMEOUT \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh \
|
||||
dynbinary \
|
||||
test-integration
|
||||
'''
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
          post {
            always {
              sh '''
                echo "Ensuring container killed."
                docker rm -vf docker-pr-power$BUILD_NUMBER || true
                docker rm -vf docker-pr$BUILD_NUMBER || true
              '''

                echo "Chowning /workspace to jenkins user"
                docker run --rm -v "$WORKSPACE:/workspace" ppc64le/busybox chown -R "$(id -u):$(id -g)" /workspace
              '''
              sh '''
                echo "Creating bundles.tar.gz"
                find bundles -name '*.log' | xargs tar -czf bundles.tar.gz
                echo "Chowning /workspace to jenkins user"
                docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
              '''
              archiveArtifacts artifacts: 'bundles.tar.gz'

              sh '''
                echo "Creating powerpc-integration-bundles.tar.gz"
                # exclude overlay2 directories
                find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf powerpc-integration-bundles.tar.gz
              '''

              archiveArtifacts artifacts: 'powerpc-integration-bundles.tar.gz'
            }
            cleanup {
              sh 'make clean'
              deleteDir()
            }
          }
        }
        stage('vendor') {
        stage('powerpc-master') {
          when {
            beforeAgent true
            expression { params.vendor }
          }
          agent {
            node {
              label 'ubuntu-1604-aufs-stable'
            }
            branch 'master'
            expression { params.powerpc }
          }
          agent { label 'ppc64le-ubuntu-1604' }
          // power machines run on Docker 18.06, and buildkit has some bugs on that version
          environment { DOCKER_BUILDKIT = '0' }

          stages {
            stage("Print info") {
              steps {
                withGithubStatus('vendor') {
                  sh 'docker version'
                  sh 'docker info'
                  sh '''
                    GITCOMMIT=$(git rev-parse --short HEAD)

                    docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t dockerven:$GITCOMMIT .

                    docker run --rm -t --privileged \
                      --name dockerven-pr$BUILD_NUMBER \
                      -e DOCKER_GRAPHDRIVER=vfs \
                      -e DOCKER_EXECDRIVER=native \
                      -v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
                      -e DOCKER_GITCOMMIT=${GITCOMMIT} \
                      -e TIMEOUT=120m dockerven:$GITCOMMIT \
                      hack/validate/vendor
                    echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
                    curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
                    && bash ${WORKSPACE}/check-config.sh || true
                  '''
                }
              }
            stage("Build dev image") {
              steps {
                sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} -f Dockerfile .'
              }
            }
            stage("Integration-cli tests") {
              environment { TEST_SKIP_INTEGRATION = '1' }
              steps {
                sh '''
                  docker run --rm -t --privileged \
                    -v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
                    --name docker-pr$BUILD_NUMBER \
                    -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
                    -e DOCKER_GRAPHDRIVER \
                    -e TEST_SKIP_INTEGRATION \
                    -e TIMEOUT \
                    docker:${GIT_COMMIT} \
                    hack/make.sh \
                    dynbinary \
                    test-integration
                '''
              }
            }
          }

          post {
            always {
              sh '''
                echo "Ensuring container killed."
                docker rm -vf docker-pr$BUILD_NUMBER || true
              '''

              sh '''
                echo "Chowning /workspace to jenkins user"
                docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
              '''

              sh '''
                echo "Creating powerpc-integration-cli-bundles.tar.gz"
                find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf powerpc-integration-cli-bundles.tar.gz
              '''

              archiveArtifacts artifacts: 'powerpc-integration-cli-bundles.tar.gz'
            }
            cleanup {
              sh 'make clean'
              deleteDir()
            }
          }
        }
        stage('windowsRS1') {
          when {
@@ -288,8 +659,15 @@ pipeline {
              customWorkspace 'c:\\gopath\\src\\github.com\\docker\\docker'
            }
          }
          stages {
            stage("Print info") {
              steps {
                sh 'docker version'
                sh 'docker info'
              }
            }
            stage("Run tests") {
              steps {
                withGithubStatus('windowsRS1') {
                  powershell '''
                    $ErrorActionPreference = 'Stop'
                    .\\hack\\ci\\windows.ps1
@@ -298,6 +676,7 @@ pipeline {
                }
              }
            }
          }
        stage('windowsRS5-process') {
          when {
            beforeAgent true
@@ -309,8 +688,15 @@ pipeline {
              customWorkspace 'c:\\gopath\\src\\github.com\\docker\\docker'
            }
          }
          stages {
            stage("Print info") {
              steps {
                sh 'docker version'
                sh 'docker info'
              }
            }
            stage("Run tests") {
              steps {
                withGithubStatus('windowsRS5-process') {
                  powershell '''
                    $ErrorActionPreference = 'Stop'
                    .\\hack\\ci\\windows.ps1
@@ -322,4 +708,5 @@ pipeline {
            }
          }
        }
      }
    }
30 Makefile
@@ -50,11 +50,18 @@ DOCKER_ENVS := \
    -e DOCKER_PORT \
    -e DOCKER_REMAP_ROOT \
    -e DOCKER_STORAGE_OPTS \
    -e DOCKER_TEST_HOST \
    -e DOCKER_USERLANDPROXY \
    -e DOCKERD_ARGS \
    -e TEST_INTEGRATION_DEST \
    -e TEST_INTEGRATION_DIR \
    -e TEST_SKIP_INTEGRATION \
    -e TEST_SKIP_INTEGRATION_CLI \
    -e TESTDEBUG \
    -e TESTDIRS \
    -e TESTFLAGS \
    -e TESTFLAGS_INTEGRATION \
    -e TESTFLAGS_INTEGRATION_CLI \
    -e TIMEOUT \
    -e VALIDATE_REPO \
    -e VALIDATE_BRANCH \
@@ -105,9 +112,6 @@ export BUILD_APT_MIRROR

SWAGGER_DOCS_PORT ?= 9000

INTEGRATION_CLI_MASTER_IMAGE := $(if $(INTEGRATION_CLI_MASTER_IMAGE), $(INTEGRATION_CLI_MASTER_IMAGE), integration-cli-master)
INTEGRATION_CLI_WORKER_IMAGE := $(if $(INTEGRATION_CLI_WORKER_IMAGE), $(INTEGRATION_CLI_WORKER_IMAGE), integration-cli-worker)

define \n


@@ -182,8 +186,13 @@ test-docker-py: build ## run the docker-py tests

test-integration-cli: test-integration ## (DEPRECATED) use test-integration

ifneq ($(and $(TEST_SKIP_INTEGRATION),$(TEST_SKIP_INTEGRATION_CLI)),)
test-integration:
    @echo Both integrations suites skipped per environment variables
else
test-integration: build ## run the integration tests
    $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration
endif

test-integration-flaky: build ## run the stress test for all new integration tests
    $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration-flaky
@@ -212,18 +221,3 @@ swagger-docs: ## preview the API documentation
    -e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \
    -p $(SWAGGER_DOCS_PORT):80 \
    bfirsh/redoc:1.6.2

build-integration-cli-on-swarm: build ## build images and binary for running integration-cli on Swarm in parallel
    @echo "Building hack/integration-cli-on-swarm (if build fails, please refer to hack/integration-cli-on-swarm/README.md)"
    go build -buildmode=pie -o ./hack/integration-cli-on-swarm/integration-cli-on-swarm ./hack/integration-cli-on-swarm/host
    @echo "Building $(INTEGRATION_CLI_MASTER_IMAGE)"
    docker build -t $(INTEGRATION_CLI_MASTER_IMAGE) hack/integration-cli-on-swarm/agent
    @echo "Building $(INTEGRATION_CLI_WORKER_IMAGE) from $(DOCKER_IMAGE)"
    $(eval tmp := integration-cli-worker-tmp)
    # We mount pkgcache, but not bundle (bundle needs to be baked into the image)
    # For avoiding bakings DOCKER_GRAPHDRIVER and so on to image, we cannot use $(DOCKER_ENVS) here
    docker run -t -d --name $(tmp) -e DOCKER_GITCOMMIT -e BUILDFLAGS --privileged $(DOCKER_IMAGE) top
    docker exec $(tmp) hack/make.sh build-integration-test-binary dynbinary
    docker exec $(tmp) go build -buildmode=pie -o /worker github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker
    docker commit -c 'ENTRYPOINT ["/worker"]' $(tmp) $(INTEGRATION_CLI_WORKER_IMAGE)
    docker rm -f $(tmp)
hack/ci/experimental
@@ -2,7 +2,7 @@
# Entrypoint for jenkins experimental CI
set -eu -o pipefail

export DOCKER_EXPERIMENTAL=y
export DOCKER_EXPERIMENTAL=1

hack/make.sh \
    binary-daemon \
hack/ci/janky
@@ -4,15 +4,11 @@ set -eu -o pipefail

hack/validate/default
hack/test/unit
bash <(curl -s https://codecov.io/bash) \
    -f coverage.txt \
    -C "$GIT_SHA1" || \
    echo 'Codecov failed to upload'

hack/make.sh \
    binary-daemon \
    dynbinary \
    test-docker-py \
    test-integration-flaky \
    test-integration \
    cross \
    test-docker-py
    cross
11 hack/dockerfile/install/gotestsum.installer Executable file
@@ -0,0 +1,11 @@
#!/bin/sh

GOTESTSUM_COMMIT='v0.3.5'

install_gotestsum() {
    echo "Installing gotestsum version $GOTESTSUM_COMMIT"
    go get -d gotest.tools/gotestsum
    cd "$GOPATH/src/gotest.tools/gotestsum"
    git checkout -q "$GOTESTSUM_COMMIT"
    go build -buildmode=pie -o "${PREFIX}/gotestsum" 'gotest.tools/gotestsum'
}
hack/integration-cli-on-swarm/README.md
@@ -1,68 +0,0 @@
# Integration Testing on Swarm

IT on Swarm allows you to execute integration test in parallel across a Docker Swarm cluster

## Architecture

### Master service

 - Works as a funker caller
 - Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`)

### Worker service

 - Works as a funker callee
 - Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration` using the bind-mounted API socket (`docker.sock`)

### Client

 - Controls master and workers via `docker stack`
 - No need to have a local daemon

Typically, the master and workers are supposed to be running on a cloud environment,
while the client is supposed to be running on a laptop, e.g. Docker for Mac/Windows.

## Requirement

 - Docker daemon 1.13 or later
 - Private registry for distributed execution with multiple nodes

## Usage

### Step 1: Prepare images

    $ make build-integration-cli-on-swarm

Following environment variables are known to work in this step:

 - `BUILDFLAGS`

Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`.

### Step 2: Execute tests

    $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest

Following environment variables are known to work in this step:

 - `DOCKER_GRAPHDRIVER`
 - `DOCKER_EXPERIMENTAL`

#### Flags

Basic flags:

 - `-replicas N`: the number of worker service replicas. i.e. degree of parallelism.
 - `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
 - `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only single node and hence you do not need a private registry, you do not need to specify `-push-worker-image`.

Experimental flags for mitigating makespan nonuniformity:

 - `-shuffle`: Shuffle the test filter strings

Flags for debugging IT on Swarm itself:

 - `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default(0), the timestamp is used.
 - `-filters-file FILE`: the file contains `-check.f` strings. By default, the file is automatically generated.
 - `-dry-run`: skip the actual workload
 - `keep-executor`: do not auto-remove executor containers, which is used for running privileged programs on Swarm
@@ -1,6 +0,0 @@
# this Dockerfile is solely used for the master image.
# Please refer to the top-level Makefile for the worker image.
FROM golang:1.7
ADD . /go/src/github.com/docker/docker/hack/integration-cli-on-swarm/agent
RUN go build -buildmode=pie -o /master github.com/docker/docker/hack/integration-cli-on-swarm/agent/master
ENTRYPOINT ["/master"]
@@ -1,132 +0,0 @@
package main

import (
    "encoding/json"
    "fmt"
    "log"
    "strings"
    "sync"
    "sync/atomic"
    "time"

    "github.com/bfirsh/funker-go"
    "github.com/docker/docker/hack/integration-cli-on-swarm/agent/types"
)

const (
    // funkerRetryTimeout is for the issue https://github.com/bfirsh/funker/issues/3
    // When all the funker replicas are busy in their own job, we cannot connect to funker.
    funkerRetryTimeout  = 1 * time.Hour
    funkerRetryDuration = 1 * time.Second
)

// ticker is needed for some CI (e.g., on Travis, job is aborted when no output emitted for 10 minutes)
func ticker(d time.Duration) chan struct{} {
    t := time.NewTicker(d)
    stop := make(chan struct{})
    go func() {
        for {
            select {
            case <-t.C:
                log.Printf("tick (just for keeping CI job active) per %s", d.String())
            case <-stop:
                t.Stop()
            }
        }
    }()
    return stop
}

func executeTests(funkerName string, testChunks [][]string) error {
    tickerStopper := ticker(9*time.Minute + 55*time.Second)
    defer func() {
        close(tickerStopper)
    }()
    begin := time.Now()
    log.Printf("Executing %d chunks in parallel, against %q", len(testChunks), funkerName)
    var wg sync.WaitGroup
    var passed, failed uint32
    for chunkID, tests := range testChunks {
        log.Printf("Executing chunk %d (contains %d test filters)", chunkID, len(tests))
        wg.Add(1)
        go func(chunkID int, tests []string) {
            defer wg.Done()
            chunkBegin := time.Now()
            result, err := executeTestChunkWithRetry(funkerName, types.Args{
                ChunkID: chunkID,
                Tests:   tests,
            })
            if result.RawLog != "" {
                for _, s := range strings.Split(result.RawLog, "\n") {
                    log.Printf("Log (chunk %d): %s", chunkID, s)
                }
            }
            if err != nil {
                log.Printf("Error while executing chunk %d: %v",
                    chunkID, err)
                atomic.AddUint32(&failed, 1)
            } else {
                if result.Code == 0 {
                    atomic.AddUint32(&passed, 1)
                } else {
                    atomic.AddUint32(&failed, 1)
                }
                log.Printf("Finished chunk %d [%d/%d] with %d test filters in %s, code=%d.",
                    chunkID, passed+failed, len(testChunks), len(tests),
                    time.Since(chunkBegin), result.Code)
            }
        }(chunkID, tests)
    }
    wg.Wait()
    // TODO: print actual tests rather than chunks
    log.Printf("Executed %d chunks in %s. PASS: %d, FAIL: %d.",
        len(testChunks), time.Since(begin), passed, failed)
    if failed > 0 {
        return fmt.Errorf("%d chunks failed", failed)
    }
    return nil
}

func executeTestChunk(funkerName string, args types.Args) (types.Result, error) {
    ret, err := funker.Call(funkerName, args)
    if err != nil {
        return types.Result{}, err
    }
    tmp, err := json.Marshal(ret)
    if err != nil {
        return types.Result{}, err
    }
    var result types.Result
    err = json.Unmarshal(tmp, &result)
    return result, err
}

func executeTestChunkWithRetry(funkerName string, args types.Args) (types.Result, error) {
    begin := time.Now()
    for i := 0; time.Since(begin) < funkerRetryTimeout; i++ {
        result, err := executeTestChunk(funkerName, args)
        if err == nil {
            log.Printf("executeTestChunk(%q, %d) returned code %d in trial %d", funkerName, args.ChunkID, result.Code, i)
            return result, nil
        }
        if errorSeemsInteresting(err) {
            log.Printf("Error while calling executeTestChunk(%q, %d), will retry (trial %d): %v",
                funkerName, args.ChunkID, i, err)
        }
        // TODO: non-constant sleep
        time.Sleep(funkerRetryDuration)
    }
    return types.Result{}, fmt.Errorf("could not call executeTestChunk(%q, %d) in %v", funkerName, args.ChunkID, funkerRetryTimeout)
}

// errorSeemsInteresting returns true if err does not seem about https://github.com/bfirsh/funker/issues/3
func errorSeemsInteresting(err error) bool {
    boringSubstrs := []string{"connection refused", "connection reset by peer", "no such host", "transport endpoint is not connected", "no route to host"}
    errS := err.Error()
    for _, boringS := range boringSubstrs {
        if strings.Contains(errS, boringS) {
            return false
        }
    }
    return true
}
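For readers skimming the deleted master code above: executeTestChunkWithRetry retries a funker call for up to an hour, staying quiet about failures that look like the known funker connectivity issue (bfirsh/funker#3). A minimal, self-contained sketch of that filtering (errorSeemsInteresting is copied from the file above; main and its sample errors are hypothetical, added for illustration only):

package main

import (
    "errors"
    "fmt"
    "strings"
)

// Copied from the deleted call.go: errors matching the known funker
// connectivity issue are "boring" and are retried without extra logging.
func errorSeemsInteresting(err error) bool {
    boringSubstrs := []string{"connection refused", "connection reset by peer", "no such host", "transport endpoint is not connected", "no route to host"}
    for _, boring := range boringSubstrs {
        if strings.Contains(err.Error(), boring) {
            return false
        }
    }
    return true
}

func main() {
    fmt.Println(errorSeemsInteresting(errors.New("dial tcp: connection refused"))) // false: retried quietly
    fmt.Println(errorSeemsInteresting(errors.New("unexpected EOF")))               // true: logged, then retried
}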
@@ -1,65 +0,0 @@
package main

import (
    "errors"
    "flag"
    "io/ioutil"
    "log"
    "strings"
)

func main() {
    if err := xmain(); err != nil {
        log.Fatalf("fatal error: %v", err)
    }
}

func xmain() error {
    workerService := flag.String("worker-service", "", "Name of worker service")
    chunks := flag.Int("chunks", 0, "Number of chunks")
    input := flag.String("input", "", "Path to input file")
    randSeed := flag.Int64("rand-seed", int64(0), "Random seed")
    shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity")
    flag.Parse()
    if *workerService == "" {
        return errors.New("worker-service unset")
    }
    if *chunks == 0 {
        return errors.New("chunks unset")
    }
    if *input == "" {
        return errors.New("input unset")
    }

    tests, err := loadTests(*input)
    if err != nil {
        return err
    }
    testChunks := chunkTests(tests, *chunks, *shuffle, *randSeed)
    log.Printf("Loaded %d tests (%d chunks)", len(tests), len(testChunks))
    return executeTests(*workerService, testChunks)
}

func chunkTests(tests []string, numChunks int, shuffle bool, randSeed int64) [][]string {
    // shuffling (experimental) mitigates makespan nonuniformity
    // Not sure this can cause some locality problem..
    if shuffle {
        shuffleStrings(tests, randSeed)
    }
    return chunkStrings(tests, numChunks)
}

func loadTests(filename string) ([]string, error) {
    b, err := ioutil.ReadFile(filename)
    if err != nil {
        return nil, err
    }
    var tests []string
    for _, line := range strings.Split(string(b), "\n") {
        s := strings.TrimSpace(line)
        if s != "" {
            tests = append(tests, s)
        }
    }
    return tests, nil
}
@@ -1,28 +0,0 @@
package main

import (
    "math/rand"
)

// chunkStrings chunks the string slice
func chunkStrings(x []string, numChunks int) [][]string {
    var result [][]string
    chunkSize := (len(x) + numChunks - 1) / numChunks
    for i := 0; i < len(x); i += chunkSize {
        ub := i + chunkSize
        if ub > len(x) {
            ub = len(x)
        }
        result = append(result, x[i:ub])
    }
    return result
}

// shuffleStrings shuffles strings
func shuffleStrings(x []string, seed int64) {
    r := rand.New(rand.NewSource(seed))
    for i := range x {
        j := r.Intn(i + 1)
        x[i], x[j] = x[j], x[i]
    }
}
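The chunker above was the heart of the removed work distribution: tests are split into ceil(len/numChunks)-sized contiguous chunks, optionally shuffled first. A runnable sketch of its behavior (chunkStrings is copied verbatim from the deleted file; the sample test names are hypothetical):

package main

import "fmt"

// chunkStrings, as in the deleted chunker: contiguous slices of size
// ceil(len(x)/numChunks), preserving input order.
func chunkStrings(x []string, numChunks int) [][]string {
    var result [][]string
    chunkSize := (len(x) + numChunks - 1) / numChunks
    for i := 0; i < len(x); i += chunkSize {
        ub := i + chunkSize
        if ub > len(x) {
            ub = len(x)
        }
        result = append(result, x[i:ub])
    }
    return result
}

func main() {
    tests := []string{"TestA", "TestB", "TestC", "TestD", "TestE"}
    for i, chunk := range chunkStrings(tests, 2) {
        fmt.Printf("chunk %d: %v\n", i, chunk)
    }
    // Output:
    // chunk 0: [TestA TestB TestC]
    // chunk 1: [TestD TestE]
}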
@@ -1,63 +0,0 @@
package main

import (
    "fmt"
    "reflect"
    "testing"
    "time"
)

func generateInput(inputLen int) []string {
    var input []string
    for i := 0; i < inputLen; i++ {
        input = append(input, fmt.Sprintf("s%d", i))
    }

    return input
}

func testChunkStrings(t *testing.T, inputLen, numChunks int) {
    t.Logf("inputLen=%d, numChunks=%d", inputLen, numChunks)
    input := generateInput(inputLen)
    result := chunkStrings(input, numChunks)
    t.Logf("result has %d chunks", len(result))
    var inputReconstructedFromResult []string
    for i, chunk := range result {
        t.Logf("chunk %d has %d elements", i, len(chunk))
        inputReconstructedFromResult = append(inputReconstructedFromResult, chunk...)
    }
    if !reflect.DeepEqual(input, inputReconstructedFromResult) {
        t.Fatal("input != inputReconstructedFromResult")
    }
}

func TestChunkStrings_4_4(t *testing.T) {
    testChunkStrings(t, 4, 4)
}

func TestChunkStrings_4_1(t *testing.T) {
    testChunkStrings(t, 4, 1)
}

func TestChunkStrings_1_4(t *testing.T) {
    testChunkStrings(t, 1, 4)
}

func TestChunkStrings_1000_8(t *testing.T) {
    testChunkStrings(t, 1000, 8)
}

func TestChunkStrings_1000_9(t *testing.T) {
    testChunkStrings(t, 1000, 9)
}

func testShuffleStrings(t *testing.T, inputLen int, seed int64) {
    t.Logf("inputLen=%d, seed=%d", inputLen, seed)
    x := generateInput(inputLen)
    shuffleStrings(x, seed)
    t.Logf("shuffled: %v", x)
}

func TestShuffleStrings_100(t *testing.T) {
    testShuffleStrings(t, 100, time.Now().UnixNano())
}
@@ -1,18 +0,0 @@
package types

// Args is the type for funker args
type Args struct {
    // ChunkID is an unique number of the chunk
    ChunkID int `json:"chunk_id"`
    // Tests is the set of the strings that are passed as `-check.f` filters
    Tests []string `json:"tests"`
}

// Result is the type for funker result
type Result struct {
    // ChunkID corresponds to Args.ChunkID
    ChunkID int `json:"chunk_id"`
    // Code is the exit code
    Code int `json:"code"`
    RawLog string `json:"raw_log"`
}
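These structs are what funker serialized between master and worker, so the json tags above define the wire format. A small sketch of the resulting encoding (Args is copied from the deleted types package; the sample values are hypothetical):

package main

import (
    "encoding/json"
    "fmt"
)

// Args mirrors the deleted funker argument type; the json tags fix the wire format.
type Args struct {
    ChunkID int      `json:"chunk_id"`
    Tests   []string `json:"tests"`
}

func main() {
    b, err := json.Marshal(Args{ChunkID: 1, Tests: []string{"TestDockerSuite"}})
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b)) // {"chunk_id":1,"tests":["TestDockerSuite"]}
}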
@@ -1,2 +0,0 @@
# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773
191 hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/LICENSE generated vendored
@@ -1,191 +0,0 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2016 Docker, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
50 hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/call.go generated vendored
@ -1,50 +0,0 @@
package funker

import (
    "encoding/json"
    "io/ioutil"
    "net"
    "time"
)

// Call a Funker function
func Call(name string, args interface{}) (interface{}, error) {
    argsJSON, err := json.Marshal(args)
    if err != nil {
        return nil, err
    }

    addr, err := net.ResolveTCPAddr("tcp", name+":9999")
    if err != nil {
        return nil, err
    }

    conn, err := net.DialTCP("tcp", nil, addr)
    if err != nil {
        return nil, err
    }
    // Keepalive is a workaround for docker/docker#29655 .
    // The implementation of FIN_WAIT2 seems weird on Swarm-mode.
    // It seems to always refuse any packet after 60 seconds.
    //
    // TODO: remove this workaround if the issue gets resolved on the Docker side
    if err := conn.SetKeepAlive(true); err != nil {
        return nil, err
    }
    if err := conn.SetKeepAlivePeriod(30 * time.Second); err != nil {
        return nil, err
    }
    if _, err = conn.Write(argsJSON); err != nil {
        return nil, err
    }
    if err = conn.CloseWrite(); err != nil {
        return nil, err
    }
    retJSON, err := ioutil.ReadAll(conn)
    if err != nil {
        return nil, err
    }
    var ret interface{}
    err = json.Unmarshal(retJSON, &ret)
    return ret, err
}
54 hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/handle.go generated vendored
@ -1,54 +0,0 @@
package funker

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net"
    "reflect"
)

// Handle a Funker function.
func Handle(handler interface{}) error {
    handlerValue := reflect.ValueOf(handler)
    handlerType := handlerValue.Type()
    if handlerType.Kind() != reflect.Func || handlerType.NumIn() != 1 || handlerType.NumOut() != 1 {
        return fmt.Errorf("Handler must be a function with a single parameter and single return value.")
    }
    argsValue := reflect.New(handlerType.In(0))

    listener, err := net.Listen("tcp", ":9999")
    if err != nil {
        return err
    }
    conn, err := listener.Accept()
    if err != nil {
        return err
    }
    // We close the listener, because we only allow a single request.
    // Note that TCP "backlog" cannot be used for that purpose.
    // http://www.perlmonks.org/?node_id=940662
    if err = listener.Close(); err != nil {
        return err
    }
    argsJSON, err := ioutil.ReadAll(conn)
    if err != nil {
        return err
    }
    err = json.Unmarshal(argsJSON, argsValue.Interface())
    if err != nil {
        return err
    }

    ret := handlerValue.Call([]reflect.Value{argsValue.Elem()})[0].Interface()
    retJSON, err := json.Marshal(ret)
    if err != nil {
        return err
    }

    if _, err = conn.Write(retJSON); err != nil {
        return err
    }

    return conn.Close()
}
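For context on the vendored funker-go code removed above: Handle serves exactly one JSON-encoded request on TCP port 9999, and Call dials a peer by name and reads the reply until EOF. A minimal round-trip sketch under stated assumptions (the echoArgs type is hypothetical, "localhost" stands in for the Swarm service name Call would normally resolve, and the sleep is crude synchronization for the sketch only):

package main

import (
    "log"
    "time"

    funker "github.com/bfirsh/funker-go"
)

// echoArgs is a hypothetical argument type; funker round-trips it as JSON.
type echoArgs struct {
    Message string `json:"message"`
}

func main() {
    // Worker side: Handle accepts a single connection on :9999, decodes the
    // JSON arguments into the handler's parameter type, calls the handler,
    // and writes the JSON result back.
    go func() {
        if err := funker.Handle(func(args *echoArgs) string {
            return "echo: " + args.Message
        }); err != nil {
            log.Fatal(err)
        }
    }()
    time.Sleep(500 * time.Millisecond) // give the listener time to start

    // Caller side: Call marshals the args, dials name:9999, and decodes the reply.
    ret, err := funker.Call("localhost", echoArgs{Message: "hi"})
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("got %v", ret) // got echo: hi
}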
@ -1,118 +0,0 @@
package main

import (
    "bytes"
    "context"
    "fmt"
    "io"
    "os"
    "strings"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/api/types/mount"
    "github.com/docker/docker/client"
    "github.com/docker/docker/pkg/stdcopy"
)

// testChunkExecutor executes integration-cli binary.
// image needs to be the worker image itself. testFlags are OR-set of regexp for filtering tests.
type testChunkExecutor func(image string, tests []string) (int64, string, error)

func dryTestChunkExecutor() testChunkExecutor {
    return func(image string, tests []string) (int64, string, error) {
        return 0, fmt.Sprintf("DRY RUN (image=%q, tests=%v)", image, tests), nil
    }
}

// privilegedTestChunkExecutor invokes a privileged container from the worker
// service via bind-mounted API socket so as to execute the test chunk
func privilegedTestChunkExecutor(autoRemove bool) testChunkExecutor {
    return func(image string, tests []string) (int64, string, error) {
        cli, err := client.NewClientWithOpts(client.FromEnv)
        if err != nil {
            return 0, "", err
        }
        // propagate variables from the host (needs to be defined in the compose file)
        experimental := os.Getenv("DOCKER_EXPERIMENTAL")
        graphdriver := os.Getenv("DOCKER_GRAPHDRIVER")
        if graphdriver == "" {
            info, err := cli.Info(context.Background())
            if err != nil {
                return 0, "", err
            }
            graphdriver = info.Driver
        }
        // `daemon_dest` is similar to `$DEST` (e.g. `bundles/VERSION/test-integration`)
        // but it exists outside of `bundles` so as to make `$DOCKER_GRAPHDRIVER` work.
        //
        // Without this hack, `$DOCKER_GRAPHDRIVER` fails because of (e.g.) `overlay2 is not supported over overlayfs`
        //
        // see integration-cli/daemon/daemon.go
        daemonDest := "/daemon_dest"
        config := container.Config{
            Image: image,
            Env: []string{
                "TESTFLAGS=-check.f " + strings.Join(tests, "|"),
                "KEEPBUNDLE=1",
                "DOCKER_INTEGRATION_TESTS_VERIFIED=1", // for avoiding rebuilding integration-cli
                "DOCKER_EXPERIMENTAL=" + experimental,
                "DOCKER_GRAPHDRIVER=" + graphdriver,
                "DOCKER_INTEGRATION_DAEMON_DEST=" + daemonDest,
            },
            Labels: map[string]string{
                "org.dockerproject.integration-cli-on-swarm":         "",
                "org.dockerproject.integration-cli-on-swarm.comment": "this non-service container is created for running privileged programs on Swarm. you can remove this container manually if the corresponding service is already stopped.",
            },
            Entrypoint: []string{"hack/dind"},
            Cmd:        []string{"hack/make.sh", "test-integration"},
        }
        hostConfig := container.HostConfig{
            AutoRemove: autoRemove,
            Privileged: true,
            Mounts: []mount.Mount{
                {
                    Type:   mount.TypeVolume,
                    Target: daemonDest,
                },
            },
        }
        id, stream, err := runContainer(context.Background(), cli, config, hostConfig)
        if err != nil {
            return 0, "", err
        }
        var b bytes.Buffer
        teeContainerStream(&b, os.Stdout, os.Stderr, stream)
        resultC, errC := cli.ContainerWait(context.Background(), id, "")
        select {
        case err := <-errC:
            return 0, "", err
        case result := <-resultC:
            return result.StatusCode, b.String(), nil
        }
    }
}

func runContainer(ctx context.Context, cli *client.Client, config container.Config, hostConfig container.HostConfig) (string, io.ReadCloser, error) {
    created, err := cli.ContainerCreate(context.Background(),
        &config, &hostConfig, nil, "")
    if err != nil {
        return "", nil, err
    }
    if err = cli.ContainerStart(ctx, created.ID, types.ContainerStartOptions{}); err != nil {
        return "", nil, err
    }
    stream, err := cli.ContainerLogs(ctx,
        created.ID,
        types.ContainerLogsOptions{
            ShowStdout: true,
            ShowStderr: true,
            Follow:     true,
        })
    return created.ID, stream, err
}

func teeContainerStream(w, stdout, stderr io.Writer, stream io.ReadCloser) {
    stdcopy.StdCopy(io.MultiWriter(w, stdout), io.MultiWriter(w, stderr), stream)
    stream.Close()
}
@ -1,69 +0,0 @@
package main

import (
    "flag"
    "fmt"
    "log"
    "time"

    "github.com/bfirsh/funker-go"
    "github.com/docker/distribution/reference"
    "github.com/docker/docker/hack/integration-cli-on-swarm/agent/types"
)

func main() {
    if err := xmain(); err != nil {
        log.Fatalf("fatal error: %v", err)
    }
}

func validImageDigest(s string) bool {
    return reference.DigestRegexp.FindString(s) != ""
}

func xmain() error {
    workerImageDigest := flag.String("worker-image-digest", "", "Needs to be the digest of this worker image itself")
    dryRun := flag.Bool("dry-run", false, "Dry run")
    keepExecutor := flag.Bool("keep-executor", false, "Do not auto-remove executor containers, which is used for running privileged programs on Swarm")
    flag.Parse()
    if !validImageDigest(*workerImageDigest) {
        // Because of issue #29582.
        // `docker service create localregistry.example.com/blahblah:latest` pulls the image data to local, but not a tag.
        // So, `docker run localregistry.example.com/blahblah:latest` fails: `Unable to find image 'localregistry.example.com/blahblah:latest' locally`
        return fmt.Errorf("worker-image-digest must be a digest, got %q", *workerImageDigest)
    }
    executor := privilegedTestChunkExecutor(!*keepExecutor)
    if *dryRun {
        executor = dryTestChunkExecutor()
    }
    return handle(*workerImageDigest, executor)
}

func handle(workerImageDigest string, executor testChunkExecutor) error {
    log.Print("Waiting for a funker request")
    return funker.Handle(func(args *types.Args) types.Result {
        log.Printf("Executing chunk %d, contains %d test filters",
            args.ChunkID, len(args.Tests))
        begin := time.Now()
        code, rawLog, err := executor(workerImageDigest, args.Tests)
        if err != nil {
            log.Printf("Error while executing chunk %d: %v", args.ChunkID, err)
            if code == 0 {
                // Make sure this is a failure
                code = 1
            }
            return types.Result{
                ChunkID: args.ChunkID,
                Code:    int(code),
                RawLog:  rawLog,
            }
        }
        elapsed := time.Since(begin)
        log.Printf("Finished chunk %d, code=%d, elapsed=%v", args.ChunkID, code, elapsed)
        return types.Result{
            ChunkID: args.ChunkID,
            Code:    int(code),
            RawLog:  rawLog,
        }
    })
}
@ -1,122 +0,0 @@
package main

import (
    "context"
    "io/ioutil"
    "os"
    "path/filepath"
    "text/template"

    "github.com/docker/docker/client"
)

const composeTemplate = `# generated by integration-cli-on-swarm
version: "3"

services:
  worker:
    image: "{{.WorkerImage}}"
    command: ["-worker-image-digest={{.WorkerImageDigest}}", "-dry-run={{.DryRun}}", "-keep-executor={{.KeepExecutor}}"]
    networks:
      - net
    volumes:
      # Bind-mount the API socket so that we can invoke "docker run --privileged" within the service containers
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - DOCKER_GRAPHDRIVER={{.EnvDockerGraphDriver}}
      - DOCKER_EXPERIMENTAL={{.EnvDockerExperimental}}
    deploy:
      mode: replicated
      replicas: {{.Replicas}}
      restart_policy:
        # The restart condition needs to be any for funker function
        condition: any

  master:
    image: "{{.MasterImage}}"
    command: ["-worker-service=worker", "-input=/mnt/input", "-chunks={{.Chunks}}", "-shuffle={{.Shuffle}}", "-rand-seed={{.RandSeed}}"]
    networks:
      - net
    volumes:
      - {{.Volume}}:/mnt
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: none
      placement:
        # Make sure the master can access the volume
        constraints: [node.id == {{.SelfNodeID}}]

networks:
  net:

volumes:
  {{.Volume}}:
    external: true
`

type composeOptions struct {
    Replicas     int
    Chunks       int
    MasterImage  string
    WorkerImage  string
    Volume       string
    Shuffle      bool
    RandSeed     int64
    DryRun       bool
    KeepExecutor bool
}

type composeTemplateOptions struct {
    composeOptions
    WorkerImageDigest     string
    SelfNodeID            string
    EnvDockerGraphDriver  string
    EnvDockerExperimental string
}

// createCompose creates "dir/docker-compose.yml".
// If dir is empty, TempDir() is used.
func createCompose(dir string, cli *client.Client, opts composeOptions) (string, error) {
    if dir == "" {
        var err error
        dir, err = ioutil.TempDir("", "integration-cli-on-swarm-")
        if err != nil {
            return "", err
        }
    }
    resolved := composeTemplateOptions{}
    resolved.composeOptions = opts
    workerImageInspect, _, err := cli.ImageInspectWithRaw(context.Background(), defaultWorkerImageName)
    if err != nil {
        return "", err
    }
    if len(workerImageInspect.RepoDigests) > 0 {
        resolved.WorkerImageDigest = workerImageInspect.RepoDigests[0]
    } else {
        // fall back for non-pushed image
        resolved.WorkerImageDigest = workerImageInspect.ID
    }
    info, err := cli.Info(context.Background())
    if err != nil {
        return "", err
    }
    resolved.SelfNodeID = info.Swarm.NodeID
    resolved.EnvDockerGraphDriver = os.Getenv("DOCKER_GRAPHDRIVER")
    resolved.EnvDockerExperimental = os.Getenv("DOCKER_EXPERIMENTAL")
    composeFilePath := filepath.Join(dir, "docker-compose.yml")
    tmpl, err := template.New("").Parse(composeTemplate)
    if err != nil {
        return "", err
    }
    f, err := os.Create(composeFilePath)
    if err != nil {
        return "", err
    }
    defer f.Close()
    if err = tmpl.Execute(f, resolved); err != nil {
        return "", err
    }
    return composeFilePath, nil
}
@ -1,63 +0,0 @@
package main

import (
    "fmt"
    "os"
    "os/exec"
    "strings"
    "time"

    "github.com/docker/docker/client"
)

func system(commands [][]string) error {
    for _, c := range commands {
        cmd := exec.Command(c[0], c[1:]...)
        cmd.Stdout = os.Stdout
        cmd.Stderr = os.Stderr
        if err := cmd.Run(); err != nil {
            return err
        }
    }
    return nil
}

func pushImage(_ *client.Client, remote, local string) error {
    // FIXME: eliminate os/exec (but it is hard to pass auth without os/exec ...)
    return system([][]string{
        {"docker", "image", "tag", local, remote},
        {"docker", "image", "push", remote},
    })
}

func deployStack(_ *client.Client, stackName, composeFilePath string) error {
    // FIXME: eliminate os/exec (but stack is implemented in CLI ...)
    return system([][]string{
        {"docker", "stack", "deploy",
            "--compose-file", composeFilePath,
            "--with-registry-auth",
            stackName},
    })
}

func hasStack(_ *client.Client, stackName string) bool {
    // FIXME: eliminate os/exec (but stack is implemented in CLI ...)
    out, err := exec.Command("docker", "stack", "ls").CombinedOutput()
    if err != nil {
        panic(fmt.Errorf("`docker stack ls` failed with: %s", string(out)))
    }
    // FIXME: not accurate
    return strings.Contains(string(out), stackName)
}

func removeStack(_ *client.Client, stackName string) error {
    // FIXME: eliminate os/exec (but stack is implemented in CLI ...)
    if err := system([][]string{
        {"docker", "stack", "rm", stackName},
    }); err != nil {
        return err
    }
    // FIXME
    time.Sleep(10 * time.Second)
    return nil
}
@ -1,55 +0,0 @@
package main

import (
    "fmt"
    "io/ioutil"
    "path/filepath"
    "regexp"
)

var testFuncRegexp *regexp.Regexp

func init() {
    testFuncRegexp = regexp.MustCompile(`(?m)^\s*func\s+\(\w*\s*\*(\w+Suite)\)\s+(Test\w+)`)
}

func enumerateTestsForBytes(b []byte) ([]string, error) {
    var tests []string
    submatches := testFuncRegexp.FindAllSubmatch(b, -1)
    for _, submatch := range submatches {
        if len(submatch) == 3 {
            tests = append(tests, fmt.Sprintf("%s.%s$", submatch[1], submatch[2]))
        }
    }
    return tests, nil
}

// enumerateTests enumerates valid `-check.f` strings for all the test functions.
// Note that we use regexp rather than parsing Go files for performance reasons.
// (Try `TESTFLAGS=-check.list make test-integration` to see the slowness of parsing)
// The files need to be `gofmt`-ed.
//
// The result will be as follows, but unsorted ('$' is appended because they are regexp for `-check.f`):
//  "DockerAuthzSuite.TestAuthZPluginAPIDenyResponse$"
//  "DockerAuthzSuite.TestAuthZPluginAllowEventStream$"
//  ...
//  "DockerTrustedSwarmSuite.TestTrustedServiceUpdate$"
func enumerateTests(wd string) ([]string, error) {
    testGoFiles, err := filepath.Glob(filepath.Join(wd, "integration-cli", "*_test.go"))
    if err != nil {
        return nil, err
    }
    var allTests []string
    for _, testGoFile := range testGoFiles {
        b, err := ioutil.ReadFile(testGoFile)
        if err != nil {
            return nil, err
        }
        tests, err := enumerateTestsForBytes(b)
        if err != nil {
            return nil, err
        }
        allTests = append(allTests, tests...)
    }
    return allTests, nil
}
@ -1,84 +0,0 @@
package main

import (
    "os"
    "path/filepath"
    "reflect"
    "sort"
    "strings"
    "testing"
)

func getRepoTopDir(t *testing.T) string {
    wd, err := os.Getwd()
    if err != nil {
        t.Fatal(err)
    }
    wd = filepath.Clean(wd)
    suffix := "hack/integration-cli-on-swarm/host"
    if !strings.HasSuffix(wd, suffix) {
        t.Skipf("cwd seems strange (needs to have suffix %s): %v", suffix, wd)
    }
    return filepath.Clean(filepath.Join(wd, "../../.."))
}

func TestEnumerateTests(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping in short mode")
    }
    tests, err := enumerateTests(getRepoTopDir(t))
    if err != nil {
        t.Fatal(err)
    }
    sort.Strings(tests)
    t.Logf("enumerated %d test filter strings:", len(tests))
    for _, s := range tests {
        t.Logf("- %q", s)
    }
}

func TestEnumerateTestsForBytes(t *testing.T) {
    b := []byte(`package main
import (
    "github.com/go-check/check"
)

func (s *FooSuite) TestA(c *check.C) {
}

func (s *FooSuite) TestAAA(c *check.C) {
}

func (s *BarSuite) TestBar(c *check.C) {
}

func (x *FooSuite) TestC(c *check.C) {
}

func (*FooSuite) TestD(c *check.C) {
}

// should not be counted
func (s *FooSuite) testE(c *check.C) {
}

// counted, although we don't support ungofmt file
func (s *FooSuite) TestF (c *check.C){}
`)
    expected := []string{
        "FooSuite.TestA$",
        "FooSuite.TestAAA$",
        "BarSuite.TestBar$",
        "FooSuite.TestC$",
        "FooSuite.TestD$",
        "FooSuite.TestF$",
    }

    actual, err := enumerateTestsForBytes(b)
    if err != nil {
        t.Fatal(err)
    }
    if !reflect.DeepEqual(expected, actual) {
        t.Fatalf("expected %q, got %q", expected, actual)
    }
}
@ -1,198 +0,0 @@
package main

import (
    "context"
    "flag"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "strings"
    "time"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/client"
    "github.com/docker/docker/pkg/stdcopy"
    "github.com/sirupsen/logrus"
)

const (
    defaultStackName       = "integration-cli-on-swarm"
    defaultVolumeName      = "integration-cli-on-swarm"
    defaultMasterImageName = "integration-cli-master"
    defaultWorkerImageName = "integration-cli-worker"
)

func main() {
    rc, err := xmain()
    if err != nil {
        logrus.Fatalf("fatal error: %v", err)
    }
    os.Exit(rc)
}

func xmain() (int, error) {
    // Should we use cobra maybe?
    replicas := flag.Int("replicas", 1, "Number of worker service replicas")
    chunks := flag.Int("chunks", 0, "Number of test chunks executed in batch (0 == replicas)")
    pushWorkerImage := flag.String("push-worker-image", "", "Push the worker image to the registry. Required for distributed execution. (empty == not to push)")
    shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity")
    // flags below are rarely used
    randSeed := flag.Int64("rand-seed", int64(0), "Random seed used for shuffling (0 == current time)")
    filtersFile := flag.String("filters-file", "", "Path to optional file composed of `-check.f` filter strings")
    dryRun := flag.Bool("dry-run", false, "Dry run")
    keepExecutor := flag.Bool("keep-executor", false, "Do not auto-remove executor containers, which is used for running privileged programs on Swarm")
    flag.Parse()
    if *chunks == 0 {
        *chunks = *replicas
    }
    if *randSeed == int64(0) {
        *randSeed = time.Now().UnixNano()
    }
    cli, err := client.NewClientWithOpts(client.FromEnv)
    if err != nil {
        return 1, err
    }
    if hasStack(cli, defaultStackName) {
        logrus.Infof("Removing stack %s", defaultStackName)
        removeStack(cli, defaultStackName)
    }
    if hasVolume(cli, defaultVolumeName) {
        logrus.Infof("Removing volume %s", defaultVolumeName)
        removeVolume(cli, defaultVolumeName)
    }
    if err = ensureImages(cli, []string{defaultWorkerImageName, defaultMasterImageName}); err != nil {
        return 1, err
    }
    workerImageForStack := defaultWorkerImageName
    if *pushWorkerImage != "" {
        logrus.Infof("Pushing %s to %s", defaultWorkerImageName, *pushWorkerImage)
        if err = pushImage(cli, *pushWorkerImage, defaultWorkerImageName); err != nil {
            return 1, err
        }
        workerImageForStack = *pushWorkerImage
    }
    compose, err := createCompose("", cli, composeOptions{
        Replicas:     *replicas,
        Chunks:       *chunks,
        MasterImage:  defaultMasterImageName,
        WorkerImage:  workerImageForStack,
        Volume:       defaultVolumeName,
        Shuffle:      *shuffle,
        RandSeed:     *randSeed,
        DryRun:       *dryRun,
        KeepExecutor: *keepExecutor,
    })
    if err != nil {
        return 1, err
    }
    filters, err := filtersBytes(*filtersFile)
    if err != nil {
        return 1, err
    }
    logrus.Infof("Creating volume %s with input data", defaultVolumeName)
    if err = createVolumeWithData(cli,
        defaultVolumeName,
        map[string][]byte{"/input": filters},
        defaultMasterImageName); err != nil {
        return 1, err
    }
    logrus.Infof("Deploying stack %s from %s", defaultStackName, compose)
    defer func() {
        logrus.Info("NOTE: You may want to inspect or clean up the following resources:")
        logrus.Infof(" - Stack: %s", defaultStackName)
        logrus.Infof(" - Volume: %s", defaultVolumeName)
        logrus.Infof(" - Compose file: %s", compose)
        logrus.Infof(" - Master image: %s", defaultMasterImageName)
        logrus.Infof(" - Worker image: %s", workerImageForStack)
    }()
    if err = deployStack(cli, defaultStackName, compose); err != nil {
        return 1, err
    }
    logrus.Infof("The log will be displayed here after some duration. "+
        "You can watch the live status via `docker service logs %s_worker`",
        defaultStackName)
    masterContainerID, err := waitForMasterUp(cli, defaultStackName)
    if err != nil {
        return 1, err
    }
    rc, err := waitForContainerCompletion(cli, os.Stdout, os.Stderr, masterContainerID)
    if err != nil {
        return 1, err
    }
    logrus.Infof("Exit status: %d", rc)
    return int(rc), nil
}

func ensureImages(cli *client.Client, images []string) error {
    for _, image := range images {
        _, _, err := cli.ImageInspectWithRaw(context.Background(), image)
        if err != nil {
            return fmt.Errorf("could not find image %s, please run `make build-integration-cli-on-swarm`: %v",
                image, err)
        }
    }
    return nil
}

func filtersBytes(optionalFiltersFile string) ([]byte, error) {
    var b []byte
    if optionalFiltersFile == "" {
        tests, err := enumerateTests(".")
        if err != nil {
            return b, err
        }
        b = []byte(strings.Join(tests, "\n") + "\n")
    } else {
        var err error
        b, err = ioutil.ReadFile(optionalFiltersFile)
        if err != nil {
            return b, err
        }
    }
    return b, nil
}

func waitForMasterUp(cli *client.Client, stackName string) (string, error) {
    // FIXME(AkihiroSuda): it should retry until master is up, rather than pre-sleeping
    time.Sleep(10 * time.Second)

    fil := filters.NewArgs()
    fil.Add("label", "com.docker.stack.namespace="+stackName)
    // FIXME(AkihiroSuda): we should not rely on internal service naming convention
    fil.Add("label", "com.docker.swarm.service.name="+stackName+"_master")
    masters, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
        All:     true,
        Filters: fil,
    })
    if err != nil {
        return "", err
    }
    if len(masters) == 0 {
        return "", fmt.Errorf("master not running in stack %s", stackName)
    }
    return masters[0].ID, nil
}

func waitForContainerCompletion(cli *client.Client, stdout, stderr io.Writer, containerID string) (int64, error) {
    stream, err := cli.ContainerLogs(context.Background(),
        containerID,
        types.ContainerLogsOptions{
            ShowStdout: true,
            ShowStderr: true,
            Follow:     true,
        })
    if err != nil {
        return 1, err
    }
    stdcopy.StdCopy(stdout, stderr, stream)
    stream.Close()
    resultC, errC := cli.ContainerWait(context.Background(), containerID, "")
    select {
    case err := <-errC:
        return 1, err
    case result := <-resultC:
        return result.StatusCode, nil
    }
}
@ -1,88 +0,0 @@
package main

import (
    "archive/tar"
    "bytes"
    "context"
    "io"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/api/types/mount"
    "github.com/docker/docker/api/types/volume"
    "github.com/docker/docker/client"
)

func createTar(data map[string][]byte) (io.Reader, error) {
    var b bytes.Buffer
    tw := tar.NewWriter(&b)
    for path, datum := range data {
        hdr := tar.Header{
            Name: path,
            Mode: 0644,
            Size: int64(len(datum)),
        }
        if err := tw.WriteHeader(&hdr); err != nil {
            return nil, err
        }
        _, err := tw.Write(datum)
        if err != nil {
            return nil, err
        }
    }
    if err := tw.Close(); err != nil {
        return nil, err
    }
    return &b, nil
}

// createVolumeWithData creates a volume with the given data (e.g. data["/foo"] = []byte("bar"))
// Internally, a container is created from the image so as to provision the data to the volume,
// which is attached to the container.
func createVolumeWithData(cli *client.Client, volumeName string, data map[string][]byte, image string) error {
    _, err := cli.VolumeCreate(context.Background(),
        volume.VolumeCreateBody{
            Driver: "local",
            Name:   volumeName,
        })
    if err != nil {
        return err
    }
    mnt := "/mnt"
    miniContainer, err := cli.ContainerCreate(context.Background(),
        &container.Config{
            Image: image,
        },
        &container.HostConfig{
            Mounts: []mount.Mount{
                {
                    Type:   mount.TypeVolume,
                    Source: volumeName,
                    Target: mnt,
                },
            },
        }, nil, "")
    if err != nil {
        return err
    }
    tr, err := createTar(data)
    if err != nil {
        return err
    }
    if err := cli.CopyToContainer(context.Background(),
        miniContainer.ID, mnt, tr, types.CopyToContainerOptions{}); err != nil {
        return err
    }
    return cli.ContainerRemove(context.Background(),
        miniContainer.ID,
        types.ContainerRemoveOptions{})
}

func hasVolume(cli *client.Client, volumeName string) bool {
    _, err := cli.VolumeInspect(context.Background(), volumeName)
    return err == nil
}

func removeVolume(cli *client.Client, volumeName string) error {
    return cli.VolumeRemove(context.Background(), volumeName, true)
}
@ -327,19 +327,16 @@ Function Run-UnitTests() {
# Run the integration tests
Function Run-IntegrationTests() {
    $env:DOCKER_INTEGRATION_DAEMON_DEST = $root + "\bundles\tmp"
    $dirs = Get-ChildItem -Path integration -Directory -Recurse
    $dirs = go list -test -f '{{- if ne .ForTest `"`" -}}{{- .Dir -}}{{- end -}}' .\integration\...
    $integration_api_dirs = @()
    ForEach($dir in $dirs) {
        $RelativePath = "." + $dir.FullName -replace "$($PWD.Path -replace "\\","\\")",""
        If ($RelativePath -notmatch '(^.\\integration($|\\internal)|\\testdata)') {
            $integration_api_dirs += $dir
            Write-Host "Building test suite binary $RelativePath"
            go test -c -o "$RelativePath\test.exe" $RelativePath
        }
        Write-Host "Building test suite binary $dir"
        go test -c -o "$dir\test.exe" $dir
    }

    ForEach($dir in $integration_api_dirs) {
        Set-Location $dir.FullName
        Set-Location $dir
        Write-Host "Running $($PWD.Path)"
        $pinfo = New-Object System.Diagnostics.ProcessStartInfo
        $pinfo.FileName = "$($PWD.Path)\test.exe"
@ -150,15 +150,6 @@ ORIG_BUILDFLAGS=( -tags "autogen netgo osusergo static_build $DOCKER_BUILDTAGS"

BUILDFLAGS=( ${BUILDFLAGS} "${ORIG_BUILDFLAGS[@]}" )

# Test timeout.
if [ "${DOCKER_ENGINE_GOARCH}" == "arm64" ] || [ "${DOCKER_ENGINE_GOARCH}" == "arm" ]; then
    : ${TIMEOUT:=10m}
elif [ "${DOCKER_ENGINE_GOARCH}" == "windows" ]; then
    : ${TIMEOUT:=8m}
else
    : ${TIMEOUT:=5m}
fi

LDFLAGS_STATIC_DOCKER="
    $LDFLAGS_STATIC
    -extldflags \"$EXTLDFLAGS_STATIC\"
@ -10,6 +10,8 @@ if [ ! "$(go env GOOS)" = 'windows' ]; then
        if ! wait "$pid"; then
            echo >&2 "warning: PID $pid from $pidFile had a nonzero exit code"
        fi
        root=$(dirname "$pidFile")/root
        umount "$root" || true
    done

    if [ -z "$DOCKER_TEST_HOST" ]; then
@ -5,6 +5,18 @@
#
# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration
#

if [[ "${TESTFLAGS}" = *-check.f* ]]; then
    echo Skipping integration tests since TESTFLAGS includes integration-cli only flags
    TEST_SKIP_INTEGRATION=1
fi

if [[ "${TESTFLAGS}" = *-test.run* ]]; then
    echo Skipping integration-cli tests since TESTFLAGS includes integration only flags
    TEST_SKIP_INTEGRATION_CLI=1
fi


if [ -z ${MAKEDIR} ]; then
    export MAKEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
fi
@ -15,21 +27,24 @@ source "$MAKEDIR/.go-autogen"
: ${TESTFLAGS:=}
: ${TESTDEBUG:=}

integration_api_dirs=${TEST_INTEGRATION_DIR:-"$(
    find ./integration -type d |
    grep -vE '(^./integration($|/internal)|/testdata)')"}
integration_api_dirs=${TEST_INTEGRATION_DIR:-"$(go list -test -f '{{- if ne .ForTest "" -}}{{- .Dir -}}{{- end -}}' ./integration/...)"}

run_test_integration() {
    [[ "$TESTFLAGS" != *-check.f* ]] && run_test_integration_suites
    set_platform_timeout
    if [ -z "${TEST_SKIP_INTEGRATION}" ]; then
        run_test_integration_suites
    fi
    if [ -z "${TEST_SKIP_INTEGRATION_CLI}" ]; then
        run_test_integration_legacy_suites
    fi
}

run_test_integration_suites() {
    local flags="-test.v -test.timeout=${TIMEOUT} $TESTFLAGS"
    local flags="-test.v -test.timeout=${TIMEOUT} $TESTFLAGS ${TESTFLAGS_INTEGRATION}"
    for dir in ${integration_api_dirs}; do
        if ! (
            cd "$dir"
            echo "Running $PWD"
            echo "Running $PWD flags=${flags}"
            test_env ./test.main ${flags}
        ); then exit 1; fi
    done
@ -37,9 +52,9 @@ run_test_integration_suites()

run_test_integration_legacy_suites() {
    (
        flags="-check.v -check.timeout=${TIMEOUT} -test.timeout=360m $TESTFLAGS"
        flags="-check.v -check.timeout=${TIMEOUT} -test.timeout=360m $TESTFLAGS ${TESTFLAGS_INTEGRATION_CLI}"
        cd integration-cli
        echo "Running $PWD"
        echo "Running $PWD flags=${flags}"
        test_env ./test.main $flags
    )
}
@ -49,10 +64,14 @@ build_test_suite_binaries()
        echo "Skipping building test binaries; as DOCKER_INTEGRATION_TESTS_VERIFIED is set"
        return
    fi
    if [ -z "${TEST_SKIP_INTEGRATION_CLI}" ]; then
        build_test_suite_binary ./integration-cli "test.main"
    fi
    if [ -z "${TEST_SKIP_INTEGRATION}" ]; then
        for dir in ${integration_api_dirs}; do
            build_test_suite_binary "$dir" "test.main"
        done
    fi
}

# Build a binary for a test suite package
@ -105,9 +124,8 @@ test_env()
    )
}

error_on_leaked_containerd_shims() {
    if [ "$(go env GOOS)" == 'windows' ]; then
    if [ "$(go env GOOS)" = 'windows' ]; then
        return
    fi
@ -120,3 +138,23 @@ error_on_leaked_containerd_shims()
        exit 1
    fi
}

set_platform_timeout() {
    # Test timeout.
    if [ "${DOCKER_ENGINE_GOARCH}" = "arm64" ] || [ "${DOCKER_ENGINE_GOARCH}" = "arm" ]; then
        : ${TIMEOUT:=10m}
    elif [ "${DOCKER_ENGINE_GOARCH}" = "windows" ]; then
        : ${TIMEOUT:=8m}
    else
        : ${TIMEOUT:=5m}
    fi

    if [ "${TEST_REPEAT}" -gt 1 ]; then
        # TIMEOUT needs to take TEST_REPEAT into account, or a premature time out may happen.
        # The following ugliness will:
        # - remove last character (usually 'm' from '10m')
        # - multiply by testcount
        # - add last character back
        TIMEOUT=$((${TIMEOUT::-1} * ${TEST_REPEAT}))${TIMEOUT:$((${#TIMEOUT}-1)):1}
    fi
}
@ -1,5 +1,5 @@
#!/usr/bin/env bash
# required by `make build-integration-cli-on-swarm`
# required by https://github.com/AkihiroSuda/kube-moby-integration
set -e

source hack/make/.integration-test-helpers
@ -3,7 +3,6 @@ set -e

# This script exists as backwards compatibility for CI
(

    DEST="${DEST}-daemon"
    ABS_DEST="${ABS_DEST}-daemon"
    . hack/make/dynbinary-daemon
@ -3,18 +3,62 @@ set -e

source hack/make/.integration-test-helpers

# subshell so that we can export PATH without breaking other things
# The commit or tag to use for testing
# TODO docker 17.06 cli client used in CI fails to build using a sha;
# unable to prepare context: unable to 'git clone' to temporary context directory: error fetching: error: no such remote ref ead0bb9e08c13dd3d1712759491eee06bf5a5602
#: exit status 128
: "${DOCKER_PY_COMMIT:=4.0.2}"

# custom options to pass py.test
# TODO remove these skip once we update to a docker-py version that has https://github.com/docker/docker-py/pull/2369, https://github.com/docker/docker-py/pull/2380, https://github.com/docker/docker-py/pull/2382
: "${PY_TEST_OPTIONS:=\
--deselect=tests/integration/api_swarm_test.py::SwarmTest::test_init_swarm_data_path_addr \
--deselect=tests/integration/api_exec_test.py::ExecTest::test_detach_with_arg \
--deselect=tests/integration/api_exec_test.py::ExecDemuxTest::test_exec_command_tty_stream_no_demux \
--deselect=tests/integration/api_build_test.py::BuildTest::test_build_invalid_platform \
--deselect=tests/integration/api_image_test.py::PullImageTest::test_pull_invalid_platform \
--junitxml=${DEST}/junit-report.xml \
}"
(
    bundle .integration-daemon-start

    dockerPy='/docker-py'
    [ -d "$dockerPy" ] || {
        dockerPy="$DEST/docker-py"
        git clone https://github.com/docker/docker-py.git "$dockerPy"
    }
    docker_host_scheme=$(echo "${DOCKER_HOST}" | cut -d: -f1 -)

    # exporting PYTHONPATH to import "docker" from our local docker-py
    test_env PYTHONPATH="$dockerPy" py.test --junitxml="$DEST/results.xml" "$dockerPy/tests/integration"
    case "${docker_host_scheme}" in
        unix)
            # trim the tcp:// scheme, and bind-mount the docker socket into the container
            run_opts="--mount type=bind,src=${DOCKER_HOST#unix://},dst=/var/run/docker.sock"
            ;;

        tcp)
            # run container in host-mode networking so that it can connect to the
            # daemon from the current networking namespace (e.g., to connect to localhost)
            run_opts="--network=host -e DOCKER_HOST=${DOCKER_HOST}"
            ;;

        *)
            echo "WARN: Skipping test-docker-py: connecting to docker daemon using ${docker_host_scheme} (${DOCKER_HOST}) not supported"
            bundle .integration-daemon-stop
            return 0
    esac

    docker_py_image="docker-sdk-python3:${DOCKER_PY_COMMIT}"
    if ! docker image inspect "${docker_py_image}" &> /dev/null; then
        echo INFO: Building ${docker_py_image}...
        (
            [ -n "${TESTDEBUG}" ] && set -x
            [ -z "${TESTDEBUG}" ] && build_opts="--quiet"
            [ -f /.dockerenv ] || build_opts="${build_opts} --network=host"
            # shellcheck disable=SC2086
            exec docker build ${build_opts} -t "${docker_py_image}" -f tests/Dockerfile "https://github.com/docker/docker-py.git#${DOCKER_PY_COMMIT}"
        )
    fi

    echo INFO: Starting docker-py tests...
    (
        [ -n "${TESTDEBUG}" ] && set -x
        # shellcheck disable=SC2086,SC2140
        exec docker run --rm ${run_opts} --mount type=bind,"src=${ABS_DEST}","dst=/src/${DEST}" "${docker_py_image}" pytest ${PY_TEST_OPTIONS} tests/integration
    )
    bundle .integration-daemon-stop
) 2>&1 | tee -a "$DEST/test.log"
@ -1,9 +1,21 @@
#!/usr/bin/env bash
set -e -o pipefail

if [ -n "$TEST_INTEGRATION_DEST" ]; then
    export DEST="$ABS_DEST/$TEST_INTEGRATION_DEST"
    export DOCKER_INTEGRATION_DAEMON_DEST="$DEST"
    mkdir -p "$DEST"
fi

source hack/make/.integration-test-helpers

if [ ! -z "${TEST_SKIP_INTEGRATION}" ] && [ ! -z "${TEST_SKIP_INTEGRATION_CLI}" ]; then
    echo integration and integration-cli skipped according to env vars
    exit 0
fi

(
    env
    build_test_suite_binaries
    bundle .integration-daemon-start
    bundle .integration-daemon-setup
@ -20,18 +20,10 @@ echo "Running stress test for them."
(
    TESTARRAY=$(echo "$new_tests" | sed 's/+func //' | awk -F'\\(' '{print $1}' | tr '\n' '|')
    # Note: TEST_REPEAT will make the test suite run 5 times, restarting the daemon
    # whereas testcount will make each test run 5 times in a row under the same daemon.
    # and each test will run 5 times in a row under the same daemon.
    # This will make a total of 25 runs for each test in TESTARRAY.
    export TEST_REPEAT=5
    local testcount=5
    # However, TIMEOUT needs to take testcount into account, or a premature time out may happen.
    # The following ugliness will:
    # - remove last character (usually 'm' from '10m')
    # - multiply by testcount
    # - add last character back
    export TIMEOUT=$((${TIMEOUT::-1} * $testcount))${TIMEOUT:$((${#TIMEOUT}-1)):1}

    export TESTFLAGS="-test.count $testcount -test.run ${TESTARRAY%?}"
    export TESTFLAGS="-test.count ${TEST_REPEAT} -test.run ${TESTARRAY%?}"
    echo "Using test flags: $TESTFLAGS"
    source hack/make/test-integration
)
@ -2,7 +2,7 @@
set -e -u -o pipefail

ARCH=$(uname -m)
if [ "$ARCH" == "x86_64" ]; then
if [ "$ARCH" = "x86_64" ]; then
    ARCH="amd64"
fi
@ -17,8 +17,13 @@ integration_api_dirs=${TEST_INTEGRATION_DIR:-"$(
    grep -vE '(^/tests/integration($|/internal)|/testdata)')"}

run_test_integration() {
    [[ "$TESTFLAGS" != *-check.f* ]] && run_test_integration_suites
    set_platform_timeout
    if [[ "$TESTFLAGS" != *-check.f* ]]; then
        run_test_integration_suites
    fi
    if [[ "$TESTFLAGS" != *-test.run* ]]; then
        run_test_integration_legacy_suites
    fi
}

run_test_integration_suites() {

@ -68,5 +73,16 @@ test_env() {
    )
}

set_platform_timeout() {
    # Test timeout.
    if [ "${DOCKER_ENGINE_GOARCH}" = "arm64" ] || [ "${DOCKER_ENGINE_GOARCH}" = "arm" ]; then
        : ${TIMEOUT:=10m}
    elif [ "${DOCKER_ENGINE_GOARCH}" = "windows" ]; then
        : ${TIMEOUT:=8m}
    else
        : ${TIMEOUT:=5m}
    fi
}

sh /scripts/ensure-emptyfs.sh
run_test_integration
@ -1,34 +1,28 @@
#!/usr/bin/env bash
#
# Run unit tests
# Run unit tests and create report
#
# TESTFLAGS - add additional test flags. Ex:
#
#   TESTFLAGS="-v -run TestBuild" hack/test/unit
#   TESTFLAGS='-v -run TestBuild' hack/test/unit
#
# TESTDIRS - run tests for specified packages. Ex:
#
#   TESTDIRS="./pkg/term" hack/test/unit
#   TESTDIRS='./pkg/term' hack/test/unit
#
set -eu -o pipefail

TESTFLAGS+=" -test.timeout=${TIMEOUT:-5m}"
BUILDFLAGS=( -tags "netgo seccomp libdm_no_deferred_remove" )
TESTDIRS="${TESTDIRS:-"./..."}"

exclude_paths="/vendor/|/integration"
BUILDFLAGS=( -tags 'netgo seccomp libdm_no_deferred_remove' )
TESTFLAGS+="-test.timeout=${TIMEOUT:-5m}"
TESTDIRS="${TESTDIRS:-./...}"
exclude_paths='/vendor/|/integration'
pkg_list=$(go list $TESTDIRS | grep -vE "($exclude_paths)")

for pkg in $pkg_list; do
    go test "${BUILDFLAGS[@]}" \
mkdir -p bundles
gotestsum --format=standard-quiet --jsonfile=bundles/go-test-report.json --junitfile=bundles/junit-report.xml -- \
    "${BUILDFLAGS[@]}" \
    -cover \
    -coverprofile=profile.out \
    -coverprofile=bundles/profile.out \
    -covermode=atomic \
    ${TESTFLAGS} \
    "${pkg}"

    if test -f profile.out; then
        cat profile.out >> coverage.txt
        rm profile.out
    fi
done
    ${pkg_list}
@ -8,6 +8,6 @@ files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/swagger.yaml' ||
unset IFS

if [ ${#files[@]} -gt 0 ]; then
    yamllint -c ${SCRIPTDIR}/.swagger-yamllint api/swagger.yaml
    LANG=C.UTF-8 yamllint -c ${SCRIPTDIR}/.swagger-yamllint api/swagger.yaml
    swagger validate api/swagger.yaml
fi
@ -1 +0,0 @@
package main

@ -1 +0,0 @@
package cmd

@ -1 +0,0 @@
package main

@ -1 +0,0 @@
package cmd

@ -1 +0,0 @@
package main
@ -33,6 +33,13 @@ type testingT interface {
    Fatalf(string, ...interface{})
}

type namer interface {
    Name() string
}
type testNamer interface {
    TestName() string
}

type logT interface {
    Logf(string, ...interface{})
}
@ -88,11 +95,17 @@ func New(t testingT, ops ...func(*Daemon)) *Daemon {
    if ht, ok := t.(test.HelperT); ok {
        ht.Helper()
    }
    t.Log("Creating a new daemon")
    dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
    if dest == "" {
        dest = os.Getenv("DEST")
    }
    switch v := t.(type) {
    case namer:
        dest = filepath.Join(dest, v.Name())
    case testNamer:
        dest = filepath.Join(dest, v.TestName())
    }
    t.Logf("Creating a new daemon at: %s", dest)
    assert.Check(t, dest != "", "Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable")

    storageDriver := os.Getenv("DOCKER_GRAPHDRIVER")
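To make the dest selection added above concrete: a *testing.T exposes its name via Name(), while a go-check context exposes it via TestName(), so each test gets its own daemon directory. A standalone sketch under stated assumptions (stdT and gocheckC are hypothetical stand-ins, not types from the daemon package):

package main

import (
    "fmt"
    "path/filepath"
)

type namer interface{ Name() string }
type testNamer interface{ TestName() string }

// stdT mimics a *testing.T, which reports its name via Name().
type stdT struct{}

func (stdT) Name() string { return "TestDaemonStart" }

// gocheckC mimics a go-check context, which reports its name via TestName().
type gocheckC struct{}

func (gocheckC) TestName() string { return "DockerSuite.TestBuild" }

// destFor mirrors the type switch above: pick a per-test subdirectory of dest.
func destFor(t interface{}, base string) string {
    switch v := t.(type) {
    case namer:
        return filepath.Join(base, v.Name())
    case testNamer:
        return filepath.Join(base, v.TestName())
    }
    return base
}

func main() {
    fmt.Println(destFor(stdT{}, "bundles"))     // bundles/TestDaemonStart
    fmt.Println(destFor(gocheckC{}, "bundles")) // bundles/DockerSuite.TestBuild
}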
@ -620,7 +633,9 @@ func (d *Daemon) getClientConfig() (*clientConfig, error) {
        return nil, err
    }
    transport.DisableKeepAlives = true

    if proto == "unix" {
        addr = filepath.Base(addr)
    }
    return &clientConfig{
        transport: transport,
        scheme:    scheme,