Merge pull request #337 from thaJeztah/19.03_backport_jenkinsfile

[19.03 backport] Jenkinsfile and CI updates
Sebastiaan van Stijn 2019-09-06 23:06:33 +02:00 committed by GitHub
commit c8ef549bf6
10 changed files with 296 additions and 140 deletions


@ -26,6 +26,7 @@
ARG CROSS="false"
ARG GO_VERSION=1.12.8
ARG DEBIAN_FRONTEND=noninteractive
FROM golang:${GO_VERSION}-stretch AS base
ARG APT_MIRROR
@ -33,19 +34,21 @@ RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/
&& sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
FROM base AS criu
ARG DEBIAN_FRONTEND
# Install CRIU for checkpoint/restore support
ENV CRIU_VERSION 3.11
# Install dependency packages specific to criu
RUN apt-get update && apt-get install -y \
RUN apt-get update && apt-get install -y --no-install-recommends \
libnet-dev \
libprotobuf-c0-dev \
libprotobuf-c-dev \
libprotobuf-dev \
libnl-3-dev \
libcap-dev \
protobuf-compiler \
protobuf-c-compiler \
python-protobuf \
&& mkdir -p /usr/src/criu \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir -p /usr/src/criu \
&& curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \
&& cd /usr/src/criu \
&& make \
@ -84,7 +87,11 @@ RUN set -x \
&& rm -rf "$GOPATH"
FROM base AS frozen-images
RUN apt-get update && apt-get install -y jq ca-certificates --no-install-recommends
ARG DEBIAN_FRONTEND
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
jq \
&& rm -rf /var/lib/apt/lists/*
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /
RUN /download-frozen-image-v2.sh /build \
@ -98,32 +105,34 @@ RUN /download-frozen-image-v2.sh /build \
FROM base AS cross-false
FROM base AS cross-true
ARG DEBIAN_FRONTEND
RUN dpkg --add-architecture armhf
RUN dpkg --add-architecture arm64
RUN dpkg --add-architecture armel
RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \
apt-get update \
&& apt-get install -y --no-install-recommends \
apt-get update && apt-get install -y --no-install-recommends \
crossbuild-essential-armhf \
crossbuild-essential-arm64 \
crossbuild-essential-armel; \
crossbuild-essential-armel \
&& rm -rf /var/lib/apt/lists/*; \
fi
FROM cross-${CROSS} as dev-base
FROM dev-base AS runtime-dev-cross-false
RUN apt-get update && apt-get install -y \
ARG DEBIAN_FRONTEND
RUN apt-get update && apt-get install -y --no-install-recommends \
libapparmor-dev \
libseccomp-dev
libseccomp-dev \
&& rm -rf /var/lib/apt/lists/*
FROM cross-true AS runtime-dev-cross-true
ARG DEBIAN_FRONTEND
# These crossbuild packages rely on gcc-<arch>, but this doesn't want to install
# on non-amd64 systems.
# Additionally, the crossbuild-amd64 is currently only on debian:buster, so
# other architectures cannot crossbuild amd64.
RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \
apt-get update \
&& apt-get install -y \
apt-get update && apt-get install -y --no-install-recommends \
libseccomp-dev:armhf \
libseccomp-dev:arm64 \
libseccomp-dev:armel \
@ -133,7 +142,8 @@ RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \
# install this arch's seccomp here due to compat issues with the v0 builder
# This is as opposed to inheriting from runtime-dev-cross-false
libapparmor-dev \
libseccomp-dev; \
libseccomp-dev \
&& rm -rf /var/lib/apt/lists/*; \
fi
FROM runtime-dev-cross-${CROSS} AS runtime-dev
@ -151,7 +161,10 @@ COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
FROM dev-base AS containerd
RUN apt-get update && apt-get install -y btrfs-tools
ARG DEBIAN_FRONTEND
RUN apt-get update && apt-get install -y --no-install-recommends \
btrfs-tools \
&& rm -rf /var/lib/apt/lists/*
ENV INSTALL_BINARY_NAME=containerd
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
@ -188,7 +201,11 @@ COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
FROM dev-base AS tini
RUN apt-get update && apt-get install -y cmake vim-common
ARG DEBIAN_FRONTEND
RUN apt-get update && apt-get install -y --no-install-recommends \
cmake \
vim-common \
&& rm -rf /var/lib/apt/lists/*
COPY hack/dockerfile/install/install.sh ./install.sh
ENV INSTALL_BINARY_NAME=tini
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
@ -203,6 +220,7 @@ COPY ./contrib/dockerd-rootless.sh /build
# TODO: Some of this is only really needed for testing, it would be nice to split this up
FROM runtime-dev AS dev
ARG DEBIAN_FRONTEND
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser
# Let us use a .bashrc file
@ -213,7 +231,7 @@ RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
RUN ldconfig
# This should only install packages that are specifically needed for the dev environment and nothing else
# Do you really need to add another package here? Can it be done in a different build stage?
RUN apt-get update && apt-get install -y \
RUN apt-get update && apt-get install -y --no-install-recommends \
apparmor \
aufs-tools \
bash-completion \
@ -230,6 +248,7 @@ RUN apt-get update && apt-get install -y \
pigz \
python3-pip \
python3-setuptools \
python3-wheel \
thin-provisioning-tools \
vim \
vim-common \
@ -240,7 +259,7 @@ RUN apt-get update && apt-get install -y \
libprotobuf-c1 \
libnet1 \
libnl-3-200 \
--no-install-recommends
&& rm -rf /var/lib/apt/lists/*
RUN pip3 install yamllint==1.16.0

Jenkinsfile

@ -12,8 +12,9 @@ pipeline {
booleanParam(name: 'janky', defaultValue: true, description: 'x86 Build/Test')
booleanParam(name: 'z', defaultValue: true, description: 'IBM Z (s390x) Build/Test')
booleanParam(name: 'powerpc', defaultValue: true, description: 'PowerPC (ppc64le) Build/Test')
booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
booleanParam(name: 'windowsRS5', defaultValue: false, description: 'Windows 2019 (RS5) Build/Test')
booleanParam(name: 'windowsRS1', defaultValue: true, description: 'Windows 2016 (RS1) Build/Test')
booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
booleanParam(name: 'skip_dco', defaultValue: false, description: 'Skip the DCO check')
}
environment {
DOCKER_BUILDKIT = '1'
@ -24,6 +25,20 @@ pipeline {
TIMEOUT = '120m'
}
stages {
stage('DCO-check') {
when {
beforeAgent true
expression { !params.skip_dco }
}
agent { label 'linux' }
steps {
sh '''
docker run --rm \
-v "$WORKSPACE:/workspace" \
alpine sh -c 'apk add --no-cache -q git bash && cd /workspace && hack/validate/dco'
'''
}
}
stage('Build') {
parallel {
stage('unit-validate') {
@ -94,12 +109,15 @@ pipeline {
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
sh '''
echo 'Creating docker-py-bundles.tar.gz'
tar -czf docker-py-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=docker-py
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
'''
archiveArtifacts artifacts: 'docker-py-bundles.tar.gz'
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
}
}
@ -185,12 +203,15 @@ pipeline {
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
sh '''
echo 'Creating unit-bundles.tar.gz'
tar -czvf unit-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=unit
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out
'''
archiveArtifacts artifacts: 'unit-bundles.tar.gz'
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
@ -302,13 +323,16 @@ pipeline {
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
sh '''
echo "Creating janky-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf janky-bundles.tar.gz
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=janky
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: 'janky-bundles.tar.gz'
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
@ -396,13 +420,16 @@ pipeline {
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
sh '''
echo "Creating s390x-integration-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf s390x-integration-bundles.tar.gz
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: 's390x-integration-bundles.tar.gz'
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
@ -471,12 +498,16 @@ pipeline {
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
sh '''
echo "Creating s390x-integration-cli-bundles.tar.gz"
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf s390x-integration-cli-bundles.tar.gz
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: 's390x-integration-cli-bundles.tar.gz'
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
@ -562,13 +593,16 @@ pipeline {
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
sh '''
echo "Creating powerpc-integration-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf powerpc-integration-bundles.tar.gz
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=powerpc-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: 'powerpc-integration-bundles.tar.gz'
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
@ -635,12 +669,16 @@ pipeline {
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
sh '''
echo "Creating powerpc-integration-cli-bundles.tar.gz"
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf powerpc-integration-cli-bundles.tar.gz
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=powerpc-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: 'powerpc-integration-cli-bundles.tar.gz'
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
@ -653,10 +691,20 @@ pipeline {
beforeAgent true
expression { params.windowsRS1 }
}
environment {
DOCKER_BUILDKIT = '0'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI-$BUILD_NUMBER"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2016'
}
agent {
node {
label 'windows-rs1'
customWorkspace 'c:\\gopath\\src\\github.com\\docker\\docker'
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2016'
}
}
stages {
@ -670,7 +718,9 @@ pipeline {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
.\\hack\\ci\\windows.ps1
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
Invoke-WebRequest https://github.com/jhowardmsft/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
@ -682,10 +732,20 @@ pipeline {
beforeAgent true
expression { params.windowsRS5 }
}
environment {
DOCKER_BUILDKIT = '0'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI-$BUILD_NUMBER"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
}
agent {
node {
label 'windows-rs5'
customWorkspace 'c:\\gopath\\src\\github.com\\docker\\docker'
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2019'
}
}
stages {
@ -699,7 +759,8 @@ pipeline {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
.\\hack\\ci\\windows.ps1
Invoke-WebRequest https://github.com/jhowardmsft/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}


@ -62,6 +62,7 @@ DOCKER_ENVS := \
-e TESTFLAGS \
-e TESTFLAGS_INTEGRATION \
-e TESTFLAGS_INTEGRATION_CLI \
-e TEST_FILTER \
-e TIMEOUT \
-e VALIDATE_REPO \
-e VALIDATE_BRANCH \
@ -86,6 +87,7 @@ BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles))
# DOCKER_MOUNT can be overridden, but use at your own risk!
ifndef DOCKER_MOUNT
DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")
DOCKER_MOUNT := $(if $(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT):$(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT))
# This allows the test suite to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs.
# The volume will be cleaned up when the container is removed due to `--rm`.
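The new `DOCKER_BINDDIR_MOUNT_OPTS` variable appends mount options to the bundles bind mount defined above. A minimal sketch of how it might be invoked; the `test-unit` target name and the `delegated` option value are illustrative assumptions, not part of this change:

```
# Illustrative only: pass extra bind-mount options for the bundles directory
# when running a containerized make target. The target name ("test-unit") and
# the option value ("delegated") are assumptions, not part of this change.
make DOCKER_BINDDIR_MOUNT_OPTS=delegated test-unit
```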


@ -67,6 +67,8 @@ If a remote daemon is detected, the test will be skipped.
## Running tests
### Unit Tests
To run the unit test suite:
```
@ -82,12 +84,33 @@ The following environment variables may be used to run a subset of tests:
* `TESTFLAGS` - flags passed to `go test`, to run tests which match a pattern
use `TESTFLAGS="-test.run TestNameOrPrefix"`
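For instance, a minimal sketch of a filtered unit-test run (the `test-unit` target name is an assumption, as the target itself is not shown in this excerpt):

```
# TESTFLAGS is forwarded to `go test` inside the dev container.
# The "test-unit" target name is an assumption for illustration.
make TESTFLAGS='-test.run TestNameOrPrefix' test-unit
```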
### Integration Tests
To run the integration test suite:
```
make test-integration
```
This make target runs both the "integration" suite and the "integration-cli"
suite.
You can choose which integration test directories to build and run by listing
them in the `TEST_INTEGRATION_DIR` environment variable.
You can also explicitly skip either suite by setting the `TEST_SKIP_INTEGRATION`
and/or `TEST_SKIP_INTEGRATION_CLI` environment variables (any value will do).
Flags specific to each suite can be set in the `TESTFLAGS_INTEGRATION` and
`TESTFLAGS_INTEGRATION_CLI` environment variables.
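For example, a minimal sketch of a partial run, assuming these variables are forwarded into the dev container (only `TEST_FILTER` and the `TESTFLAGS_*` variables are visible in the Makefile excerpt above; the directory paths are illustrative):

```
# Run the "integration" suite for selected directories only and skip the
# legacy integration-cli suite. Paths and variable forwarding are assumptions.
TEST_INTEGRATION_DIR='./integration/volume ./integration/network' \
TEST_SKIP_INTEGRATION_CLI=1 \
make test-integration
```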
If all you want is to specify a test filter to run, you can set the
`TEST_FILTER` environment variable. It is passed directly to `go test -run`
(or `go test -check.f`, depending on the test suite). It will also
automatically set the other above-mentioned environment variables accordingly.
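For example, a filtered run across both suites might look like this (the test name is hypothetical):

```
# TEST_FILTER is forwarded into the dev container (see the Makefile change
# above) and translated into -test.run / -check.f by hack/make/test-integration.
make TEST_FILTER=TestVolumesCreateAndList test-integration
```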
### Go Version
You can change the Go version used to build the code under test by setting the
`GO_VERSION` variable, for example:
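A minimal sketch, assuming a `make` target that honors `GO_VERSION` (the version value shown is illustrative; the Dockerfile above defaults to 1.12.8):

```
# Build and test against a different Go toolchain; the version here is only an
# example value.
make GO_VERSION=1.12.9 test-unit
```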

hack/ci/master (new executable file)

@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Entrypoint for jenkins master CI build
set -eu -o pipefail
hack/validate/default
hack/test/unit
hack/make.sh \
binary-daemon \
dynbinary \
test-docker-py \
test-integration \
cross


@ -78,6 +78,9 @@ if ($env:BUILD_TAG -match "-WoW") { $env:LCOW_MODE="" }
# docker integration tests are also coded to use the same
# environment variable, and if not set, defaults to microsoft/windowsservercore
#
# WINDOWS_BASE_IMAGE_TAG if defined, uses that as the tag name for the base image.
# if not set, defaults to latest
#
# LCOW_BASIC_MODE if defined, does very basic LCOW verification. Ultimately we
# want to run the entire CI suite from docker, but that's a way off.
#
@ -139,7 +142,7 @@ Function Nuke-Everything {
}
$allImages = $(docker images --format "{{.Repository}}#{{.ID}}")
$toRemove = ($allImages | Select-String -NotMatch "windowsservercore","nanoserver","docker")
$toRemove = ($allImages | Select-String -NotMatch "servercore","nanoserver","docker")
$imageCount = ($toRemove | Measure-Object -line).Lines
if ($imageCount -gt 0) {
@ -200,12 +203,8 @@ Function Nuke-Everything {
$count=(Get-ChildItem $reg | Measure-Object).Count
if ($count -gt 0) {
Write-Warning "There are $count NdisAdapters leaked under Psched\Parameters"
if ($env:COMPUTERNAME -match "jenkins-rs1-") {
Write-Warning "Cleaning Psched..."
Get-ChildItem $reg | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue | Out-Null
} else {
Write-Warning "Not cleaning as not a production RS1 server"
}
Write-Warning "Cleaning Psched..."
Get-ChildItem $reg | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue | Out-Null
}
# TODO: This should be able to be removed in August 2017 update. Only needed for RS1
@ -213,12 +212,8 @@ Function Nuke-Everything {
$count=(Get-ChildItem $reg | Measure-Object).Count
if ($count -gt 0) {
Write-Warning "There are $count NdisAdapters leaked under WFPLWFS\Parameters"
if ($env:COMPUTERNAME -match "jenkins-rs1-") {
Write-Warning "Cleaning WFPLWFS..."
Get-ChildItem $reg | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue | Out-Null
} else {
Write-Warning "Not cleaning as not a production RS1 server"
}
Write-Warning "Cleaning WFPLWFS..."
Get-ChildItem $reg | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue | Out-Null
}
} catch {
# Don't throw any errors onwards Throw $_
@ -261,6 +256,18 @@ Try {
# Make sure docker-ci-zap is installed
if ($null -eq (Get-Command "docker-ci-zap" -ErrorAction SilentlyContinue)) { Throw "ERROR: docker-ci-zap is not installed or not found on path" }
# Make sure Windows Defender is disabled
$defender = $false
Try {
$status = Get-MpComputerStatus
if ($status) {
if ($status.RealTimeProtectionEnabled) {
$defender = $true
}
}
} Catch {}
if ($defender) { Throw "ERROR: Windows Defender real time protection must be disabled for integration tests" }
# Make sure SOURCES_DRIVE is set
if ($null -eq $env:SOURCES_DRIVE) { Throw "ERROR: Environment variable SOURCES_DRIVE is not set" }
@ -345,14 +352,16 @@ Try {
Write-Host -ForegroundColor Green "INFO: docker load of"$ControlDaemonBaseImage" completed successfully"
} else {
# We need to docker pull it instead. It will come in directly as microsoft/imagename:latest
Write-Host -ForegroundColor Green $("INFO: Pulling microsoft/"+$ControlDaemonBaseImage+":latest from docker hub. This may take some time...")
Write-Host -ForegroundColor Green $("INFO: Pulling $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG from docker hub. This may take some time...")
$ErrorActionPreference = "SilentlyContinue"
docker pull $("microsoft/"+$ControlDaemonBaseImage)
docker pull "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG"
$ErrorActionPreference = "Stop"
if (-not $LastExitCode -eq 0) {
Throw $("ERROR: Failed to docker pull microsoft/"+$ControlDaemonBaseImage+":latest.")
Throw $("ERROR: Failed to docker pull $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG.")
}
Write-Host -ForegroundColor Green $("INFO: docker pull of microsoft/"+$ControlDaemonBaseImage+":latest completed successfully")
Write-Host -ForegroundColor Green $("INFO: docker pull of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG completed successfully")
Write-Host -ForegroundColor Green $("INFO: Tagging $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG as microsoft/$ControlDaemonBaseImage")
docker tag "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" microsoft/$ControlDaemonBaseImage
}
} else {
Write-Host -ForegroundColor Green "INFO: Image"$("microsoft/"+$ControlDaemonBaseImage+":latest")"is already loaded in the control daemon"
@ -663,17 +672,20 @@ Try {
if ($null -eq $env:WINDOWS_BASE_IMAGE) {
$env:WINDOWS_BASE_IMAGE="microsoft/windowsservercore"
}
if ($null -eq $env:WINDOWS_BASE_IMAGE_TAG) {
$env:WINDOWS_BASE_IMAGE_TAG="latest"
}
# Lowercase and make sure it has a microsoft/ prefix
$env:WINDOWS_BASE_IMAGE = $env:WINDOWS_BASE_IMAGE.ToLower()
if ($($env:WINDOWS_BASE_IMAGE -Split "/")[0] -ne "microsoft") {
Throw "ERROR: WINDOWS_BASE_IMAGE should start microsoft/"
if (! $($env:WINDOWS_BASE_IMAGE -Split "/")[0] -match "microsoft") {
Throw "ERROR: WINDOWS_BASE_IMAGE should start microsoft/ or mcr.microsoft.com/"
}
Write-Host -ForegroundColor Green "INFO: Base image for tests is $env:WINDOWS_BASE_IMAGE"
$ErrorActionPreference = "SilentlyContinue"
if ($((& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images --format "{{.Repository}}:{{.Tag}}" | Select-String $($env:WINDOWS_BASE_IMAGE+":latest") | Measure-Object -Line).Lines) -eq 0) {
if ($((& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images --format "{{.Repository}}:{{.Tag}}" | Select-String "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" | Measure-Object -Line).Lines) -eq 0) {
# Try the internal azure CI image version or Microsoft internal corpnet where the base image is already pre-prepared on the disk,
# either through Invoke-DockerCI or, in the case of Azure CI servers, baked into the VHD at the same location.
if (Test-Path $("c:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar")) {
@ -686,18 +698,20 @@ Try {
}
Write-Host -ForegroundColor Green "INFO: docker load of"$($env:WINDOWS_BASE_IMAGE -Split "/")[1]" into daemon under test completed successfully"
} else {
# We need to docker pull it instead. It will come in directly as microsoft/imagename:latest
Write-Host -ForegroundColor Green $("INFO: Pulling "+$env:WINDOWS_BASE_IMAGE+":latest from docker hub into daemon under test. This may take some time...")
# We need to docker pull it instead. It will come in directly as microsoft/imagename:tagname
Write-Host -ForegroundColor Green $("INFO: Pulling "+$env:WINDOWS_BASE_IMAGE+":"+$env:WINDOWS_BASE_IMAGE_TAG+" from docker hub into daemon under test. This may take some time...")
$ErrorActionPreference = "SilentlyContinue"
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" pull $($env:WINDOWS_BASE_IMAGE)
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" pull "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG"
$ErrorActionPreference = "Stop"
if (-not $LastExitCode -eq 0) {
Throw $("ERROR: Failed to docker pull "+$env:WINDOWS_BASE_IMAGE+":latest into daemon under test.")
Throw $("ERROR: Failed to docker pull $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG into daemon under test.")
}
Write-Host -ForegroundColor Green $("INFO: docker pull of "+$env:WINDOWS_BASE_IMAGE+":latest into daemon under test completed successfully")
Write-Host -ForegroundColor Green $("INFO: docker pull of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG into daemon under test completed successfully")
Write-Host -ForegroundColor Green $("INFO: Tagging $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG as microsoft/$ControlDaemonBaseImage in daemon under test")
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" tag "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" microsoft/$ControlDaemonBaseImage
}
} else {
Write-Host -ForegroundColor Green "INFO: Image"$($env:WINDOWS_BASE_IMAGE+":latest")"is already loaded in the daemon under test"
Write-Host -ForegroundColor Green "INFO: Image $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG is already loaded in the daemon under test"
}
@ -705,7 +719,7 @@ Try {
$ErrorActionPreference = "SilentlyContinue"
$dutimgVersion = $(&"$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" inspect $($env:WINDOWS_BASE_IMAGE) --format "{{.OsVersion}}")
$ErrorActionPreference = "Stop"
Write-Host -ForegroundColor Green $("INFO: Version of "+$env:WINDOWS_BASE_IMAGE+":latest is '"+$dutimgVersion+"'")
Write-Host -ForegroundColor Green $("INFO: Version of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG is '"+$dutimgVersion+"'")
}
# Run the validation tests unless SKIP_VALIDATION_TESTS is defined.
@ -752,14 +766,7 @@ Try {
#if ($bbCount -eq 0) {
Write-Host -ForegroundColor Green "INFO: Building busybox"
$ErrorActionPreference = "SilentlyContinue"
# This is a temporary hack for nanoserver
if ($env:WINDOWS_BASE_IMAGE -ne "microsoft/windowsservercore") {
Write-Host -ForegroundColor Red "HACK HACK HACK - Building 64-bit nanoserver busybox image"
$(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" build -t busybox https://raw.githubusercontent.com/jhowardmsft/busybox64/v1.1/Dockerfile | Out-Host)
} else {
$(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" build -t busybox https://raw.githubusercontent.com/jhowardmsft/busybox/v1.1/Dockerfile | Out-Host)
}
$(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" build -t busybox https://raw.githubusercontent.com/jhowardmsft/busybox/v1.1/Dockerfile | Out-Host)
$ErrorActionPreference = "Stop"
if (-not($LastExitCode -eq 0)) {
Throw "ERROR: Failed to build busybox image"


@ -174,18 +174,11 @@ bundle() {
main() {
if [ -z "${KEEPBUNDLE-}" ]; then
echo "Removing bundles/"
rm -rf "bundles/*"
rm -rf bundles/*
echo
fi
mkdir -p bundles
# Windows and symlinks don't get along well
if [ "$(go env GOHOSTOS)" != 'windows' ]; then
rm -f bundles/latest
# preserve latest symlink for backward compatibility
ln -sf . bundles/latest
fi
if [ $# -lt 1 ]; then
bundles=(${DEFAULT_BUNDLES[@]})
else


@ -17,17 +17,45 @@ if [[ "${TESTFLAGS}" = *-test.run* ]]; then
fi
if [ -z ${MAKEDIR} ]; then
export MAKEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
if [ -z "${MAKEDIR}" ]; then
MAKEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export MAKEDIR
fi
source "$MAKEDIR/.go-autogen"
source "${MAKEDIR}/.go-autogen"
# Set defaults
: ${TEST_REPEAT:=1}
: ${TESTFLAGS:=}
: ${TESTDEBUG:=}
: "${TEST_REPEAT:=1}"
: "${TESTFLAGS:=}"
: "${TESTDEBUG:=}"
integration_api_dirs=${TEST_INTEGRATION_DIR:-"$(go list -test -f '{{- if ne .ForTest "" -}}{{- .Dir -}}{{- end -}}' ./integration/...)"}
setup_integration_test_filter() {
if [ -z "${TEST_FILTER}" ]; then
return
fi
if [ -z "${TEST_SKIP_INTEGRATION}" ]; then
: "${TEST_INTEGRATION_DIR:=$(grep -rl "func\ .*${TEST_FILTER}.*\(t\ \*testing\.T\)" ./integration | grep '_test\.go' | xargs -I file dirname file | uniq)}"
if [ -z "${TEST_INTEGRATION_DIR}" ]; then
echo "Skipping integration tests since the supplied filter \"${TEST_FILTER}\" omits all integration tests"
TEST_SKIP_INTEGRATION=1
else
TESTFLAGS_INTEGRATION+="-test.run ${TEST_FILTER}"
fi
fi
if [ -z "${TEST_SKIP_INTEGRATION_CLI}" ]; then
# ease up on the filtering here since CLI suites are namespaced by an object
if grep -r "${TEST_FILTER}.*\(c\ \*check\.C\)" ./integration-cli | grep -q '_test\.go$'; then
TEST_SKIP_INTEGRATION_CLI=1
echo "Skipping integration-cli tests since the supplied filter \"${TEST_FILTER}\" omits all integration-cli tests"
else
TESTFLAGS_INTEGRATION_CLI+="-check.f ${TEST_FILTER}"
fi
fi
}
setup_integration_test_filter
integration_api_dirs="${TEST_INTEGRATION_DIR:-$(go list -test -f '{{- if ne .ForTest "" -}}{{- .Dir -}}{{- end -}}' ./integration/...)}"
run_test_integration() {
set_platform_timeout
@ -45,6 +73,7 @@ run_test_integration_suites() {
if ! (
cd "$dir"
echo "Running $PWD flags=${flags}"
# shellcheck disable=SC2086
test_env ./test.main ${flags}
); then exit 1; fi
done
@ -55,12 +84,13 @@ run_test_integration_legacy_suites() {
flags="-check.v -check.timeout=${TIMEOUT} -test.timeout=360m $TESTFLAGS ${TESTFLAGS_INTEGRATION_CLI}"
cd integration-cli
echo "Running $PWD flags=${flags}"
# shellcheck disable=SC2086
test_env ./test.main $flags
)
}
build_test_suite_binaries() {
if [ ${DOCKER_INTEGRATION_TESTS_VERIFIED-} ]; then
if [ -n "${DOCKER_INTEGRATION_TESTS_VERIFIED}" ]; then
echo "Skipping building test binaries; as DOCKER_INTEGRATION_TESTS_VERIFIED is set"
return
fi
@ -85,6 +115,7 @@ build_test_suite_binary() {
cleanup_test_suite_binaries() {
[ -n "$TESTDEBUG" ] && return
echo "Removing test suite binaries"
# shellcheck disable=SC2038
find integration* -name test.main | xargs -r rm
}
@ -133,6 +164,7 @@ error_on_leaked_containerd_shims() {
awk '$2 == "containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration/ { print $1 }')
if [ -n "$leftovers" ]; then
ps aux
# shellcheck disable=SC2086
kill -9 ${leftovers} 2> /dev/null
echo "!!!! WARNING you have left over shim(s), Cleanup your test !!!!"
exit 1
@ -142,11 +174,11 @@ error_on_leaked_containerd_shims() {
set_platform_timeout() {
# Test timeout.
if [ "${DOCKER_ENGINE_GOARCH}" = "arm64" ] || [ "${DOCKER_ENGINE_GOARCH}" = "arm" ]; then
: ${TIMEOUT:=10m}
: "${TIMEOUT:=10m}"
elif [ "${DOCKER_ENGINE_GOARCH}" = "windows" ]; then
: ${TIMEOUT:=8m}
: "${TIMEOUT:=8m}"
else
: ${TIMEOUT:=5m}
: "${TIMEOUT:=5m}"
fi
if [ "${TEST_REPEAT}" -gt 1 ]; then


@ -2,28 +2,34 @@
set -e -o pipefail
source hack/validate/.validate
new_tests=$(
validate_diff --diff-filter=ACMR --unified=0 -- 'integration/*_test.go' |
grep -E '^(\+func )(.*)(\*testing)' || true
)
if [ -z "$new_tests" ]; then
echo 'No new tests added to integration.'
return
fi
echo
echo "Found new integrations tests:"
echo "$new_tests"
echo "Running stress test for them."
run_integration_flaky() {
new_tests=$(
validate_diff --diff-filter=ACMR --unified=0 -- 'integration/*_test.go' |
grep -E '^(\+func Test)(.*)(\*testing\.T\))' || true
)
(
TESTARRAY=$(echo "$new_tests" | sed 's/+func //' | awk -F'\\(' '{print $1}' | tr '\n' '|')
# Note: TEST_REPEAT will make the test suite run 5 times, restarting the daemon
# and each test will run 5 times in a row under the same daemon.
# This will make a total of 25 runs for each test in TESTARRAY.
export TEST_REPEAT=5
export TESTFLAGS="-test.count ${TEST_REPEAT} -test.run ${TESTARRAY%?}"
echo "Using test flags: $TESTFLAGS"
source hack/make/test-integration
)
if [ -z "$new_tests" ]; then
echo 'No new tests added to integration.'
return
fi
echo
echo "Found new integrations tests:"
echo "$new_tests"
echo "Running stress test for them."
(
TESTARRAY=$(echo "$new_tests" | sed 's/+func //' | awk -F'\\(' '{print $1}' | tr '\n' '|')
# Note: TEST_REPEAT will make the test suite run 5 times, restarting the daemon
# and each test will run 5 times in a row under the same daemon.
# This will make a total of 25 runs for each test in TESTARRAY.
export TEST_REPEAT=5
export TESTFLAGS="-test.count ${TEST_REPEAT} -test.run ${TESTARRAY%?}"
echo "Using test flags: $TESTFLAGS"
source hack/make/test-integration
)
}
run_integration_flaky


@ -1706,7 +1706,7 @@ func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) {
out = strings.TrimSpace(out)
expected := "root"
if testEnv.OSType == "windows" {
if strings.Contains(testEnv.PlatformDefaults.BaseImage, "windowsservercore") {
if strings.Contains(testEnv.PlatformDefaults.BaseImage, "servercore") {
expected = `user manager\containeradministrator`
} else {
expected = `ContainerAdministrator` // nanoserver